repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
Bitcoin-ABC/bitcoin-abc | test/functional/abc-replay-protection.py | 1 | 11337 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks activation of UAHF and the different consensus
related to this activation.
It is derived from the much more complex p2p-fullblocktest.
"""
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
make_conform_to_ctor,
)
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSIG,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_FORKID,
SignatureHashForkId,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
# far into the future
REPLAY_PROTECTION_START_TIME = 2000000000
# Error due to invalid signature
RPC_INVALID_SIGNATURE_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)"
class PreviousSpendableOutput(object):
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n
class ReplayProtectionTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.block_heights = {}
self.tip = None
self.blocks = {}
        self.extra_args = [['-whitelist=noban@127.0.0.1',
"-replayprotectionactivationtime={}".format(
REPLAY_PROTECTION_START_TIME),
"-acceptnonstdtxn=1"]]
def next_block(self, number):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height)
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
# Do PoW, which is cheap on regnet
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def run_test(self):
node = self.nodes[0]
node.add_p2p_connection(P2PDataStore())
node.setmocktime(REPLAY_PROTECTION_START_TIME)
self.genesis_hash = int(node.getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
block.vtx.extend(new_transactions)
old_sha256 = block.sha256
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[
block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
node.p2p.send_blocks_and_test([self.tip], node)
# Now we need that block to mature so we can spend the coinbase.
maturity_blocks = []
for i in range(99):
block(5000 + i)
maturity_blocks.append(self.tip)
save_spendable_output()
node.p2p.send_blocks_and_test(maturity_blocks, node)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(get_spendable_output())
# Generate a key pair to test P2SH sigops count
private_key = ECKey()
private_key.generate()
public_key = private_key.get_pubkey().get_bytes()
# This is a little handier to use than the version in blocktools.py
def create_fund_and_spend_tx(spend, forkvalue=0):
# Fund transaction
script = CScript([public_key, OP_CHECKSIG])
txfund = create_tx_with_script(
spend.tx, spend.n, b'', amount=50 * COIN - 1000, script_pub_key=script)
txfund.rehash()
# Spend transaction
txspend = CTransaction()
txspend.vout.append(CTxOut(50 * COIN - 2000, CScript([OP_TRUE])))
txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b''))
# Sign the transaction
sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID
sighash = SignatureHashForkId(
script, txspend, 0, sighashtype, 50 * COIN - 1000)
sig = private_key.sign_ecdsa(sighash) + \
bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))
txspend.vin[0].scriptSig = CScript([sig])
txspend.rehash()
return [txfund, txspend]
def send_transaction_to_mempool(tx):
tx_id = node.sendrawtransaction(ToHex(tx))
assert tx_id in set(node.getrawmempool())
return tx_id
# Before the fork, no replay protection required to get in the mempool.
txns = create_fund_and_spend_tx(out[0])
send_transaction_to_mempool(txns[0])
send_transaction_to_mempool(txns[1])
# And txns get mined in a block properly.
block(1)
update_block(1, txns)
node.p2p.send_blocks_and_test([self.tip], node)
# Replay protected transactions are rejected.
replay_txns = create_fund_and_spend_tx(out[1], 0xffdead)
send_transaction_to_mempool(replay_txns[0])
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(replay_txns[1]))
# And block containing them are rejected as well.
block(2)
update_block(2, replay_txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(1)
# Create a block that would activate the replay protection.
bfork = block(5555)
bfork.nTime = REPLAY_PROTECTION_START_TIME - 1
update_block(5555, [])
node.p2p.send_blocks_and_test([self.tip], node)
activation_blocks = []
for i in range(5):
block(5100 + i)
activation_blocks.append(self.tip)
node.p2p.send_blocks_and_test(activation_blocks, node)
# Check we are just before the activation time
assert_equal(
node.getblockchaininfo()['mediantime'],
REPLAY_PROTECTION_START_TIME - 1)
# We are just before the fork, replay protected txns still are rejected
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(replay_txns[1]))
block(3)
update_block(3, replay_txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(5104)
# Send some non replay protected txns in the mempool to check
# they get cleaned at activation.
txns = create_fund_and_spend_tx(out[2])
send_transaction_to_mempool(txns[0])
tx_id = send_transaction_to_mempool(txns[1])
# Activate the replay protection
block(5556)
node.p2p.send_blocks_and_test([self.tip], node)
# Check we just activated the replay protection
assert_equal(
node.getblockchaininfo()['mediantime'],
REPLAY_PROTECTION_START_TIME)
# Non replay protected transactions are not valid anymore,
# so they should be removed from the mempool.
assert tx_id not in set(node.getrawmempool())
# Good old transactions are now invalid.
send_transaction_to_mempool(txns[0])
assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR,
node.sendrawtransaction, ToHex(txns[1]))
# They also cannot be mined
block(4)
update_block(4, txns)
node.p2p.send_blocks_and_test(
[self.tip], node, success=False, reject_reason='blk-bad-inputs')
# Rewind bad block
tip(5556)
# The replay protected transaction is now valid
replay_tx0_id = send_transaction_to_mempool(replay_txns[0])
replay_tx1_id = send_transaction_to_mempool(replay_txns[1])
# Make sure the transaction are ready to be mined.
tmpl = node.getblocktemplate()
found_id0 = False
found_id1 = False
for txn in tmpl['transactions']:
txid = txn['txid']
if txid == replay_tx0_id:
found_id0 = True
elif txid == replay_tx1_id:
found_id1 = True
assert found_id0 and found_id1
# And the mempool is still in good shape.
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id in set(node.getrawmempool())
# They also can also be mined
block(5)
update_block(5, replay_txns)
node.p2p.send_blocks_and_test([self.tip], node)
# Ok, now we check if a reorg work properly across the activation.
postforkblockid = node.getbestblockhash()
node.invalidateblock(postforkblockid)
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id in set(node.getrawmempool())
# Deactivating replay protection.
forkblockid = node.getbestblockhash()
node.invalidateblock(forkblockid)
# The funding tx is not evicted from the mempool, since it's valid in
# both sides of the fork
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id not in set(node.getrawmempool())
# Check that we also do it properly on deeper reorg.
node.reconsiderblock(forkblockid)
node.reconsiderblock(postforkblockid)
node.invalidateblock(forkblockid)
assert replay_tx0_id in set(node.getrawmempool())
assert replay_tx1_id not in set(node.getrawmempool())
if __name__ == '__main__':
ReplayProtectionTest().main()
| mit | 787,828,342,569,320,100 | 34.428125 | 129 | 0.61242 | false |
scality/ScalitySproxydSwift | test/scenario/multi-backend/fabfile/saio.py | 1 | 3713 | import os
import os.path
import fabric.contrib.files
from fabric.api import sudo
from utils import build_object_ring, render
def disk_setup(swift_user):
# Setup a loopdevice to act as disk for swift
sudo('mkdir -p /srv')
sudo('truncate -s 1GB /srv/swift-disk')
sudo('mkfs.xfs /srv/swift-disk')
fabric.contrib.files.append(
filename='/etc/fstab',
text='/srv/swift-disk /mnt/sdb1 xfs loop,noatime 0 0',
use_sudo=True
)
sudo('mkdir /mnt/sdb1')
sudo('mount /mnt/sdb1')
# Prepare directory structure for 4 swift nodes, with two "partitions" each
node_mkdir = 'mkdir -p /mnt/sdb1/{0:d}/node/sdb{1:d}'
num_nodes = 4
for i in range(1, num_nodes + 1):
sudo(node_mkdir.format(i, i))
sudo(node_mkdir.format(i, i + num_nodes))
sudo('ln -s /mnt/sdb1/{0:d} /srv/{1:d}'.format(i, i))
sudo('mkdir /var/cache/swift{0:d}'.format(i))
sudo('chown -R {0:s}: /mnt/sdb1'.format(swift_user))
sudo('mkdir /var/run/swift')
sudo('chown {0:s}: /var/run/swift /var/cache/swift*'.format(swift_user))
render(
directory='assets/saio/phase1/etc',
filenames=['rc.local'],
local_path_prefix='assets/saio/phase1',
content={'user': swift_user},
)
sudo('chmod 755 /etc/rc.local')
sudo('chown root: /etc/rc.local')
def install(swift_user):
sudo('pip install '
'git+https://github.com/openstack/[email protected]')
sudo('pip install git+https://github.com/openstack/[email protected]')
content = {
'user': swift_user,
'group': swift_user,
}
for path, _, filenames in os.walk('assets/saio/phase1/etc/swift'):
render(path, filenames, 'assets/saio/phase1', content)
sudo('chown -R {0:s}: /etc/swift'.format(swift_user))
def build_rings(swift_user):
# Account ring
build_object_ring(
swift_user=swift_user,
name='account.builder',
devices=[
'r1z1-127.0.0.1:6012/sdb1',
'r1z2-127.0.0.1:6022/sdb2',
'r1z3-127.0.0.1:6032/sdb3',
'r1z4-127.0.0.1:6042/sdb4',
],
)
# Container ring
build_object_ring(
swift_user=swift_user,
name='container.builder',
devices=[
'r1z1-127.0.0.1:6011/sdb1',
'r1z2-127.0.0.1:6021/sdb2',
'r1z3-127.0.0.1:6031/sdb3',
'r1z4-127.0.0.1:6041/sdb4',
],
)
# Object ring
build_object_ring(
swift_user=swift_user,
name='object.builder',
devices=[
'r1z1-127.0.0.1:6010/sdb1',
'r1z1-127.0.0.1:6010/sdb5',
'r1z2-127.0.0.1:6020/sdb2',
'r1z2-127.0.0.1:6020/sdb6',
'r1z3-127.0.0.1:6030/sdb3',
'r1z3-127.0.0.1:6030/sdb7',
'r1z4-127.0.0.1:6040/sdb4',
'r1z4-127.0.0.1:6040/sdb8',
],
)
def setup_rsync(swift_user):
render(
directory='assets/saio/phase1/etc',
filenames=['rsyncd.conf'],
local_path_prefix='assets/saio/phase1',
content={'user': swift_user, 'group': swift_user},
)
fabric.contrib.files.sed(
filename='/etc/default/rsync',
before='RSYNC_ENABLE=false',
after='RSYNC_ENABLE=true',
use_sudo=True,
)
sudo('sudo service rsync restart')
def install_scality_swift():
sudo('pip install '
'git+https://github.com/scality/scality-sproxyd-client.git')
sudo('pip install git+https://github.com/scality/ScalitySproxydSwift.git')
def start(swift_user):
sudo('swift-init main start', user=swift_user)
def stop(swift_user):
sudo('swift-init main stop', user=swift_user)
| apache-2.0 | 453,750,375,810,706,200 | 27.128788 | 79 | 0.578508 | false |
gwu-libraries/sfm-ui | sfm/sfm/settings/test_settings.py | 1 | 1817 | from sfm.settings.common import *
import tempfile
import os
DATABASES = {
# for unit tests
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'testdb'
}
}
SFM_DB_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_MQ_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_EXPORT_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_CONTAINERS_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SFM_COLLECTION_SET_DATA_DIR = os.path.join(tempfile.gettempdir(), "test-data")
SCHEDULER_DB_URL = "sqlite:///testdb"
SCHEDULE_HARVESTS = False
PERFORM_EXPORTS = False
PERFORM_EMAILS = False
PERFORM_USER_HARVEST_EMAILS = False
PERFORM_SERIALIZE = False
ADMINS = [("sfmadmin", "[email protected]")]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(process)d %(name)s %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'apscheduler': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'ui': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'message_consumer': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| mit | -1,005,177,759,540,445,600 | 23.226667 | 82 | 0.519538 | false |
mikexine/tweetset | tweetset/collect/views.py | 1 | 6251 | from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.views import login as login_view
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from collect.models import Collection
from django.db.models import Count
from collect.utils import pagination_helper
from collect.forms import CollectionForm
from django.utils.text import slugify
import json
import gzip
import csv
from collect.utils import flatten
def encode_if_string(s):
try:
return s.encode('utf-8')
except:
return s
@login_required
def download_csv(request, collection_id):
c = get_object_or_404(Collection,pk=collection_id,user=request.user)
response = HttpResponse(content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename="'+slugify(c.name)+'.csv.gz"'
with gzip.GzipFile(fileobj=response, mode="w") as f:
list_of_tweets = []
for t in c.tweets.all():
list_of_tweets.append(flatten(t.data))
if len(list_of_tweets) > 0:
writer = csv.DictWriter(f,
['id', 'text', 'retweeted', 'created_at',
'user_id', 'user_screen_name'],
extrasaction='ignore', dialect='excel')
writer.writeheader()
for t in list_of_tweets:
writer.writerow({k: encode_if_string(v) for k, v in t.items()})
return response
@login_required
def download_json(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
print c.tweets.all()
response = HttpResponse(content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename="' + slugify(c.name) + '.json.gz"'
list_of_tweets = []
for t in c.tweets.all():
list_of_tweets.append(t.data)
with gzip.GzipFile(fileobj=response, mode="w") as f:
f.write(json.dumps(list_of_tweets, indent=4))
return response
@login_required
def map(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/map.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def time_chart(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/time_chart.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def frequencies(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
return render(request, "collect/stats/frequencies.html", {
'collection': c,
'collection_id': collection_id
})
@login_required
def tweets(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
page = request.GET.get('page', 1)
tweets, show_first, show_last, page_numbers = pagination_helper(object_list=c.tweets.all(), page=page, per_page=25, allow_empty_first_page=True)
return render(request, 'collect/tweets.html', {
'collection': c,
'tweets': tweets,
'show_first': show_first,
'show_last': show_last,
'page_numbers': page_numbers, })
@login_required
def edit_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
if request.method == 'POST':
form = CollectionForm(request.POST, instance=c)
if form.is_valid():
new_collection = form.save(commit=False)
new_collection.save()
return redirect('dashboard')
else:
form = CollectionForm(instance=c)
return render(request, 'collect/edit_collection.html',
{'collection': c, 'form': form, })
@login_required
def new_collection(request):
if request.method == 'POST':
form = CollectionForm(request.POST)
if form.is_valid():
new_collection = form.save(commit=False)
new_collection.user = request.user
new_collection.save()
return redirect('dashboard')
else:
form = CollectionForm()
return render(request, 'collect/new_collection.html', {'form': form, })
@login_required
def stop_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
c.stop()
return redirect('dashboard')
@login_required
def start_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
if c.start():
messages.success(request, "Collection successfully started!")
else:
messages.error(request, "Collection could not be started.")
return redirect('dashboard')
@login_required
def make_stats(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
a = c.mstats()
print a
if a:
messages.success(request, "Stats will be available soon!")
else:
messages.error(request, "Err.. Stats could not be started.")
return redirect('dashboard')
@login_required
def delete_collection(request, collection_id):
c = get_object_or_404(Collection, pk=collection_id, user=request.user)
c.delete()
return redirect('dashboard')
@login_required
def dashboard(request):
collections = Collection.objects.filter(user=request.user).annotate(num_tweets=Count('tweets'))
return render(request, 'collect/dashboard.html',
{'collections': collections, })
def index(request):
return render(request, 'collect/index.html')
def contacts(request):
return render(request, 'collect/contacts.html')
def collect_login(request, *args, **kwargs):
return login_view(request, *args, **kwargs)
| mit | -1,877,005,152,931,094,800 | 32.25 | 148 | 0.666773 | false |
HardLight/denyhosts | DenyHosts/sync.py | 1 | 7559 | import logging
import os
import time
import sys
import socket
import requests
if sys.version_info < (3, 0):
from xmlrpclib import ServerProxy
else:
from xmlrpc.client import ServerProxy, Transport, ProtocolError
from .constants import SYNC_TIMESTAMP, SYNC_HOSTS, SYNC_HOSTS_TMP, SYNC_RECEIVED_HOSTS, SOCKET_TIMEOUT
logger = logging.getLogger("sync")
debug, info, error, exception = logger.debug, logger.info, logger.error, logger.exception
def get_plural(items):
if len(items) != 1:
return "s"
else:
return ""
if sys.version_info >= (3, 0):
class RequestsTransport(Transport):
def request(self, host, handler, data, verbose=False):
# set the headers, including the user-agent
headers = {"User-Agent": "my-user-agent",
"Content-Type": "text/xml",
"Accept-Encoding": "gzip"}
url = "http://%s%s" % (host, handler)
response = None
try:
response = requests.post(url, data=data, headers=headers, timeout=SOCKET_TIMEOUT)
response.raise_for_status()
except requests.RequestException as e:
if response is None:
exception(ProtocolError(url, 500, str(e), ""))
else:
exception(ProtocolError(
url,
response.status_code,
str(e),
response.headers
))
if response is not None:
return self.parse_response(response)
return response
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
p.feed(resp.text)
p.close()
return u.close()
class Sync(object):
def __init__(self, prefs):
self.__prefs = prefs
self.__work_dir = prefs.get('WORK_DIR')
self.__connected = False
self.__hosts_added = []
self.__server = None
self.__default_timeout = socket.getdefaulttimeout()
self.__pymajor_version = sys.version_info[0]
self.__sync_server = self.__prefs.get('SYNC_SERVER')
def xmlrpc_connect(self):
debug("xmlrpc_conect()")
# python 2
if self.__pymajor_version == 2:
socket.setdefaulttimeout(SOCKET_TIMEOUT) # set global socket timeout
for i in range(0, 3):
debug("XMLRPC Connection attempt: %d" % i)
try:
# python 2
if self.__pymajor_version == 2:
self.__server = ServerProxy(self.__sync_server)
else:
self.__server = ServerProxy(self.__sync_server, transport=RequestsTransport())
debug("Connected To SYNC Server")
self.__connected = True
break
except Exception as e:
error(str(e))
self.__connected = False
time.sleep(30)
if not self.__connected:
error('Failed to connect to %s after 3 attempts' % self.__sync_server)
# python 2
if self.__pymajor_version == 2:
socket.setdefaulttimeout(self.__default_timeout) # set timeout back to the default
return self.__connected
def xmlrpc_disconnect(self):
if self.__connected:
try:
# self.__server.close()
self.__server = None
except Exception:
pass
self.__connected = False
def get_sync_timestamp(self):
timestamp = 0
try:
with open(os.path.join(self.__work_dir, SYNC_TIMESTAMP)) as fp:
line = fp.readline().strip()
if len(line) > 0:
timestamp = int(line)
return timestamp
return timestamp
except Exception as e:
error(str(e))
return 0
def set_sync_timestamp(self, timestamp):
try:
with open(os.path.join(self.__work_dir, SYNC_TIMESTAMP), "w") as fp:
fp.write(timestamp)
except Exception as e:
error(e)
def send_new_hosts(self):
debug("send_new_hosts()")
self.__hosts_added = []
try:
src_file = os.path.join(self.__work_dir, SYNC_HOSTS)
dest_file = os.path.join(self.__work_dir, SYNC_HOSTS_TMP)
os.rename(src_file, dest_file)
except OSError:
return False
hosts = []
with open(dest_file, 'r') as fp:
# less memory usage than using readlines()
for line in fp:
hosts.append(line.strip())
try:
self.__send_new_hosts(hosts)
info("sent %d new host%s", len(hosts), get_plural(hosts))
self.__hosts_added = hosts
except Exception:
os.rename(dest_file, src_file)
return False
try:
os.unlink(dest_file)
except OSError:
pass
return True
def __send_new_hosts(self, hosts):
debug("__send_new_hosts()")
if not self.__connected and not self.xmlrpc_connect():
error("Could not initiate xmlrpc connection")
return
for i in range(0, 3):
try:
self.__server.add_hosts(hosts)
break
except Exception as e:
exception(e)
time.sleep(30)
def receive_new_hosts(self):
debug("receive_new_hosts()")
data = self.__receive_new_hosts()
if data is None:
return None
try:
timestamp = data['timestamp']
self.set_sync_timestamp(timestamp)
hosts = data['hosts']
info("received %d new host%s", len(hosts), get_plural(hosts))
debug("hosts added %s", hosts)
self.__save_received_hosts(hosts, timestamp)
return hosts
except Exception as e:
exception(e)
return None
def __receive_new_hosts(self):
debug("__receive_new_hosts()")
if not self.__connected and not self.xmlrpc_connect():
error("Could not initiate xmlrpc connection")
return
timestamp = self.get_sync_timestamp()
sync_dl_threshold = self.__prefs.get("SYNC_DOWNLOAD_THRESHOLD")
sync_dl_resiliency = self.__prefs.get("SYNC_DOWNLOAD_RESILIENCY")
data = None
for i in range(0, 3):
try:
data = self.__server.get_new_hosts(
timestamp,
sync_dl_threshold,
self.__hosts_added,
sync_dl_resiliency
)
break
except Exception as e:
exception(e)
pass
time.sleep(30)
if data is None:
error('Unable to retrieve data from the sync server')
return data
def __save_received_hosts(self, hosts, timestamp):
debug('__save_received_hosts()')
try:
timestr = time.ctime(float(timestamp))
with open(os.path.join(self.__work_dir, SYNC_RECEIVED_HOSTS), "a") as fp:
for host in hosts:
fp.write("%s:%s\n" % (host, timestr))
except IOError as e:
error(e)
return
finally:
fp.close()
| gpl-2.0 | 7,353,463,990,728,281,000 | 31.165957 | 102 | 0.513957 | false |
susi/angya | widgets/nav.py | 1 | 1162 | """This module defines the left navigation and its buttons."""
import flask
from google.appengine.api import users
class Navigation(object):
"""The Navigation returns information to render the nav menu buttons."""
def __init__(self, app):
self.app = app
def render(self):
"""Returns a json map of the buttons for the navigation bar."""
buttons = [
{'name': 'close',
'url': 'javascript:closeNavigation()',
'hint': 'close navigation'},
{'name': 'list',
'url': 'javascript:tripmanager.listTrips()',
'hint': 'my trips list'},
{'name': 'edit',
'url': 'javascript:tripmanager.createTrip()',
'hint': 'create trip'},
{'name': 'marker',
'url': 'javascript:tripmanager.addPlace()',
'hint': 'add place to trip'},
{'name': 'map-type',
'url': 'javascript:swapMapType()',
'hint': 'change map type'},
]
widget = {
'name': 'left-nav',
'buttons': buttons,
'js': flask.url_for('static', filename='js/widgets/nav.js'),
'css': flask.url_for('static', filename='css/widgets/nav.css')
}
return flask.jsonify(**widget)
| apache-2.0 | 3,088,551,322,639,043,600 | 28.794872 | 74 | 0.583477 | false |
sbenthall/bigbang | tests/bigbang_tests.py | 1 | 7419 | from nose.tools import *
from testfixtures import LogCapture
from bigbang import repo_loader
import bigbang.archive as archive
import bigbang.mailman as mailman
import bigbang.parse as parse
import bigbang.process as process
import bigbang.utils as utils
import mailbox
import os
import networkx as nx
import pandas as pd
from config.config import CONFIG
test_txt = ""
TEMP_DIR = os.path.join(CONFIG.test_data_path, "tmp")
def test_git_dependancy():
repo = repo_loader.get_repo("https://github.com/sbenthall/bigbang.git", in_type = "remote")
def setup():
try:
os.mkdir(TEMP_DIR)
except OSError: # Python 2.7-specific, alas; FileExistsError in py3
pass # temporary directory already exists, that's cool
def teardown():
# remove all files in the temporary files directory, as cleanup
temp_files = os.listdir(TEMP_DIR)
for f in temp_files:
os.remove(os.path.join(TEMP_DIR, f))
def test_split_references():
refs = " <[email protected]>\n\t<[email protected]>"
split = parse.split_references(refs)
assert len(split) == 2, split
def test_mailman_chain():
name = "bigbang-dev-test.txt"
#archive loaded from mbox
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
arx.save("test.csv")
#archive loaded from stored csv
arx2 = archive.load("test.csv")
print arx.data.dtypes
print arx.data.shape
assert arx.data.shape == arx2.data.shape, \
"Original and restored archives are different shapes"
assert (arx2.data.index == arx.data.index).all(), \
"Original and restored archives have nonidentical indices"
assert [t.get_num_messages() for t in arx.get_threads()] == [3,1,2], \
"Thread message count in mbox archive is off"
assert [t.get_num_messages() for t in arx2.get_threads()] == [3,1,2], \
"Thread message count in restored archive is off"
# smoke test entity resolution
arx2.resolve_entities()
os.remove("test.csv")
def test_clean_message():
name = "2001-November.txt"
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
body = arx.data['Body'][ '<[email protected]>']
assert "But seemingly it is even stranger than this." in body, \
"Selected wrong message"
assert "Is it a problem of lapack3.0 of of" in body, \
"Quoted text is not in uncleaned message"
assert "Is it a problem of lapack3.0 of of" not in utils.clean_message(body), \
"Quoted text is in cleaned message"
def test_from_header_distance():
a = 'Fernando.Perez at colorado.edu (Fernando.Perez at colorado.edu)'
    b = 'Fernando.Perez at colorado.edu (Fernando.Perez@colorado.edu)'
assert process.from_header_distance(a,b) == 0, \
"from_header_distance computing incorrect value"
a = ''
b = ''
assert True, \
"from_header_distance computing incorrect value"
def test_email_entity_resolution():
name = "2001-November.txt"
arx = archive.Archive(name,archive_dir="tests/data",mbox=True)
e = process.resolve_sender_entities(arx.get_activity(resolved=False))
eact = utils.repartition_dataframe(arx.get_activity(),e)
assert True, "email entity resolution crashed"
def test_labeled_blockmodel():
g = nx.DiGraph()
g.add_edge(0,1)
g.add_edge(0,2)
g.add_edge(0,3)
g.add_edge(0,4)
p = {'B': [1,2,3,4], 'A': [0]}
bg = utils.labeled_blockmodel(g,p)
assert list(bg.edges(data=True))[0][2]['weight'] == 4.0, \
"Incorrect edge weight in labeled blockmodel"
assert list(bg.edges()) == [('A','B')], \
"Incorrected edges in labeled blockmodel"
def test_valid_urls():
test_urls_path = os.path.join(CONFIG.test_data_path, 'urls-test-file.txt')
with LogCapture() as l:
urls = mailman.urls_to_collect(test_urls_path)
assert "#ignored" not in urls, "failed to ignore a comment line"
assert "http://www.example.com/1" in urls, "failed to find valid url"
assert "http://www.example.com/2/" in urls, "failed to find valid url, whitespace strip issue"
assert "https://www.example.com/3/" in urls, "failed to find valid url, whitespace strip issue"
assert "invalid.com" not in urls, "accepted invalid url"
assert len(l.actual()) == 2, "wrong number of log entries"
for (fromwhere, level, msg) in l.actual():
assert level == "WARNING", "logged something that wasn't a warning"
assert len(urls) == 3, "wrong number of urls parsed from file"
def test_empty_list_compute_activity_issue_246():
test_df_csv_path = os.path.join(CONFIG.test_data_path, 'empty-archive-df.csv')
df = pd.read_csv(test_df_csv_path)
with assert_raises(mailman.MissingDataException):
empty_archive = archive.Archive(df)
activity = empty_archive.get_activity()
def test_mailman_normalizer():
browse_url = 'https://mailarchive.ietf.org/arch/browse/ietf/'
search_url = 'https://mailarchive.ietf.org/arch/search/?email_list=ietf'
random_url = 'http://example.com'
better_url = 'https://www.ietf.org/mail-archive/text/ietf/'
assert mailman.normalize_archives_url(browse_url) == better_url, "failed to normalize"
assert mailman.normalize_archives_url(search_url) == better_url, "failed to normalize"
assert mailman.normalize_archives_url(random_url) == random_url, "should not have changed other url"
def test_mailman_list_name():
ietf_archive_url = 'https://www.ietf.org/mail-archive/text/ietf/'
w3c_archive_url = 'https://lists.w3.org/Archives/Public/public-privacy/'
random_url = 'http://example.com'
assert mailman.get_list_name(ietf_archive_url) == 'ietf', "failed to grab ietf list name"
assert mailman.get_list_name(w3c_archive_url) == 'public-privacy', "failed to grab w3c list name"
assert mailman.get_list_name(random_url) == random_url, "should not have changed other url"
def test_activity_summary():
list_url = 'https://lists.w3.org/Archives/Public/test-activity-summary/'
activity_frame = mailman.open_activity_summary(list_url, archive_dir=CONFIG.test_data_path)
assert str(type(activity_frame)) == "<class 'pandas.core.frame.DataFrame'>", "not a DataFrame?"
assert len(activity_frame.columns) == 1, "activity summary should have one column"
def test_provenance():
test_list_name = 'test-list-name'
test_list_url = 'https://example.com/test-list-url/'
test_notes = 'Test notes.'
mailman.populate_provenance(TEMP_DIR, list_name=test_list_name, list_url=test_list_url, notes=test_notes)
assert os.path.exists(os.path.join(TEMP_DIR, mailman.PROVENANCE_FILENAME)), "provenance file should have been created"
provenance = mailman.access_provenance(TEMP_DIR)
assert provenance != None, "provenance should be something"
assert provenance['list']['list_name'] == test_list_name, "list name should be in the provenance"
assert provenance['list']['list_url'] == test_list_url, "list url should be in the provenance"
assert provenance['notes'] == test_notes, "notes should be in the provenance"
provenance['notes'] = 'modified provenance'
mailman.update_provenance(TEMP_DIR, provenance)
provenance_next = mailman.access_provenance(TEMP_DIR)
    assert provenance_next['notes'] == 'modified provenance', "confirm modified provenance"
| agpl-3.0 | -7,112,561,389,532,358,000 | 36.664975 | 122 | 0.684189 | false |
CodeReclaimers/neat-python | examples/xor/visualize.py | 1 | 5915 | from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
| bsd-3-clause | 8,867,354,121,167,928,000 | 29.025381 | 112 | 0.597464 | false |
jdgwartney/boundary-einstein-python | bootstrap.py | 1 | 2028 | #!/usr/bin/env python
import os
import shutil
import sys
import subprocess
import tarfile
import urllib
class Bootstrap:
def __init__(self,
version="12.0.4",
base='http://pypi.python.org/packages/source/v/virtualenv',
python="python2",
env="py",
requirements="requirements.txt"):
self.version = version
self.base = base
self.python = python
self.env = env
self.dirname = 'virtualenv-' + self.version
self.tgz_file = self.dirname + '.tar.gz'
self.venv_url = self.base + '/' + self.tgz_file
self.requirements=requirements
def shellcmd(self,cmd,echo=False):
""" Run 'cmd' in the shell and return its standard out.
"""
if echo: print '[cmd] {0}'.format(cmd)
out = subprocess.check_output(cmd,stderr=sys.stderr,shell=True)
if echo: print out
return out
def download(self):
""" Fetch virtualenv from PyPI
"""
urllib.urlretrieve(self.venv_url,self.tgz_file)
def extract(self):
""" Untar
"""
tar = tarfile.open(self.tgz_file,"r:gz")
tar.extractall()
def create(self):
""" Create the initial env
"""
self.shellcmd('{0} {1}/virtualenv.py {2}'.format(self.python,self.dirname,self.env))
def install(self):
"""Install the virtualenv package itself into the initial env
"""
self.shellcmd('{0}/bin/pip install {1}'.format(self.env,self.tgz_file))
def install_libs(self):
"""Install the virtualenv package itself into the initial env
"""
self.shellcmd('{0}/bin/pip install -r {1}'.format(self.env,self.requirements))
def cleanup(self):
""" Cleanup
"""
os.remove(self.tgz_file)
shutil.rmtree(self.dirname)
def setup(self):
"""Bootraps a python environment
"""
self.download()
self.extract()
self.create()
self.install()
self.cleanup()
if os.path.isfile(self.requirements):
self.install_libs()
if __name__ == "__main__":
bootstrap = Bootstrap()
bootstrap.setup()
| apache-2.0 | -8,298,653,270,635,625,000 | 24.670886 | 88 | 0.622288 | false |
Praxyk/Praxyk-DevOps | server/unittest/unit_test.py | 1 | 2481 | #!/bin/env python
import _fix_path_
import sys
import datetime
class UnitTest :
def maintest(self,name, desc, f) :
return self.logger.log_event(self.logclient, 'UNIT TEST', ("s" if f else "f"),
['Test Name', 'Description'],
(str(name), desc) )
# this is used for asserting actions are true that don't constiitute the main prupose of
# a test, but still needs to be logged and verified. Ex - a test that tries to update items
# in a database might need to login to the DB first, they would pass the result of the login
# attempt to this function, but the result of the updates to the maintest() function
def subtest(self,name, desc, f) :
return self.logger.log_event(self.logclient, 'SUB-TEST', ("s" if f else "f"),
['Test Name', 'Description'],
(str(name), desc) )
def logteststart(self, name, info="") :
return self.logger.log_event(self.logclient, 'UNIT TEST', 'a', ['Test Name', 'Info'], (name, info))
def loginfo(self, name, info) :
return self.logger.log_event(self.logclient, 'TEST-INFO', "i", ['Message'], str(info))
def loghead(self) :
title = self.title + ' UNIT TEST START '
exchar = '-'
logstr = '\n' + 30*exchar + title + 30*exchar + '\n'
logstr += '''Start Time : ''' + str(datetime.datetime.now()).split(' ')[1] + '\n'
for data in self.head_data :
logstr += 3*exchar+' [%s] \n' % data
logstr += 30*exchar + len(title)*exchar + 30*exchar + '\n'
self.logger.logblock(self.logclient, logstr)
def logtail(self, result) :
title = self.title + ' UNIT TEST FINISH '
exchar = '-'
logstr = '\n' + 30*exchar + title + 30*exchar + '\n'
logstr += 'End Time : ' + str(datetime.datetime.now()).split(' ')[1] + '\n'
logstr += 'Result : ' + str(result) + '\n'
for data in self.tail_data :
logstr += 3*exchar+' [%s] \n' % data
logstr += 30*exchar + len(title)*exchar + 30*exchar + '\n'
self.logger.logblock(self.logclient, logstr)
def __init__(self, testargs) : #logftest, testtbl, schema) :
self.passed = False
self.head_data
self.tail_data
self.title
self.logclient = testargs['logclient']
self.logger = testargs['logutil']
self.loghead()
| gpl-2.0 | 1,886,044,161,479,319,300 | 43.303571 | 107 | 0.554212 | false |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/storage/tests/latest/test_storage_account_scenarios.py | 1 | 114261 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
import unittest
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, JMESPathCheck, ResourceGroupPreparer,
StorageAccountPreparer, api_version_constraint, live_only, LiveScenarioTest,
record_only)
from azure.cli.testsdk.decorators import serial_test
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
from knack.util import CLIError
from datetime import datetime, timedelta
from azure_devtools.scenario_tests import AllowLargeResponse
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01')
class StorageAccountTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(name_prefix='cli_test_storage_service_endpoints')
def test_storage_account_service_endpoints(self, resource_group):
kwargs = {
'rg': resource_group,
'acc': self.create_random_name(prefix='cli', length=24),
'vnet': 'vnet1',
'subnet': 'subnet1'
}
self.cmd('storage account create -g {rg} -n {acc} --bypass Metrics --default-action Deny --https-only'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Metrics'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('storage account update -g {rg} -n {acc} --bypass Logging --default-action Allow'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Allow')])
self.cmd('storage account update -g {rg} -n {acc} --set networkRuleSet.default_action=deny'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}'.format(**kwargs))
self.cmd(
'network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --service-endpoints Microsoft.Storage'.format(
**kwargs))
self.cmd('storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
# test network-rule add idempotent
self.cmd('storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.2.0.0/24'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 2),
JMESPathCheck('length(virtualNetworkRules)', 1)
])
# test network-rule add idempotent
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 2),
JMESPathCheck('length(virtualNetworkRules)', 1)
])
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 1),
JMESPathCheck('length(virtualNetworkRules)', 0)
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(name_prefix='cli_test_storage_service_endpoints')
@StorageAccountPreparer()
def test_storage_account_resource_access_rules(self, resource_group, storage_account):
self.kwargs = {
'rg': resource_group,
'sa': storage_account,
'rid1': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1",
'rid2': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2",
'rid3': "/subscriptions/a7e99807-abbf-4642-bdec-2c809a96a8bc/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3",
'tid1': "72f988bf-86f1-41af-91ab-2d7cd011db47",
'tid2': "72f988bf-86f1-41af-91ab-2d7cd011db47"
}
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
# test network-rule add idempotent
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
# test network-rule add more
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid2} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 2)
])
self.cmd(
'storage account network-rule add -g {rg} --account-name {sa} --resource-id {rid3} --tenant-id {tid2}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 3)
])
# remove network-rule
self.cmd(
'storage account network-rule remove -g {rg} --account-name {sa} --resource-id {rid1} --tenant-id {tid1}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 2)
])
self.cmd(
'storage account network-rule remove -g {rg} --account-name {sa} --resource-id {rid2} --tenant-id {tid2}')
self.cmd('storage account network-rule list -g {rg} --account-name {sa}', checks=[
JMESPathCheck('length(resourceAccessRules)', 1)
])
@serial_test()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_create_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
cmd = 'az storage account create -n {} -g {} --sku Standard_LRS --assign-identity'.format(name, resource_group)
result = self.cmd(cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
@serial_test()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_update_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --sku Standard_LRS'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('identity', None)])
update_cmd = 'az storage account update -n {} -g {} --assign-identity'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
@AllowLargeResponse()
@ResourceGroupPreparer(parameter_name_for_location='location')
def test_create_storage_account(self, resource_group, location):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --sku {} -l {}'.format(
name, resource_group, 'Standard_LRS', location))
self.cmd('storage account check-name --name {}'.format(name), checks=[
JMESPathCheck('nameAvailable', False),
JMESPathCheck('reason', 'AlreadyExists')
])
self.cmd('storage account list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].location', 'westus'),
JMESPathCheck('[0].sku.name', 'Standard_LRS'),
JMESPathCheck('[0].resourceGroup', resource_group)
])
self.cmd('az storage account show -n {} -g {}'.format(name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', location),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('kind', 'StorageV2')
])
self.cmd('az storage account show -n {}'.format(name), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', location),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('kind', 'StorageV2')
])
self.cmd('storage account show-connection-string -g {} -n {} --protocol http'.format(
resource_group, name), checks=[
JMESPathCheck("contains(connectionString, 'https')", False),
JMESPathCheck("contains(connectionString, '{}')".format(name), True)])
self.cmd('storage account update -g {} -n {} --tags foo=bar cat'
.format(resource_group, name),
checks=JMESPathCheck('tags', {'cat': '', 'foo': 'bar'}))
self.cmd('storage account update -g {} -n {} --sku Standard_GRS --tags'
.format(resource_group, name),
checks=[JMESPathCheck('tags', {}),
JMESPathCheck('sku.name', 'Standard_GRS')])
self.cmd('storage account update -g {} -n {} --set tags.test=success'
.format(resource_group, name),
checks=JMESPathCheck('tags', {'test': 'success'}))
self.cmd('storage account delete -g {} -n {} --yes'.format(resource_group, name))
self.cmd('storage account check-name --name {}'.format(name),
checks=JMESPathCheck('nameAvailable', True))
large_file_name = self.create_random_name(prefix='cli', length=24)
self.cmd('storage account create -g {} -n {} --sku {} --enable-large-file-share'.format(
resource_group, large_file_name, 'Standard_LRS'))
self.cmd('az storage account show -n {} -g {}'.format(large_file_name, resource_group), checks=[
JMESPathCheck('name', large_file_name),
JMESPathCheck('sku.name', 'Standard_LRS'),
JMESPathCheck('largeFileSharesState', 'Enabled')
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus2euap')
def test_create_storage_account_with_double_encryption(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --require-infrastructure-encryption'.format(
name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('encryption.requireInfrastructureEncryption', True)
])
self.cmd('az storage account show -n {} -g {}'.format(name, resource_group), checks=[
JMESPathCheck('name', name),
JMESPathCheck('encryption.requireInfrastructureEncryption', True)
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-10-01')
@ResourceGroupPreparer(parameter_name_for_location='location', location='southcentralus')
def test_create_storage_account_v2(self, resource_group, location):
self.kwargs.update({
'name': self.create_random_name(prefix='cli', length=24),
'loc': location
})
self.cmd('storage account create -n {name} -g {rg} -l {loc} --kind StorageV2',
checks=[JMESPathCheck('kind', 'StorageV2')])
self.cmd('storage account check-name --name {name}', checks=[
JMESPathCheck('nameAvailable', False),
JMESPathCheck('reason', 'AlreadyExists')
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-01-01')
@ResourceGroupPreparer(location='southcentralus')
def test_storage_create_default_sku(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('sku.name', 'Standard_RAGRS')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-10-01')
@ResourceGroupPreparer(location='southcentralus')
def test_storage_create_default_kind(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('kind', 'StorageV2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns true'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer(location='southcentralus', name_prefix='cli_storage_account_hns')
def test_storage_create_with_hns_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 --hns false'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('isHnsEnabled', False)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus2euap', name_prefix='cli_storage_account_encryption')
def test_storage_create_with_encryption_key_type(self, resource_group):
name = self.create_random_name(prefix='cliencryption', length=24)
create_cmd = 'az storage account create -n {} -g {} --kind StorageV2 -t Account -q Service'.format(
name, resource_group)
self.cmd(create_cmd, checks=[
JMESPathCheck('encryption.services.queue', None),
JMESPathCheck('encryption.services.table.enabled', True),
JMESPathCheck('encryption.services.table.keyType', 'Account'),
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_create_with_public_access(self, resource_group):
name1 = self.create_random_name(prefix='cli', length=24)
name2 = self.create_random_name(prefix='cli', length=24)
name3 = self.create_random_name(prefix='cli', length=24)
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access'.format(name1, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access true'.format(name2, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-blob-public-access false'.format(name3, resource_group),
checks=[JMESPathCheck('allowBlobPublicAccess', False)])
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
@StorageAccountPreparer(name_prefix='blob')
def test_storage_update_with_public_access(self, storage_account):
self.cmd('az storage account update -n {} --allow-blob-public-access'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account update -n {} --allow-blob-public-access true'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', True)])
self.cmd('az storage account update -n {} --allow-blob-public-access false'.format(storage_account),
checks=[JMESPathCheck('allowBlobPublicAccess', False)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_create_with_min_tls(self, resource_group):
name1 = self.create_random_name(prefix='cli', length=24)
name2 = self.create_random_name(prefix='cli', length=24)
name3 = self.create_random_name(prefix='cli', length=24)
name4 = self.create_random_name(prefix='cli', length=24)
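# Without --min-tls-version the account keeps the service default and minimumTlsVersion is not set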
self.cmd('az storage account create -n {} -g {}'.format(name1, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', None)])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_0'.format(name2, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_0')])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_1'.format(name3, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_1')])
self.cmd('az storage account create -n {} -g {} --min-tls-version TLS1_2'.format(name4, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
@StorageAccountPreparer(name_prefix='tls')
def test_storage_update_with_min_tls(self, storage_account, resource_group):
self.cmd('az storage account show -n {} -g {}'.format(storage_account, resource_group),
checks=[JMESPathCheck('minimumTlsVersion', None)])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_0'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_0')])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_1'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_1')])
self.cmd('az storage account update -n {} -g {} --min-tls-version TLS1_2'.format(
storage_account, resource_group), checks=[JMESPathCheck('minimumTlsVersion', 'TLS1_2')])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account_routing')
def test_storage_account_with_routing_preference(self, resource_group):
# Create Storage Account with Publish MicrosoftEndpoint, choose MicrosoftRouting
name1 = self.create_random_name(prefix='clirouting', length=24)
create_cmd1 = 'az storage account create -n {} -g {} --routing-choice MicrosoftRouting --publish-microsoft-endpoints true'.format(
name1, resource_group)
self.cmd(create_cmd1, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', None),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', True),
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
# Update Storage Account with Publish InternetEndpoint
update_cmd1 = 'az storage account update -n {} -g {} --routing-choice InternetRouting --publish-microsoft-endpoints false --publish-internet-endpoints true'.format(
name1, resource_group)
self.cmd(update_cmd1, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', True),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', False),
JMESPathCheck('routingPreference.routingChoice', 'InternetRouting'),
])
# Create Storage Account with Publish InternetEndpoint, choose InternetRouting
name2 = self.create_random_name(prefix='clirouting', length=24)
create_cmd2 = 'az storage account create -n {} -g {} --routing-choice InternetRouting --publish-internet-endpoints true --publish-microsoft-endpoints false'.format(
name2, resource_group)
self.cmd(create_cmd2, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', True),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', False),
JMESPathCheck('routingPreference.routingChoice', 'InternetRouting'),
])
# Update Storage Account with MicrosoftRouting routing choice
update_cmd2 = 'az storage account update -n {} -g {} --routing-choice MicrosoftRouting'\
.format(name2, resource_group)
self.cmd(update_cmd2, checks=[
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
# Create without any routing preference
name3 = self.create_random_name(prefix='clirouting', length=24)
create_cmd3 = 'az storage account create -n {} -g {}'.format(
name3, resource_group)
self.cmd(create_cmd3, checks=[
JMESPathCheck('routingPreference', None),
])
# Update Storage Account with Publish MicrosoftEndpoint, choose MicrosoftRouting
update_cmd3 = 'az storage account update -n {} -g {} --routing-choice MicrosoftRouting --publish-internet-endpoints false --publish-microsoft-endpoints true'\
.format(name3, resource_group)
self.cmd(update_cmd3, checks=[
JMESPathCheck('routingPreference.publishInternetEndpoints', False),
JMESPathCheck('routingPreference.publishMicrosoftEndpoints', True),
JMESPathCheck('routingPreference.routingChoice', 'MicrosoftRouting'),
])
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_account_with_shared_key_access(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
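# --allow-shared-key-access without a value defaults to true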
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access false'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', False)])
self.cmd('az storage account create -n {} -g {} --allow-shared-key-access true'.format(name, resource_group),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
self.cmd('az storage account update -n {} --allow-shared-key-access false'.format(name),
checks=[JMESPathCheck('allowSharedKeyAccess', False)])
self.cmd('az storage account update -n {} --allow-shared-key-access true'.format(name),
checks=[JMESPathCheck('allowSharedKeyAccess', True)])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2021-02-01')
@ResourceGroupPreparer(location='eastus', name_prefix='cli_storage_account')
def test_storage_account_with_key_and_sas_policy(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
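# --key-exp-days takes a number of days, --sas-exp takes a period in d.hh:mm:ss format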
self.cmd('az storage account create -n {} -g {}'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy', None),
JMESPathCheck('sasPolicy', None)])
self.cmd('az storage account create -n {} -g {} --key-exp-days 3'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 3),
JMESPathCheck('sasPolicy', None)])
self.cmd('az storage account create -n {} -g {} --sas-exp 1.23:59:59'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 3),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '1.23:59:59')])
self.cmd('az storage account update -n {} -g {} --key-exp-days 100000'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 100000),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '1.23:59:59')])
self.cmd('az storage account update -n {} -g {} --sas-exp 100000.00:00:00'.format(name, resource_group),
checks=[JMESPathCheck('keyPolicy.keyExpirationPeriodInDays', 100000),
JMESPathCheck('sasPolicy.sasExpirationPeriod', '100000.00:00:00')])
def test_show_usage(self):
self.cmd('storage account show-usage -l westus', checks=JMESPathCheck('name.value', 'StorageAccounts'))
def test_show_usage_no_location(self):
with self.assertRaises(SystemExit):
self.cmd('storage account show-usage')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_logging_operations(self, resource_group, storage_account):
connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, storage_account)).output
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.read', False),
JMESPathCheck('blob.retentionPolicy.enabled', False)
])
self.cmd('storage logging update --services b --log r --retention 1 '
'--connection-string {}'.format(connection_string))
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.read', True),
JMESPathCheck('blob.retentionPolicy.enabled', True),
JMESPathCheck('blob.retentionPolicy.days', 1)
])
self.cmd('storage logging off --connection-string {}'.format(connection_string))
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('blob.delete', False),
JMESPathCheck('blob.write', False),
JMESPathCheck('blob.read', False),
JMESPathCheck('blob.retentionPolicy.enabled', False),
JMESPathCheck('blob.retentionPolicy.days', None),
JMESPathCheck('queue.delete', False),
JMESPathCheck('queue.write', False),
JMESPathCheck('queue.read', False),
JMESPathCheck('queue.retentionPolicy.enabled', False),
JMESPathCheck('queue.retentionPolicy.days', None),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', False),
JMESPathCheck('table.retentionPolicy.enabled', False),
JMESPathCheck('table.retentionPolicy.days', None)
])
# Table service
with self.assertRaisesRegex(CLIError, "incorrect usage: for table service, the supported version for logging is `1.0`"):
self.cmd('storage logging update --services t --log r --retention 1 '
'--version 2.0 --connection-string {}'.format(connection_string))
# Set version to 1.0
self.cmd('storage logging update --services t --log r --retention 1 --version 1.0 --connection-string {} '
.format(connection_string))
time.sleep(10)
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('table.version', '1.0'),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', True),
JMESPathCheck('table.retentionPolicy.enabled', True),
JMESPathCheck('table.retentionPolicy.days', 1)
])
# Use default version
self.cmd('storage logging update --services t --log r --retention 1 --connection-string {}'.format(
connection_string))
time.sleep(10)
self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
JMESPathCheck('table.version', '1.0'),
JMESPathCheck('table.delete', False),
JMESPathCheck('table.write', False),
JMESPathCheck('table.read', True),
JMESPathCheck('table.retentionPolicy.enabled', True),
JMESPathCheck('table.retentionPolicy.days', 1)
])
@live_only()
@ResourceGroupPreparer()
def test_logging_error_operations(self, resource_group):
# BlobStorage doesn't support logging for some services
blob_storage = self.create_random_name(prefix='blob', length=24)
self.cmd('storage account create -g {} -n {} --kind BlobStorage --access-tier hot --https-only'.format(
resource_group, blob_storage))
blob_connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, blob_storage)).output
with self.assertRaisesRegex(CLIError, "Your storage account doesn't support logging"):
self.cmd('storage logging show --services q --connection-string {}'.format(blob_connection_string))
# PremiumStorage doesn't support logging for some services
premium_storage = self.create_random_name(prefix='premium', length=24)
self.cmd('storage account create -g {} -n {} --sku Premium_LRS --https-only'.format(
resource_group, premium_storage))
premium_connection_string = self.cmd(
'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, premium_storage)).output
with self.assertRaisesRegex(CLIError, "Your storage account doesn't support logging"):
self.cmd('storage logging show --services q --connection-string {}'.format(premium_connection_string))
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_metrics_operations(self, resource_group, storage_account_info):
self.storage_cmd('storage metrics show', storage_account_info) \
.assert_with_checks(JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', False))
self.storage_cmd('storage metrics update --services f --api true --hour true --minute true --retention 1 ',
storage_account_info)
self.storage_cmd('storage metrics show', storage_account_info).assert_with_checks(
JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', True))
@AllowLargeResponse()
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='account_1')
@StorageAccountPreparer(parameter_name='account_2')
def test_list_storage_accounts(self, account_1, account_2):
accounts_list = self.cmd('az storage account list').get_output_in_json()
assert len(accounts_list) >= 2
assert next(acc for acc in accounts_list if acc['name'] == account_1)
assert next(acc for acc in accounts_list if acc['name'] == account_2)
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_renew_account_key(self, resource_group, storage_account):
original_keys = self.cmd('storage account keys list -g {} -n {}'
.format(resource_group, storage_account)).get_output_in_json()
# the list should contain both access keys
assert original_keys[0] and original_keys[1]
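# Renewing the primary key should change keys[0] and leave keys[1] untouched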
renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key primary'
.format(resource_group, storage_account)).get_output_in_json()
assert renewed_keys[0] != original_keys[0]
assert renewed_keys[1] == original_keys[1]
original_keys = renewed_keys
renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key secondary'
.format(resource_group, storage_account)).get_output_in_json()
assert renewed_keys[0] == original_keys[0]
assert renewed_keys[1] != original_keys[1]
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_renew_account_kerb_key(self, resource_group):
name = self.create_random_name(prefix='clistoragekerbkey', length=24)
self.kwargs = {'sc': name, 'rg': resource_group}
self.cmd('storage account create -g {rg} -n {sc} -l eastus2euap --enable-files-aadds')
self.cmd('storage account keys list -g {rg} -n {sc}', checks=JMESPathCheck('length(@)', 4))
original_keys = self.cmd('storage account keys list -g {rg} -n {sc} --expand-key-type kerb',
checks=JMESPathCheck('length(@)', 4)).get_output_in_json()
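# With --expand-key-type kerb the list holds the two access keys (0, 1) followed by the two kerb keys (2, 3)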
renewed_access_keys = self.cmd('storage account keys renew -g {rg} -n {sc} --key secondary').get_output_in_json()
assert renewed_access_keys[0] == original_keys[0]
assert renewed_access_keys[1] != original_keys[1]
renewed_kerb_keys = self.cmd(
'storage account keys renew -g {rg} -n {sc} --key primary --key-type kerb').get_output_in_json()
assert renewed_kerb_keys[2] != original_keys[2]
assert renewed_kerb_keys[3] == original_keys[3]
@AllowLargeResponse()
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_create_account_sas(self, storage_account):
from azure.cli.core.azclierror import RequiredArgumentMissingError
with self.assertRaises(RequiredArgumentMissingError):
self.cmd('storage account generate-sas --resource-types o --services b --expiry 2000-01-01 '
'--permissions r --account-name ""')
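# A connection string without AccountName/AccountKey must be rejected as well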
invalid_connection_string = "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;"
with self.assertRaises(RequiredArgumentMissingError):
self.cmd('storage account generate-sas --resource-types o --services b --expiry 2000-01-01 '
'--permissions r --connection-string {}'.format(invalid_connection_string))
sas = self.cmd('storage account generate-sas --resource-types o --services b '
'--expiry 2046-12-31T08:23Z --permissions r --https-only --account-name {}'
.format(storage_account)).output
self.assertIn('sig=', sas, 'SAS token {} does not contain sig segment'.format(sas))
self.assertIn('se=', sas, 'SAS token {} does not contain se segment'.format(sas))
def test_list_locations(self):
self.cmd('az account list-locations',
checks=[JMESPathCheck("[?name=='westus'].displayName | [0]", 'West US')])
@ResourceGroupPreparer(location='southcentralus')
@StorageAccountPreparer(location='southcentralus')
def test_customer_managed_key(self, resource_group, storage_account):
self.kwargs = {'rg': resource_group, 'sa': storage_account, 'vt': self.create_random_name('clitest', 24)}
self.kwargs['vid'] = self.cmd('az keyvault create -n {vt} -g {rg} '
'-otsv --query id').output.rstrip('\n')
self.kwargs['vtn'] = self.cmd('az keyvault show -n {vt} -g {rg} '
'-otsv --query properties.vaultUri').output.strip('\n')
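# The key version is the last path segment of the key id returned by key create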
self.kwargs['ver'] = self.cmd("az keyvault key create -n testkey -p software --vault-name {vt} "
"-otsv --query 'key.kid'").output.rsplit('/', 1)[1].rstrip('\n')
self.kwargs['oid'] = self.cmd("az storage account update -n {sa} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
self.cmd('az keyvault update -n {vt} -g {rg} --set properties.enableSoftDelete=true')
self.cmd('az resource update --id {vid} --set properties.enablePurgeProtection=true')
# Enable key auto-rotation
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey ').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Pin to a version and opt out of key auto-rotation
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-version {ver}').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], self.kwargs['ver'])
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Enable key auto-rotation again
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-version ""').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], "")
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Change Key name
self.cmd("az keyvault key create -n newkey -p software --vault-name {vt} ")
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-vault {vtn} '
'--encryption-key-name "newkey"').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'newkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], "")
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Change Key source
result = self.cmd('az storage account update -n {sa} -g {rg} '
'--encryption-key-source Microsoft.Storage').get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
@ResourceGroupPreparer(location='eastus2euap')
def test_user_assigned_identity(self, resource_group):
self.kwargs = {
'rg': resource_group,
'sa1': self.create_random_name(prefix='sa1', length=24),
'sa2': self.create_random_name(prefix='sa2', length=24),
'sa3': self.create_random_name(prefix='sa3', length=24),
'identity': self.create_random_name(prefix='id', length=24),
'vt': self.create_random_name('clitest', 24)
}
# Prepare managed identity
identity = self.cmd('az identity create -n {identity} -g {rg}').get_output_in_json()
self.kwargs['iid'] = identity['id']
self.kwargs['oid'] = identity['principalId']
# Prepare key vault
keyvault = self.cmd('az keyvault create -n {vt} -g {rg} ').get_output_in_json()
self.kwargs['vid'] = keyvault['id']
self.kwargs['vtn'] = keyvault['properties']['vaultUri']
self.kwargs['ver'] = self.cmd("az keyvault key create -n testkey -p software --vault-name {vt} "
"-otsv --query 'key.kid'").output.rsplit('/', 1)[1].rstrip('\n')
# Grant the UAI access to the key vault
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
self.cmd('az keyvault update -n {vt} -g {rg} --set properties.enableSoftDelete=true')
self.cmd('az resource update --id {vid} --set properties.enablePurgeProtection=true')
# CMK at create with UAI
result = self.cmd('az storage account create -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Clearing a UserAssigned identity that is in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa1} -g {rg} --identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover from Identity clear
result = self.cmd('az storage account update -n {sa1} -g {rg} --identity-type UserAssigned --user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# CMK with UAI -> CMK with SAI
# 1. Add System Assigned Identity if it does not exist.
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned,UserAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
# 2. Add GET/WRAP/UNWRAP permissions on $KeyVaultUri for System Assigned identity.
self.kwargs['oid'] = self.cmd("az storage account update -n {sa1} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
# 3. Update encryption.identity to use the SystemAssigned identity. SAI must have access to existing KeyVault.
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--key-vault-user-identity-id "" ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], "")
# CMK with SAI -> MMK
result = self.cmd('az storage account update -n {sa1} -g {rg} '
'--encryption-key-source Microsoft.Storage ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa1'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
# MMK at create
result = self.cmd('az storage account create -n {sa2} -g {rg} --encryption-key-source Microsoft.Storage')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
# CMK with UAI and add SAI at create
result = self.cmd('az storage account create -n {sa3} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type SystemAssigned,UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa3'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'],
self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# MMK -> CMK with SAI
self.kwargs['sid'] = self.cmd("az storage account update -n {sa2} -g {rg} --assign-identity "
"-otsv --query 'identity.principalId'").output.strip('\n')
self.cmd('az keyvault set-policy -n {vt} --object-id {sid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['principalId'], self.kwargs['sid'])
self.assertEqual(result['encryption']['encryptionIdentity'], None)
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# CMK with SAI -> MMK
result = self.cmd('az storage account create -n {sa2} -g {rg} --encryption-key-source Microsoft.Storage')\
.get_output_in_json()
self.assertEqual(result['encryption']['keySource'], "Microsoft.Storage")
self.assertEqual(result['encryption']['keyVaultProperties'], None)
# MMK -> CMK with SAI
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--identity-type SystemAssigned ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['principalId'], self.kwargs['sid'])
self.assertEqual(result['encryption']['encryptionIdentity'], None)
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Clearing a SystemAssigned identity that is in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover account if SystemAssignedIdentity used for CMK is cleared
# 1. Create a new $UserAssignedIdentity that has access to $KeyVaultUri and update the account to use the new $UserAssignedIdentity for encryption (if not present already).
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type UserAssigned '
'--user-identity-id {iid} '
'--key-vault-user-identity-id {iid} ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 2. Update account to use SAI,UAI identity.
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type SystemAssigned,UserAssigned')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 3. Clear the $UserAssignedIdentity used for encryption.
result = self.cmd('az storage account update -n {sa2} -g {rg} --key-vault-user-identity-id ""')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], '')
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# 4. Remove $UserAssignedIdentity from the top level identity bag.
result = self.cmd('az storage account update -n {sa2} -g {rg} --identity-type SystemAssigned')\
.get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], '')
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
self.kwargs['sid'] = result['identity']['principalId']
# CMK with SAI -> CMK with UAI
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {iid} '
'--identity-type SystemAssigned,UserAssigned '
'--user-identity-id {iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'SystemAssigned,UserAssigned')
self.assertIn(self.kwargs['iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# CMK with UAI1 -> CMK with UAI2
self.kwargs['new_id'] = self.create_random_name(prefix='newid', length=24)
identity = self.cmd('az identity create -n {new_id} -g {rg}').get_output_in_json()
self.kwargs['new_iid'] = identity['id']
self.kwargs['new_oid'] = identity['principalId']
self.cmd('az keyvault set-policy -n {vt} --object-id {new_oid} -g {rg} '
'--key-permissions get wrapKey unwrapKey recover')
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--encryption-key-source Microsoft.Keyvault '
'--encryption-key-vault {vtn} '
'--encryption-key-name testkey '
'--key-vault-user-identity-id {new_iid} '
'--identity-type UserAssigned '
'--user-identity-id {new_iid}').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['new_iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['new_iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
# Clearing a UserAssigned identity that is in use with CMK will break access to the account
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type None ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'None')
self.assertEqual(result['identity']['userAssignedIdentities'], None)
# Recover from Identity clear
result = self.cmd('az storage account update -n {sa2} -g {rg} '
'--identity-type UserAssigned '
'--user-identity-id {new_iid} '
'--key-vault-user-identity-id {new_iid} ').get_output_in_json()
self.assertEqual(result['name'], self.kwargs['sa2'])
self.assertEqual(result['identity']['type'], 'UserAssigned')
self.assertIn(self.kwargs['new_iid'], result['identity']['userAssignedIdentities'])
self.assertEqual(result['encryption']['encryptionIdentity']['encryptionUserAssignedIdentity'], self.kwargs['new_iid'])
self.assertEqual(result['encryption']['keySource'], "Microsoft.Keyvault")
self.assertEqual(result['encryption']['keyVaultProperties']['keyName'], 'testkey')
self.assertEqual(result['encryption']['keyVaultProperties']['keyVaultUri'], self.kwargs['vtn'])
self.assertEqual(result['encryption']['keyVaultProperties']['keyVersion'], None)
self.assertIn('lastKeyRotationTimestamp', result['encryption']['keyVaultProperties'])
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_account_show_exit_codes(self, resource_group, storage_account):
self.kwargs = {'rg': resource_group, 'sa': storage_account}
self.assertEqual(self.cmd('storage account show -g {rg} -n {sa}').exit_code, 0)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show text_causing_parsing_error')
self.assertEqual(ex.exception.code, 2)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g fake_group -n {sa}')
self.assertEqual(ex.exception.code, 3)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g {rg} -n fake_account')
self.assertEqual(ex.exception.code, 3)
@ResourceGroupPreparer()
@StorageAccountPreparer(kind='StorageV2')
def test_management_policy(self, resource_group, storage_account):
import os
curr_dir = os.path.dirname(os.path.realpath(__file__))
policy_file = os.path.join(curr_dir, 'mgmt_policy.json').replace('\\', '\\\\')
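# Escape backslashes so the Windows path survives the command string formatting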
self.kwargs = {'rg': resource_group, 'sa': storage_account, 'policy': policy_file}
self.cmd('storage account management-policy create --account-name {sa} -g {rg} --policy @"{policy}"',
checks=[JMESPathCheck('policy.rules[0].name', 'olcmtest'),
JMESPathCheck('policy.rules[0].enabled', True),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.tierToCool.daysAfterModificationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.tierToArchive.daysAfterModificationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.baseBlob.delete.daysAfterModificationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.tierToCool.daysAfterCreationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.tierToArchive.daysAfterCreationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.snapshot.delete.daysAfterCreationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.actions.version.tierToCool.daysAfterCreationGreaterThan', 30),
JMESPathCheck('policy.rules[0].definition.actions.version.tierToArchive.daysAfterCreationGreaterThan', 90),
JMESPathCheck('policy.rules[0].definition.actions.version.delete.daysAfterCreationGreaterThan', 1000),
JMESPathCheck('policy.rules[0].definition.filters.blobTypes[0]', "blockBlob"),
JMESPathCheck('policy.rules[0].definition.filters.prefixMatch[0]', "olcmtestcontainer1")])
self.cmd('storage account management-policy update --account-name {sa} -g {rg}'
' --set "policy.rules[0].name=newname"')
self.cmd('storage account management-policy show --account-name {sa} -g {rg}',
checks=JMESPathCheck('policy.rules[0].name', 'newname'))
self.cmd('storage account management-policy delete --account-name {sa} -g {rg}')
self.cmd('storage account management-policy show --account-name {sa} -g {rg}', expect_failure=True)
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds false'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_aadds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {}'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-aadds true'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds false'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@record_only() # Need to configure domain service first
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_aadds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --enable-files-aadds true'.format(name, resource_group)
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AADDS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
create_cmd = """storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds --domain-name
{domain_name} --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid
{domain_guid} --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name
})
result = self.cmd("storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds false").get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_create_storage_account_with_files_adds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
create_cmd = """storage account create -n {sc} -g {rg} -l eastus2euap --enable-files-adds true --domain-name
{domain_name} --net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid
{domain_guid} --domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(create_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
update_cmd = """storage account update -n {sc} -g {rg} --enable-files-adds --domain-name {domain_name}
--net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}
--domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds_false(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
update_cmd = 'az storage account update -n {} -g {} --enable-files-adds false'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'None')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer()
def test_update_storage_account_with_files_adds_true(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} -l eastus2euap'.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('azureFilesIdentityBasedAuthentication', None)])
self.kwargs.update({
'rg': resource_group,
'sc': name,
'domain_name': 'mydomain.com',
'net_bios_domain_name': 'mydomain.com',
'forest_name': 'mydomain.com',
'domain_guid': '12345678-1234-1234-1234-123456789012',
'domain_sid': 'S-1-5-21-1234567890-1234567890-1234567890',
'azure_storage_sid': 'S-1-5-21-1234567890-1234567890-1234567890-1234'
})
update_cmd = """storage account update -n {sc} -g {rg} --enable-files-adds true --domain-name {domain_name}
--net-bios-domain-name {net_bios_domain_name} --forest-name {forest_name} --domain-guid {domain_guid}
--domain-sid {domain_sid} --azure-storage-sid {azure_storage_sid}"""
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('azureFilesIdentityBasedAuthentication', result)
self.assertEqual(result['azureFilesIdentityBasedAuthentication']['directoryServiceOptions'], 'AD')
activeDirectoryProperties = result['azureFilesIdentityBasedAuthentication']['activeDirectoryProperties']
self.assertEqual(activeDirectoryProperties['azureStorageSid'], self.kwargs['azure_storage_sid'])
self.assertEqual(activeDirectoryProperties['domainGuid'], self.kwargs['domain_guid'])
self.assertEqual(activeDirectoryProperties['domainName'], self.kwargs['domain_name'])
self.assertEqual(activeDirectoryProperties['domainSid'], self.kwargs['domain_sid'])
self.assertEqual(activeDirectoryProperties['forestName'], self.kwargs['forest_name'])
self.assertEqual(activeDirectoryProperties['netBiosDomainName'], self.kwargs['net_bios_domain_name'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(location='westus', name_prefix='cliedgezone')
def test_storage_account_extended_location(self, resource_group):
self.kwargs = {
'sa1': self.create_random_name(prefix='edge1', length=12),
'sa2': self.create_random_name(prefix='edge2', length=12),
'rg': resource_group
}
self.cmd('storage account create -n {sa1} -g {rg} --edge-zone microsoftrrdclab1 -l eastus2euap --sku Premium_LRS',
checks=[
JMESPathCheck('extendedLocation.name', 'microsoftrrdclab1'),
JMESPathCheck('extendedLocation.type', 'EdgeZone')
])
self.cmd('storage account create -n {sa2} -g {rg} --edge-zone microsoftlosangeles1 --sku Premium_LRS',
checks=[
JMESPathCheck('extendedLocation.name', 'microsoftlosangeles1'),
JMESPathCheck('extendedLocation.type', 'EdgeZone')
])
class RoleScenarioTest(LiveScenarioTest):
def run_under_service_principal(self):
account_info = self.cmd('account show').get_output_in_json()
return account_info['user']['type'] == 'servicePrincipal'
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
class RevokeStorageAccountTests(StorageScenarioMixin, RoleScenarioTest, LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_storage_revoke_keys')
@StorageAccountPreparer()
def test_storage_account_revoke_delegation_keys(self, resource_group, storage_account):
if self.run_under_service_principal():
return  # this test deletes users, which is beyond a SP's capacity, so quit...
from datetime import datetime, timedelta
import time
expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
account_info = self.get_account_info(resource_group, storage_account)
c = self.create_container(account_info)
b = self.create_random_name('blob', 24)
local_file = self.create_temp_file(128, full_random=False)
self.kwargs.update({
'expiry': expiry,
'account': storage_account,
'container': c,
'local_file': local_file,
'blob': b,
'rg': resource_group
})
result = self.cmd('storage account show -n {account} -g {rg}').get_output_in_json()
self.kwargs['sc_id'] = result['id']
user = self.create_random_name('testuser', 15)
self.kwargs['upn'] = user + '@azuresdkteam.onmicrosoft.com'
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
time.sleep(15)  # By design, it takes some time for the RBAC system to propagate graph object changes
self.cmd('role assignment create --assignee {upn} --role "Storage Blob Data Contributor" --scope {sc_id}')
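# --as-user with --auth-mode login requests a user delegation SAS backed by the role assignment above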
container_sas = self.cmd('storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
'rw --https-only --as-user --auth-mode login -otsv').output
self.kwargs['container_sas'] = container_sas
self.cmd('storage blob upload -c {container} -n {blob} -f "{local_file}" --account-name {account} --sas-token {container_sas}')
blob_sas = self.cmd('storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
'r --https-only --as-user --auth-mode login -otsv').output
self.kwargs['blob_sas'] = blob_sas
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
.assert_with_checks(JMESPathCheck('name', b))
self.cmd('storage account revoke-delegation-keys -n {account} -g {rg}')
        time.sleep(60)  # By design, it takes some time for the RBAC system to propagate graph object changes
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}', expect_failure=True)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
class BlobServicePropertiesTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_change_feed')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location="eastus2euap")
def test_storage_account_update_change_feed(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
from azure.cli.core.azclierror import InvalidArgumentValueError
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed false --change-feed-retention-days 14600 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --change-feed-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(InvalidArgumentValueError):
self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 146001 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 1)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 100)
result = self.cmd('{cmd} --enable-change-feed true --change-feed-retention-days 14600 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], True)
self.assertEqual(result['changeFeed']['retentionInDays'], 14600)
result = self.cmd('{cmd} --enable-change-feed false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['changeFeed']['enabled'], False)
self.assertEqual(result['changeFeed']['retentionInDays'], None)
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer()
def test_storage_account_update_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention false --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-delete-retention true --delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['deleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['deleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['deleteRetentionPolicy']['days'], None)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix="cli_test_sa_versioning")
@StorageAccountPreparer(location="eastus2euap", kind="StorageV2")
def test_storage_account_update_versioning(self):
result = self.cmd('storage account blob-service-properties update --enable-versioning true -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties update --enable-versioning false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], False)
result = self.cmd('storage account blob-service-properties update --enable-versioning -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
result = self.cmd('storage account blob-service-properties show -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['isVersioningEnabled'], True)
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_storage_account_update_delete_retention_policy')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location='eastus2euap')
def test_storage_account_update_container_delete_retention_policy(self, resource_group, storage_account):
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account blob-service-properties update'
})
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention false --container-delete-retention-days 365 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --container-delete-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days -1 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 0 -n {sa} -g {rg}')
with self.assertRaises(SystemExit):
self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 366 -n {sa} -g {rg}')
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 1 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 1)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 100 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 100)
result = self.cmd('{cmd} --enable-container-delete-retention true --container-delete-retention-days 365 -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], True)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], 365)
result = self.cmd('{cmd} --enable-container-delete-retention false -n {sa} -g {rg}').get_output_in_json()
self.assertEqual(result['containerDeleteRetentionPolicy']['enabled'], False)
self.assertEqual(result['containerDeleteRetentionPolicy']['days'], None)
@ResourceGroupPreparer()
@StorageAccountPreparer(kind="StorageV2")
def test_storage_account_default_service_properties(self):
from azure.cli.core.azclierror import InvalidArgumentValueError
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}', checks=[
self.check('defaultServiceVersion', None)])
with self.assertRaisesRegexp(InvalidArgumentValueError, 'Valid example: 2008-10-27'):
self.cmd('storage account blob-service-properties update --default-service-version 2018 -n {sa} -g {rg}')
self.cmd('storage account blob-service-properties update --default-service-version 2018-11-09 -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
self.cmd('storage account blob-service-properties show -n {sa} -g {rg}',
checks=[self.check('defaultServiceVersion', '2018-11-09')])
class FileServicePropertiesTests(StorageScenarioMixin, ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_file_soft_delete')
@StorageAccountPreparer(name_prefix='filesoftdelete', kind='StorageV2', location='eastus2euap')
def test_storage_account_file_delete_retention_policy(self, resource_group, storage_account):
from azure.cli.core.azclierror import ValidationError
self.kwargs.update({
'sa': storage_account,
'rg': resource_group,
'cmd': 'storage account file-service-properties'
})
self.cmd('{cmd} show --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7))
# Test update without properties
self.cmd('{cmd} update --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7))
self.cmd('{cmd} update --enable-delete-retention false -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', None))
self.cmd('{cmd} show -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', 0))
# Test update without properties
self.cmd('{cmd} update --account-name {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', False),
JMESPathCheck('shareDeleteRetentionPolicy.days', None))
with self.assertRaises(ValidationError):
self.cmd('{cmd} update --enable-delete-retention true -n {sa} -g {rg}')
with self.assertRaisesRegexp(ValidationError, "Delete Retention Policy hasn't been enabled,"):
            self.cmd('{cmd} update --delete-retention-days 1 -n {sa} -g {rg}')
with self.assertRaises(ValidationError):
self.cmd('{cmd} update --enable-delete-retention false --delete-retention-days 1 -n {sa} -g {rg}')
self.cmd(
'{cmd} update --enable-delete-retention true --delete-retention-days 10 -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 10))
self.cmd('{cmd} update --delete-retention-days 1 -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 1))
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2020-08-01-preview')
@ResourceGroupPreparer(name_prefix='cli_file_smb')
@StorageAccountPreparer(parameter_name='storage_account1', name_prefix='filesmb1', kind='FileStorage',
sku='Premium_LRS', location='centraluseuap')
@StorageAccountPreparer(parameter_name='storage_account2', name_prefix='filesmb2', kind='StorageV2')
def test_storage_account_file_smb_multichannel(self, resource_group, storage_account1, storage_account2):
from azure.core.exceptions import ResourceExistsError
self.kwargs.update({
'sa': storage_account1,
'sa2': storage_account2,
'rg': resource_group,
'cmd': 'storage account file-service-properties'
})
with self.assertRaisesRegexp(ResourceExistsError, "SMB Multichannel is not supported for the account."):
self.cmd('{cmd} update --mc -n {sa2} -g {rg}')
self.cmd('{cmd} show -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7),
JMESPathCheck('protocolSettings.smb.multichannel.enabled', False))
self.cmd('{cmd} show -n {sa2} -g {rg}').assert_with_checks(
JMESPathCheck('shareDeleteRetentionPolicy.enabled', True),
JMESPathCheck('shareDeleteRetentionPolicy.days', 7),
JMESPathCheck('protocolSettings.smb.multichannel', None))
self.cmd(
'{cmd} update --enable-smb-multichannel -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', True))
self.cmd(
'{cmd} update --enable-smb-multichannel false -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', False))
self.cmd(
'{cmd} update --enable-smb-multichannel true -n {sa} -g {rg}').assert_with_checks(
JMESPathCheck('protocolSettings.smb.multichannel.enabled', True))
class StorageAccountPrivateLinkScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_plr')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2', sku='Standard_LRS')
def test_storage_account_private_link(self, storage_account):
self.kwargs.update({
'sa': storage_account
})
self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}', checks=[
self.check('length(@)', 6)])
class StorageAccountPrivateEndpointScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_pe')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2')
def test_storage_account_private_endpoint(self, storage_account):
self.kwargs.update({
'sa': storage_account,
'loc': 'eastus',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
})
# Prepare network
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pr = self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}').get_output_in_json()
self.kwargs['group_id'] = pr[0]['groupId']
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.kwargs['sa_id'] = storage['id']
private_endpoint = self.cmd(
'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {sa_id} '
'--group-ids blob').get_output_in_json()
self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']
        # Show the connection on the storage account
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.assertIn('privateEndpointConnections', storage)
self.assertEqual(len(storage['privateEndpointConnections']), 1)
self.assertEqual(storage['privateEndpointConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.kwargs['sa_pec_id'] = storage['privateEndpointConnections'][0]['id']
self.kwargs['sa_pec_name'] = storage['privateEndpointConnections'][0]['name']
self.cmd('storage account private-endpoint-connection show --account-name {sa} -g {rg} --name {sa_pec_name}',
checks=self.check('id', '{sa_pec_id}'))
with self.assertRaisesRegexp(CLIError, 'Your connection is already approved. No need to approve again.'):
self.cmd('storage account private-endpoint-connection approve --account-name {sa} -g {rg} --name {sa_pec_name}')
self.cmd('storage account private-endpoint-connection reject --account-name {sa} -g {rg} --name {sa_pec_name}',
checks=[self.check('privateLinkServiceConnectionState.status', 'Rejected')])
with self.assertRaisesRegexp(CLIError, 'You cannot approve the connection request after rejection.'):
self.cmd('storage account private-endpoint-connection approve --account-name {sa} -g {rg} --name {sa_pec_name}')
self.cmd('storage account private-endpoint-connection delete --id {sa_pec_id} -y')
class StorageAccountSkuScenarioTest(ScenarioTest):
@unittest.skip('Storage account type Standard_ZRS cannot be changed to Standard_GZRS')
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(name_prefix='clistorage', location='eastus2')
@StorageAccountPreparer(name_prefix='clistoragesku', location='eastus2euap', kind='StorageV2', sku='Standard_ZRS')
def test_storage_account_sku(self, resource_group, storage_account):
self.kwargs = {
'gzrs_sa': self.create_random_name(prefix='cligzrs', length=24),
'GZRS': 'Standard_GZRS',
'rg': resource_group,
'sa': storage_account
}
# Create storage account with GZRS
self.cmd('az storage account create -n {gzrs_sa} -g {rg} --sku {GZRS} --https-only --kind StorageV2', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{gzrs_sa}')
])
# Convert ZRS to GZRS
self.cmd('az storage account show -n {sa} -g {rg}', checks=[
self.check('sku.name', 'Standard_ZRS'),
self.check('name', '{sa}')
])
self.cmd('az storage account update -n {sa} -g {rg} --sku {GZRS}', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{sa}'),
])
self.cmd('az storage account show -n {sa} -g {rg}', checks=[
self.check('sku.name', '{GZRS}'),
self.check('name', '{sa}')
])
self.cmd('az storage account delete -n {gzrs_sa} -g {rg} -y')
class StorageAccountFailoverScenarioTest(ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-04-01')
@ResourceGroupPreparer(name_prefix='clistorage', location='westus2')
def test_storage_account_failover(self, resource_group):
self.kwargs = {
'sa': self.create_random_name(prefix="storagegrzs", length=24),
'rg': resource_group
}
self.cmd('storage account create -n {sa} -g {rg} -l eastus2euap --kind StorageV2 --sku Standard_RAGRS --https-only',
checks=[self.check('name', '{sa}'),
self.check('sku.name', 'Standard_RAGRS')])
while True:
can_failover = self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats --query '
'geoReplicationStats.canFailover -o tsv').output.strip('\n')
if can_failover == 'true':
break
time.sleep(10)
self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats', checks=[
self.check('geoReplicationStats.canFailover', True),
self.check('failoverInProgress', None)
])
time.sleep(900)
self.cmd('storage account failover -n {sa} -g {rg} --no-wait -y')
self.cmd('storage account show -n {sa} -g {rg} --expand geoReplicationStats', checks=[
self.check('name', '{sa}'),
self.check('failoverInProgress', True)
])
class StorageAccountLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(name_prefix='clistorage', location='westus2')
def test_storage_account_local_context(self):
self.kwargs.update({
'account_name': self.create_random_name(prefix='cli', length=24)
})
self.cmd('storage account create -g {rg} -n {account_name} --https-only',
checks=[self.check('name', self.kwargs['account_name'])])
self.cmd('storage account show',
checks=[self.check('name', self.kwargs['account_name'])])
with self.assertRaises(CLIError):
self.cmd('storage account delete')
self.cmd('storage account delete -n {account_name} -y')
class StorageAccountORScenarioTest(StorageScenarioMixin, ScenarioTest):
@AllowLargeResponse()
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2019-06-01')
@ResourceGroupPreparer(name_prefix='cli_test_storage_account_ors', location='eastus2')
@StorageAccountPreparer(parameter_name='source_account', location='eastus2', kind='StorageV2')
@StorageAccountPreparer(parameter_name='destination_account', location='eastus2', kind='StorageV2')
@StorageAccountPreparer(parameter_name='new_account', location='eastus2', kind='StorageV2')
def test_storage_account_or_policy(self, resource_group, source_account, destination_account, new_account):
src_account_info = self.get_account_info(resource_group, source_account)
src_container = self.create_container(src_account_info)
dest_account_info = self.get_account_info(resource_group, destination_account)
dest_container = self.create_container(dest_account_info)
self.kwargs.update({
'rg': resource_group,
'src_sc': source_account,
'dest_sc': destination_account,
'new_sc': new_account,
'scont': src_container,
'dcont': dest_container,
})
# Enable ChangeFeed for Source Storage Accounts
self.cmd('storage account blob-service-properties update -n {src_sc} -g {rg} --enable-change-feed', checks=[
JMESPathCheck('changeFeed.enabled', True)])
# Enable Versioning for two Storage Accounts
self.cmd('storage account blob-service-properties update -n {src_sc} -g {rg} --enable-versioning', checks=[
JMESPathCheck('isVersioningEnabled', True)])
self.cmd('storage account blob-service-properties update -n {dest_sc} -g {rg} --enable-versioning', checks=[
JMESPathCheck('isVersioningEnabled', True)])
# Create ORS policy on destination account
result = self.cmd('storage account or-policy create -n {dest_sc} -s {src_sc} --dcont {dcont} '
'--scont {scont} -t "2020-02-19T16:05:00Z"').get_output_in_json()
self.assertIn('policyId', result)
self.assertIn('ruleId', result['rules'][0])
self.assertEqual(result["rules"][0]["filters"]["minCreationTime"], "2020-02-19T16:05:00Z")
self.kwargs.update({
'policy_id': result["policyId"],
'rule_id': result["rules"][0]["ruleId"]
})
# Get policy properties from destination account
self.cmd('storage account or-policy show -g {rg} -n {dest_sc} --policy-id {policy_id}') \
.assert_with_checks(JMESPathCheck('type', "Microsoft.Storage/storageAccounts/objectReplicationPolicies")) \
.assert_with_checks(JMESPathCheck('sourceAccount', source_account)) \
.assert_with_checks(JMESPathCheck('destinationAccount', destination_account)) \
.assert_with_checks(JMESPathCheck('rules[0].sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('rules[0].destinationContainer', dest_container))
# Add rules
src_container1 = self.create_container(src_account_info)
dest_container1 = self.create_container(dest_account_info)
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('length(@)', 1))
self.cmd('storage account or-policy rule show -g {rg} -n {dest_sc} --rule-id {rule_id} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('ruleId', result["rules"][0]["ruleId"])) \
.assert_with_checks(JMESPathCheck('sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('destinationContainer', dest_container))
result = self.cmd('storage account or-policy rule add -g {} -n {} --policy-id {} -d {} -s {} -t "2020-02-19T16:05:00Z"'.format(
resource_group, destination_account, self.kwargs["policy_id"], dest_container1, src_container1)).get_output_in_json()
self.assertEqual(result["rules"][0]["filters"]["minCreationTime"], "2020-02-19T16:05:00Z")
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.assert_with_checks(JMESPathCheck('length(@)', 2))
# Update rules
self.cmd('storage account or-policy rule update -g {} -n {} --policy-id {} --rule-id {} --prefix-match blobA blobB -t "2020-02-20T16:05:00Z"'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId'])) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[0]', 'blobA')) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[1]', 'blobB')) \
.assert_with_checks(JMESPathCheck('filters.minCreationTime', '2020-02-20T16:05:00Z'))
self.cmd('storage account or-policy rule show -g {} -n {} --policy-id {} --rule-id {}'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId'])) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[0]', 'blobA')) \
.assert_with_checks(JMESPathCheck('filters.prefixMatch[1]', 'blobB')) \
.assert_with_checks(JMESPathCheck('filters.minCreationTime', '2020-02-20T16:05:00Z'))
# Remove rules
self.cmd('storage account or-policy rule remove -g {} -n {} --policy-id {} --rule-id {}'.format(
resource_group, destination_account, result['policyId'], result['rules'][1]['ruleId']))
self.cmd('storage account or-policy rule list -g {rg} -n {dest_sc} --policy-id {policy_id}') \
.assert_with_checks(JMESPathCheck('length(@)', 1))
# Set ORS policy to source account
with self.assertRaisesRegex(CLIError, 'ValueError: Please specify --policy-id with auto-generated policy id'):
self.cmd('storage account or-policy create -g {rg} -n {src_sc} -d {dest_sc} -s {src_sc} --dcont {dcont} --scont {scont}')
import json
temp_dir = self.create_temp_dir()
policy_file = os.path.join(temp_dir, "policy.json")
with open(policy_file, "w") as f:
policy = self.cmd('storage account or-policy show -g {rg} -n {dest_sc} --policy-id {policy_id}')\
.get_output_in_json()
json.dump(policy, f)
self.kwargs['policy'] = policy_file
self.cmd('storage account or-policy create -g {rg} -n {src_sc} -p @"{policy}"')\
.assert_with_checks(JMESPathCheck('type', "Microsoft.Storage/storageAccounts/objectReplicationPolicies")) \
.assert_with_checks(JMESPathCheck('sourceAccount', source_account)) \
.assert_with_checks(JMESPathCheck('destinationAccount', destination_account)) \
.assert_with_checks(JMESPathCheck('rules[0].sourceContainer', src_container)) \
.assert_with_checks(JMESPathCheck('rules[0].destinationContainer', dest_container)) \
.assert_with_checks(JMESPathCheck('rules[0].filters.minCreationTime', '2020-02-19T16:05:00Z'))
# Update ORS policy
self.cmd('storage account or-policy update -g {} -n {} --policy-id {} --source-account {}'.format(
resource_group, destination_account, self.kwargs["policy_id"], new_account)) \
.assert_with_checks(JMESPathCheck('sourceAccount', new_account))
# Delete policy from destination and source account
self.cmd('storage account or-policy delete -g {rg} -n {src_sc} --policy-id {policy_id}')
self.cmd('storage account or-policy list -g {rg} -n {src_sc}') \
.assert_with_checks(JMESPathCheck('length(@)', 0))
self.cmd('storage account or-policy delete -g {rg} -n {dest_sc} --policy-id {policy_id}')
self.cmd('storage account or-policy list -g {rg} -n {dest_sc}') \
.assert_with_checks(JMESPathCheck('length(@)', 0))
| mit | 306,901,749,235,503,800 | 58.635177 | 180 | 0.645382 | false |
megaserg/geographiclib-cython-bindings | tests.py | 1 | 1720 | import unittest
import pytest
from geographiclib.geodesic import Geodesic
from geographiclib_cython import Geodesic as CythonGeodesic
from geopy.distance import great_circle
# Run with: python -m pytest tests.py
class TestGeodesic(unittest.TestCase):
def test_inverse(self):
actual = CythonGeodesic.WGS84.Inverse(10, 20, 30, 40)
expected = Geodesic.WGS84.Inverse(10, 20, 30, 40)
assert actual['s12'] == pytest.approx(expected['s12'], 1e-10)
assert actual['azi1'] == pytest.approx(expected['azi1'], 1e-10)
assert actual['azi2'] == pytest.approx(expected['azi2'], 1e-10)
def test_direct(self):
actual = CythonGeodesic.WGS84.Direct(10, 20, 30, 4000)
expected = Geodesic.WGS84.Direct(10, 20, 30, 4000)
assert actual['lat2'] == pytest.approx(expected['lat2'], 1e-10)
assert actual['lon2'] == pytest.approx(expected['lon2'], 1e-10)
assert actual['azi2'] == pytest.approx(expected['azi2'], 1e-10)
def test_inverse_line(self):
actual_line = CythonGeodesic.WGS84.InverseLine(10, 20, 30, 40)
expected_line = Geodesic.WGS84.InverseLine(10, 20, 30, 40)
assert actual_line.s13 == pytest.approx(expected_line.s13, 1e-10)
actual_pos = actual_line.Position(100000)
expected_pos = expected_line.Position(100000)
assert actual_pos['lat2'] == pytest.approx(expected_pos['lat2'], 1e-10)
assert actual_pos['lon2'] == pytest.approx(expected_pos['lon2'], 1e-10)
def test_sphere_distance(self):
actual = CythonGeodesic.Sphere().Inverse(10, 20, 30, 40)
expected = great_circle((10, 20), (30, 40))
assert actual['s12'] == pytest.approx(expected.meters, 1e-10)
| mit | 2,819,382,609,057,512,400 | 40.95122 | 79 | 0.659884 | false |
dhavalmanjaria/dma-student-information-system | user_management/tests/test_basic_info_form.py | 1 | 2326 | from django.test import TestCase
from user_management.forms import UserForm, BasicInfoForm, StudentInfoForm
from django.contrib.auth.models import User, Group
from django.contrib.auth.hashers import check_password
from datetime import datetime
from user_management.management.commands import initgroups
import logging
LOG = logging.getLogger('app')
class BasicInfoFormTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cmd = initgroups.Command()
cmd.handle()
def setUp(self):
form_data = {
'username': 'u_test_basic_info_form',
'password1': 'dhaval27',
'password2': 'dhaval27',
'email': '[email protected]'
}
form = UserForm(data=form_data)
self.assertTrue(form.is_valid())
LOG.debug(form.errors)
u = form.save()
form_data = {
'date_of_birth': '22/7/89',
'contact_number': '9881585223',
'group': Group.objects.get(name='Student').pk
}
form = BasicInfoForm(data=form_data, instance=u)
self.assertTrue(form.is_valid())
LOG.debug(form.errors)
u = form.save()
def test_basic_info_exists(self):
test_user = User.objects.get(username='u_test_basic_info_form')
self.assertTrue(test_user.basicinfo is not None)
def test_basic_info_save(self):
test_user = User.objects.get(username='u_test_basic_info_form')
form_data = {
'date_of_birth': '22/7/89',
'contact_number': '9881585223',
'group': Group.objects.get(name='Student').pk
}
form = BasicInfoForm(data=form_data, instance=test_user)
self.assertTrue(form.is_valid())
form.save()
def test_date_of_birth(self):
test_user = User.objects.get(username='u_test_basic_info_form')
dob = test_user.basicinfo.date_of_birth
entered_dob = datetime(1989, 7, 22).date()
self.assertEqual(dob, entered_dob)
def test_contact_number(self):
test_user = User.objects.get(username='u_test_basic_info_form')
self.assertEqual(
test_user.basicinfo.contact_number, '9881585223')
def tearDown(self):
Group.objects.all().delete()
| gpl-2.0 | -3,238,570,885,542,016,000 | 32.205882 | 74 | 0.600602 | false |
sekikn/ambari | ambari-agent/src/test/python/ambari_agent/TestCommandHooksOrchestrator.py | 2 | 2534 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from unittest import TestCase
from ambari_agent.models.hooks import HookPrefix
from mock.mock import patch
from ambari_agent.CommandHooksOrchestrator import HookSequenceBuilder, ResolvedHooks, HooksOrchestrator
class TestCommandHooksOrchestrator(TestCase):
def setUp(self):
def injector():
pass
def file_cache():
pass
file_cache.__setattr__("get_hook_base_dir", lambda x: os.path.join("tmp"))
injector.__setattr__("file_cache", file_cache)
self._orchestrator = HooksOrchestrator(injector)
@patch("os.path.isfile")
def test_check_orchestrator(self, is_file_mock):
is_file_mock.return_value = True
ret = self._orchestrator.resolve_hooks({
"commandType": "EXECUTION_COMMAND",
"serviceName": "ZOOKEEPER",
"role": "ZOOKEEPER_SERVER"
}, "START")
self.assertTrue(ret)
self.assertEquals(len(ret.post_hooks), 3)
self.assertEquals(len(ret.pre_hooks), 3)
def test_hook_seq_builder(self):
seq = list(HookSequenceBuilder().build(HookPrefix.pre, "cmd", "srv", "role"))
seq_rev = list(HookSequenceBuilder().build(HookPrefix.post, "cmd", "srv", "role"))
# testing base default sequence definition
check_list = [
"before-cmd",
"before-cmd-srv",
"before-cmd-srv-role"
]
check_list_1 = [
"after-cmd-srv-role",
"after-cmd-srv",
"after-cmd"
]
self.assertEquals(seq, check_list)
self.assertEquals(seq_rev, check_list_1)
def test_hook_resolved(self):
def pre():
for i in range(1, 5):
yield i
def post():
for i in range(1, 3):
yield i
ret = ResolvedHooks(pre(), post())
self.assertEqual(ret.pre_hooks, list(pre()))
self.assertEqual(ret.post_hooks, list(post()))
| apache-2.0 | 833,573,844,282,085,100 | 27.47191 | 103 | 0.693765 | false |
theDrake/python-experiments | YouFace/page.py | 1 | 5613 | from html import *
TITLE = 'YouFace'
SUBTITLE = "A billion dollars and it's yours!"
STYLESHEET = '/youface.css'
LINKLIST = [('http://cit.cs.dixie.edu/cs/1410/', 'CS 1410'), \
('http://new.dixie.edu/reg/syllabus/', 'College calendar'),]
class Form(BlockContainer):
def __init__(self, action):
BlockContainer.__init__(self, 'form')
self.addAttr('method', 'post')
self.addAttr('action', action)
class Label(Container):
def __init__(self, forAttr):
Container.__init__(self, 'label')
self.addAttr('for', forAttr)
class Input(Tag):
def __init__(self, inputType, name, value=None):
Tag.__init__(self, 'input')
self.addAttr('type', inputType)
self.addAttr('name', name)
if value:
self.addAttr('value', value)
class Box(Div):
def __init__(self, title):
Div.__init__(self)
self.addClass('box')
titleTag = H(1)
titleTag.addText(title)
self.addTag(titleTag)
class StatusBox(Box):
def __init__(self, userName):
Box.__init__(self, 'Welcome, ' + userName)
p1 = P()
p1.addTag(Label('status').addText('Change your status:'))
p1.addTag(Input('text', 'status'))
p2 = P()
p2.addTag(Input('submit', 'change', 'Change'))
self.addTag(Form('/status').addTag(p1).addTag(p2))
class RecentActivityBox(Box):
def __init__(self, activities):
Box.__init__(self, 'Recent status updates')
activityList = Ul()
for a in activities:
activityList.addTag(Li().addText(str(a)))
self.addTag(activityList)
class UnFriendBox(Box):
def __init__(self, friendName):
Box.__init__(self, 'You are currently friends with ' + friendName)
f = Form('/unfriend')
f.addTag(Input('hidden', 'name', friendName))
p = P()
p.addTag(Input('submit', 'unfriend', 'Unfriend'))
f.addTag(p)
self.addTag(P().addTag(f))
class LoginBox(Box):
def __init__(self):
Box.__init__(self, 'Login')
p1 = P()
p1.addTag(Label('name').addText('Name:'))
p1.addTag(Input('text', 'name'))
p2 = P()
p2.addTag(Label('password').addText('Password:'))
p2.addTag(Input('password', 'password'))
p3 = P()
p3.addTag(Input('submit', 'type', 'Login'))
p3.addTag(Input('submit', 'type', 'Create'))
p3.addTag(Input('submit', 'type', 'Delete'))
self.addTag(Form('/login').addTag(p1).addTag(p2).addTag(p3))
class Gadget(Div):
def __init__(self, title):
Div.__init__(self)
self.addClass('gadget')
self.addTag(H(1).addText(title))
class LinksGadget(Gadget):
def __init__(self, links=LINKLIST):
Gadget.__init__(self, 'Links')
linkList = Ul()
for link in links:
linkList.addTag(Li().addTag(A(link[0]).addText(str(link[1]))))
self.addTag(linkList)
class FriendsGadget(Gadget):
def __init__(self, friends):
Gadget.__init__(self, 'Friends')
friendList = Ul()
for name in friends:
listItem = Li().addTag(A('/friend/' + name).addText(name))
friendList.addTag(listItem)
self.addTag(friendList)
p = P()
p.addTag(Input('text', 'name'))
p.addTag(Input('submit', 'addfriend', 'Add Friend'))
self.addTag(Form('/addfriend').addTag(p))
class LogoutGadget(Gadget):
def __init__(self):
Gadget.__init__(self, 'Logout')
p = P().addTag(Input('submit', 'logout', 'Logout'))
self.addTag(Form('/logout').addTag(p))
class Page:
def __init__(self):
self.boxList = []
self.gadgetList = []
self.head = Head().addTag(Meta()).addTag(Title().addText(TITLE))
self.head.addTag(Stylesheet(STYLESHEET))
self.header = Div().setId('header')
self.header.addTag(H(1).addTag(A('/').addText(TITLE)))
self.header.addTag(H(2).addText(SUBTITLE))
def addBox(self, box):
self.boxList.append(box)
return self
def addGadget(self, gadget):
self.gadgetList.append(gadget)
return self
def __str__(self):
mainColumn = Div().setId('maincolumn')
for b in self.boxList:
mainColumn.addTag(b)
sidebar = Div().setId('sidebar')
for g in self.gadgetList:
sidebar.addTag(g)
mainContainer = Div().setId('maincontainer').addTag(self.header)
mainContainer.addTag(mainColumn).addTag(sidebar)
body = Body().addTag(mainContainer)
html = Html().addTag(self.head).addTag(body)
return str(html)
def __repr__(self):
return self.__str__()
class LoginPage(Page):
def __init__(self, linkList=LINKLIST):
Page.__init__(self)
self.addBox(LoginBox()).addGadget(LinksGadget(linkList))
class UserPage(Page):
def __init__(self, friends, linkList=LINKLIST):
Page.__init__(self)
self.addGadget(LogoutGadget()).addGadget(FriendsGadget(friends))
self.addGadget(LinksGadget(linkList))
class FeedPage(UserPage):
def __init__(self, name, recentStatusUpdates, friends):
UserPage.__init__(self, friends)
self.addBox(StatusBox(name))
self.addBox(RecentActivityBox(recentStatusUpdates))
class FriendPage(UserPage):
def __init__(self, name, recentStatusUpdates, friends):
UserPage.__init__(self, friends)
self.addBox(UnFriendBox(name))
self.addBox(RecentActivityBox(recentStatusUpdates))
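# Illustrative usage sketch (not part of the original module): shows how the
# Page subclasses compose boxes and gadgets into a complete HTML document.
# The user name, status updates and friend names below are made-up sample data.
def exampleFeedPage():
    friends = ['alice', 'bob']
    updates = ['alice is happy', 'bob went skiing']
    return str(FeedPage('carol', updates, friends))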
def main():
print 'page.py'
if __name__ == '__main__':
main()
| mit | -4,225,685,870,735,543,000 | 31.445087 | 74 | 0.582576 | false |
ProfHoekstra/bluesky | bluesky/traffic/windfield.py | 1 | 9404 | """ Wind implementation for BlueSky."""
from numpy import array, sin, cos, arange, radians, ones, append, ndarray, \
amin, minimum, repeat, delete, zeros, around, maximum, floor, \
interp, pi
from bluesky.tools.aero import ft
class Windfield():
""" Windfield class:
Methods:
clear() = clear windfield, no wind vectors defined
            addpoint(lat,lon,winddir,windspd,windalt=None)
= add a wind vector to a position,
windvector can be arrays for altitudes (optional)
returns index of vector (0,1,2,3,..)
all units are SI units, angles in degrees
get(lat,lon,alt=0)
= get wind vector for given position and optional
altitude, all can be arrays,
vnorth and veast will be returned in same dimension
remove(idx) = remove a defined profile using the index
Members:
lat(nvec) = latitudes of wind definitions
lon(nvec) = longitudes of wind definitions
altaxis(nalt) = altitude axis (fixed, 250 m resolution)
vnorth(nalt,nvec) = wind north component [m/s]
veast(nalt,nvec) = wind east component [m/s]
winddim = Windfield dimension, will automatically be detected:
0 = no wind
1 = constant wind
2 = 2D field (no alt profiles),
3 = 3D field (alt dependent wind at some points)
"""
def __init__(self):
# For altitude use fixed axis to allow vectorisation later
self.altmax = 45000. * ft # [m]
self.altstep = 100. * ft # [m]
# Axis
self.altaxis = arange(0., self.altmax + self.altstep, self.altstep)
self.idxalt = arange(0, len(self.altaxis), 1.)
self.nalt = len(self.altaxis)
# List of indices of points with an altitude profile (for 3D check)
self.iprof = []
# Clear actual field
self.clear()
return
def clear(self): #Clear actual field
# Windfield dimension will automatically be detected:
# 0 = no wind, 1 = constant wind, 2 = 2D field (no alt profiles),
# 3 = 3D field (alt matters), used to speed up interpolation
self.winddim = 0
self.lat = array([])
self.lon = array([])
self.vnorth = array([[]])
self.veast = array([[]])
self.nvec = 0
return
def addpoint(self,lat,lon,winddir,windspd,windalt=None):
""" addpoint: adds a lat,lon position with a wind direction [deg]
and wind speedd [m/s]
Optionally an array with altitudes can be used in which case windspd
and wind speed need to have the same dimension
"""
# If scalar, copy into table for altitude axis
if not(type(windalt) in [ndarray,list]) and windalt == None: # scalar to array
prof3D = False # no wind profile, just one value
wspd = ones(self.nalt)*windspd
wdir = ones(self.nalt)*winddir
vnaxis = wspd*cos(radians(wdir)+pi)
veaxis = wspd*sin(radians(wdir)+pi)
# if list or array, convert to alt axis of wind field
else:
prof3D = True # switch on 3D parameter as an altitude array is given
wspd = array(windspd)
wdir = array(winddir)
altvn = wspd*cos(radians(wdir)+pi)
altve = wspd*sin(radians(wdir)+pi)
alttab = windalt
vnaxis = interp(self.altaxis, alttab, altvn)
veaxis = interp(self.altaxis, alttab, altve)
# print array([vnaxis]).transpose()
self.lat = append(self.lat,lat)
self.lon = append(self.lon,lon)
idx = len(self.lat)-1
if self.nvec==0:
self.vnorth = array([vnaxis]).transpose()
self.veast = array([veaxis]).transpose()
else:
self.vnorth = append(self.vnorth,array([vnaxis]).transpose(),axis=1)
self.veast = append(self.veast, array([veaxis]).transpose(),axis=1)
if self.winddim<3: # No 3D => set dim to 0,1 or 2 dep on nr of points
self.winddim = min(2,len(self.lat))
if prof3D:
self.winddim = 3
self.iprof.append(idx)
self.nvec = self.nvec+1
return idx # return index of added point
def getdata(self,userlat,userlon,useralt=0.0): # in case no altitude specified and field is 3D, use sea level wind
eps = 1e-20 # [m2] to avoid divison by zero for using exact same points
swvector = (type(userlat)==list or type(userlat)==ndarray)
if swvector:
npos = len(userlat)
else:
npos = 1
# Convert user input to right shape: columns for positions
lat = array(userlat).reshape((1,npos))
lon = array(userlon).reshape((1,npos))
# Make altitude into an array, with zero or float value broadcast over npos
if type(useralt)==ndarray:
alt = useralt
elif type(useralt)==list:
alt = array(useralt)
elif type(useralt)==float:
alt = useralt*ones(npos)
else:
alt = zeros(npos)
# Check dimension of wind field
if self.winddim == 0: # None = no wind
vnorth = zeros(npos)
veast = zeros(npos)
elif self.winddim == 1: # Constant = one point defined, so constant wind
vnorth = ones(npos)*self.vnorth[0,0]
veast = ones(npos)*self.veast[0,0]
        elif self.winddim >= 2: # 2D/3D field = more than one point defined; altitude handled below
#---- Get horizontal weight factors
            # Average cosine for flat-earth approximation
cavelat = cos(radians(0.5*(lat+array([self.lat]).transpose())))
# Lat and lon distance in 60 nm units (1 lat degree)
dy = lat - array([self.lat]).transpose() #(nvec,npos)
dx = cavelat*(lon - array([self.lon]).transpose())
            # Calculate inverse distance squared
invd2 = 1./(eps+dx*dx+dy*dy) # inverse of distance squared
# Normalize weights
sumsid2 = ones((1,self.nvec)).dot(invd2) # totals to normalize weights
totals = repeat(sumsid2,self.nvec,axis=0) # scale up dims to (nvec,npos)
horfact = invd2/totals # rows x col = nvec x npos, weight factors
#---- Altitude interpolation
# No altitude profiles used: do 2D planar interpolation only
if self.winddim == 2 or ((type(useralt) not in (list,ndarray)) and useralt==0.0): # 2D field no altitude interpolation
vnorth = self.vnorth[0,:].dot(horfact)
veast = self.veast[0,:].dot(horfact)
# 3D interpolation as one or more points contain altitude profile
else:
# Get altitude index as float for alt interpolation
idxalt = maximum(0., minimum(self.altaxis[-1]-eps, alt) / self.altstep) # find right index
# Convert to index and factor
ialt = floor(idxalt).astype(int) # index array for lower altitude
falt = idxalt-ialt # factor for upper value
# Altitude interpolation combined with horizontal
nvec = len(self.lon) # Get number of definition points
# North wind (y-direction ot lat direction)
vn0 = (self.vnorth[ialt,:]*horfact.T).dot(ones((nvec,1))) # hor interpolate lower alt (npos x)
                vn1 = (self.vnorth[ialt+1,:]*horfact.T).dot(ones((nvec,1))) # hor interpolate upper alt (npos x)
vnorth = (1.-falt)*(vn0.reshape(npos)) + falt*(vn1.reshape(npos)) # As 1D array
# East wind (x-direction or lon direction)
ve0 = (self.veast[ialt,:]*horfact.T).dot(ones((nvec,1)))
ve1 = (self.veast[ialt+1,:]*horfact.T).dot(ones((nvec,1)))
veast = (1.-falt)*(ve0.reshape(npos)) + falt*(ve1.reshape(npos)) # As 1D array
# Return same type as positons were given
if type(userlat)==ndarray:
return vnorth,veast
elif type(userlat)==list:
return list(vnorth),list(veast)
else:
return float(vnorth),float(veast)
def remove(self,idx): # remove a point using the returned index when it was added
if idx<len(self.lat):
self.lat = delete(self.lat,idx)
            self.lon = delete(self.lon,idx)
self.vnorth = delete(self.vnorth,idx,axis=1)
self.veast = delete(self.veast ,idx,axis=1)
if idx in self.iprof:
self.iprof.remove(idx)
if self.winddim<3 or len(self.iprof)==0 or len(self.lat)==0:
self.winddim = min(2,len(self.lat)) # Check for 0, 1D, 2D or 3D
return
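# Illustrative usage sketch (not part of the original module): the position,
# wind profile and query altitude below are made-up sample values.
def example_windfield():
    field = Windfield()
    # Wind from the west: 10 m/s at sea level increasing to 30 m/s at 10 km
    field.addpoint(52.0, 4.0, winddir=[270., 270.], windspd=[10., 30.],
                   windalt=[0., 10000.])
    # Interpolated north/east wind components [m/s] halfway up the profile
    vnorth, veast = field.getdata(52.1, 4.1, 5000.)
    return vnorth, veast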
| gpl-3.0 | -166,896,459,923,706,270 | 39.245614 | 130 | 0.542003 | false |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/contrib/sphinx.py | 1 | 3587 | # -*- coding: utf-8 -*-
"""Sphinx documentation plugin used to document tasks.
Introduction
============
Usage
-----
Add the extension to your :file:`docs/conf.py` configuration module:
.. code-block:: python
extensions = (...,
'celery.contrib.sphinx')
If you'd like to change the prefix for tasks in reference documentation
then you can change the ``celery_task_prefix`` configuration value:
.. code-block:: python
celery_task_prefix = '(task)' # < default
With the extension installed `autodoc` will automatically find
task decorated objects (e.g. when using the automodule directive)
and generate the correct (as well as add a ``(task)`` prefix),
and you can also refer to the tasks using `:task:proj.tasks.add`
syntax.
Use ``.. autotask::`` to alternatively manually document a task.
"""
from __future__ import absolute_import, unicode_literals
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter
from celery.app.task import BaseTask
try: # pragma: no cover
from inspect import formatargspec, getfullargspec
except ImportError: # Py2
from inspect import formatargspec, getargspec as getfullargspec # noqa
class TaskDocumenter(FunctionDocumenter):
"""Document task definitions."""
objtype = 'task'
member_order = 11
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
def format_args(self):
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped is not None:
argspec = getfullargspec(wrapped)
if argspec[0] and argspec[0][0] in ('cls', 'self'):
del argspec[0][0]
fmt = formatargspec(*argspec)
fmt = fmt.replace('\\', '\\\\')
return fmt
return ''
def document_members(self, all_members=False):
pass
def check_module(self):
# Normally checks if *self.object* is really defined in the module
# given by *self.modname*. But since functions decorated with the @task
# decorator are instances living in the celery.local, we have to check
# the wrapped function instead.
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped and getattr(wrapped, '__module__') == self.modname:
return True
return super(TaskDocumenter, self).check_module()
class TaskDirective(PyModulelevel):
"""Sphinx task directive."""
def get_signature_prefix(self, sig):
return self.env.config.celery_task_prefix
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
"""Handler for autodoc-skip-member event."""
# Celery tasks created with the @task decorator have the property
# that *obj.__doc__* and *obj.__class__.__doc__* are equal, which
# trips up the logic in sphinx.ext.autodoc that is supposed to
# suppress repetition of class documentation in an instance of the
# class. This overrides that behavior.
if isinstance(obj, BaseTask) and getattr(obj, '__wrapped__'):
if skip:
return False
return None
def setup(app):
"""Setup Sphinx extension."""
app.setup_extension('sphinx.ext.autodoc')
app.add_autodocumenter(TaskDocumenter)
app.add_directive_to_domain('py', 'task', TaskDirective)
app.add_config_value('celery_task_prefix', '(task)', True)
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
return {
'parallel_read_safe': True
}
| apache-2.0 | 4,783,719,491,929,786,000 | 31.609091 | 79 | 0.666574 | false |
guildai/guild | guild/serving_util.py | 1 | 3478 | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import json
import logging
import socket
from werkzeug import routing
from werkzeug import serving
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.wrappers import Request, Response
log = logging.getLogger("guild")
class QuietRequestHandler(serving.WSGIRequestHandler):
def log(self, type, message, *args):
if type != 'info':
super(QuietRequestHandler, self).log(type, message, *args)
class StaticBase(object):
def __init__(self, exports):
self._app = SharedDataMiddleware(self._not_found, exports)
def handle(self, _req):
return self._app
@staticmethod
def _not_found(_env, _start_resp):
raise NotFound()
class StaticDir(StaticBase):
def __init__(self, dir):
super(StaticDir, self).__init__({"/": dir})
def handle_index(self, _req):
def app(env, start_resp):
env["PATH_INFO"] = "/index.html"
return self._app(env, start_resp)
return app
def make_server(host, port, app, logging=True):
if host is None:
raise RuntimeError("host cannot be None")
if port is None:
raise RuntimeError("port cannot be None")
if logging:
request_handler = serving.WSGIRequestHandler
else:
request_handler = QuietRequestHandler
try:
return serving.make_server(
host, port, app, threaded=True, request_handler=request_handler
)
except socket.error as e:
if host:
raise
log.debug(
"error starting server on %s:%s (%s), " "trying IPv6 default host '::'",
host,
port,
e,
)
return serving.make_server("::", port, app, threaded=True)
def json_resp(data, status=200):
return Response(
json.dumps(data),
status=status,
content_type="application/json",
headers=[("Access-Control-Allow-Origin", "*")],
)
def Rule(path, handler, *args):
return routing.Rule(path, endpoint=(handler, args))
def Map(rules):
return routing.Map([Rule(path, handler, *args) for path, handler, args, in rules])
def dispatch(routes, env, start_resp):
urls = routes.bind_to_environ(env)
try:
(handler, args), kw = urls.match()
except HTTPException as e:
return e(env, start_resp)
else:
args = (Request(env),) + args
kw = _del_underscore_vars(kw)
try:
return handler(*args, **kw)(env, start_resp)
except HTTPException as e:
return e(env, start_resp)
def _del_underscore_vars(kw):
return {k: kw[k] for k in kw if k[0] != "_"}
def App(routes):
def app(env, start_resp):
return dispatch(routes, env, start_resp)
return app
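# Illustrative usage sketch (not part of the original module): the route,
# handler, host and port below are made-up examples rather than Guild AI's
# real route table.
if __name__ == "__main__":
    def _hello(req, name):
        # Echo the matched URL variable back as JSON.
        return json_resp({"message": "hello %s" % name})
    _routes = Map([("/hello/<name>", _hello, ())])
    _server = make_server("127.0.0.1", 8000, App(_routes), logging=False)
    _server.serve_forever()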
| apache-2.0 | 3,184,719,521,864,172,500 | 26.603175 | 86 | 0.642036 | false |
omelkonian/cds | cds/modules/webhooks/views.py | 1 | 3801 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Document Server.
# Copyright (C) 2016, 2017 CERN.
#
# CERN Document Server is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Document Server is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Document Server; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Task status manipulation."""
from __future__ import absolute_import
from invenio_db import db
from flask.views import MethodView
from invenio_webhooks.views import blueprint, error_handler
from invenio_oauth2server import require_api_auth, require_oauth_scopes
from invenio_webhooks.views import ReceiverEventResource
from .receivers import build_task_payload
from .status import iterate_result, collect_info, ResultEncoder
class TaskResource(MethodView):
"""Task Endpoint."""
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def put(self, receiver_id, event_id, task_id):
"""Handle PUT request: restart a task."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
payload = build_task_payload(event, task_id)
if payload:
event.receiver.rerun_task(**payload)
db.session.commit()
return '', 204
return '', 400
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def delete(self, receiver_id, event_id, task_id):
"""Handle DELETE request: stop and clean a task."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
payload = build_task_payload(event, task_id)
if payload:
event.receiver.clean_task(**payload)
db.session.commit()
return '', 204
return '', 400
class EventFeedbackResource(MethodView):
"""Event informations."""
@require_api_auth()
@require_oauth_scopes('webhooks:event')
@error_handler
def get(self, receiver_id, event_id):
"""Handle GET request: get more informations."""
event = ReceiverEventResource._get_event(receiver_id, event_id)
raw_info = event.receiver._raw_info(event=event)
def collect(task_name, result):
if isinstance(result.info, Exception):
(args,) = result.info.args
return {
'id': result.id,
'status': result.status,
'info': args,
'name': task_name
}
else:
return collect_info(task_name, result)
result = iterate_result(
raw_info=raw_info, fun=collect)
return ResultEncoder().encode(result), 200
task_item = TaskResource.as_view('task_item')
event_feedback_item = EventFeedbackResource.as_view('event_feedback_item')
blueprint.add_url_rule(
'/hooks/receivers/<string:receiver_id>/events/<string:event_id>'
'/tasks/<string:task_id>',
view_func=task_item,
)
blueprint.add_url_rule(
'/hooks/receivers/<string:receiver_id>/events/<string:event_id>/feedback',
view_func=event_feedback_item,
)
| gpl-2.0 | -581,411,640,366,413,000 | 33.87156 | 78 | 0.665614 | false |
leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/test_expectations.py | 1 | 4480 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for test_expectations files."""
import logging
import optparse
import os
import re
import sys
from common import TabChecker
from webkitpy.common.host import Host
from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
_log = logging.getLogger(__name__)
class TestExpectationsChecker(object):
"""Processes TestExpectations lines for validating the syntax."""
categories = set(['test/expectations'])
def _determine_port_from_expectations_path(self, host, expectations_path):
# Pass a configuration to avoid calling default_configuration() when initializing the port (takes 0.5 seconds on a Mac Pro!).
options_wk1 = optparse.Values({'configuration': 'Release', 'webkit_test_runner': False})
options_wk2 = optparse.Values({'configuration': 'Release', 'webkit_test_runner': True})
for port_name in host.port_factory.all_port_names():
ports = [host.port_factory.get(port_name, options=options_wk1), host.port_factory.get(port_name, options=options_wk2)]
for port in ports:
for test_expectation_file in port.expectations_files():
if test_expectation_file.replace(port.path_from_webkit_base() + host.filesystem.sep, '') == expectations_path:
return port
return None
def __init__(self, file_path, handle_style_error, host=None):
self._file_path = file_path
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
self._tab_checker = TabChecker(file_path, handle_style_error)
# FIXME: host should be a required parameter, not an optional one.
host = host or Host()
host.initialize_scm()
self._port_obj = self._determine_port_from_expectations_path(host, file_path)
# Suppress error messages of test_expectations module since they will be reported later.
log = logging.getLogger("webkitpy.layout_tests.layout_package.test_expectations")
log.setLevel(logging.CRITICAL)
def _handle_error_message(self, lineno, message, confidence):
pass
def check_test_expectations(self, expectations_str, tests=None):
parser = TestExpectationParser(self._port_obj, tests, allow_rebaseline_modifier=False)
expectations = parser.parse('expectations', expectations_str)
level = 5
for expectation_line in expectations:
for warning in expectation_line.warnings:
self._handle_style_error(expectation_line.line_number, 'test/expectations', level, warning)
def check_tabs(self, lines):
self._tab_checker.check(lines)
def check(self, lines):
expectations = '\n'.join(lines)
if self._port_obj:
self.check_test_expectations(expectations_str=expectations, tests=None)
        # Warn about tabs in lines as well
self.check_tabs(lines)
| bsd-3-clause | 8,073,396,660,738,244,000 | 44.252525 | 133 | 0.714509 | false |
mrakitin/sirepo | sirepo/template/elegant_command_parser.py | 1 | 4455 | # -*- coding: utf-8 -*-
u"""elegant command parser.
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template.line_parser import LineParser
import re
_SKIP_COMMANDS = ['subprocess']
def parse_file(command_text):
parser = LineParser(0)
lines = command_text.replace('\r', '').split('\n')
prev_line = ''
commands = []
for line in lines:
parser.increment_line_number()
if re.search(r'^#', line):
continue
line = re.sub(r'\!.*$', '', line)
if not line:
continue
if re.search(r'\&end', line):
if not _parse_line(parser, prev_line + ' ' + line, commands):
break
prev_line = ''
elif re.search(r'\&', line) or len(prev_line):
prev_line += ' ' + line
else:
# ignoring lines between command markers
pass
if prev_line and re.search(r'\&', prev_line):
parser.raise_error('missing &end for command: {}'.format(prev_line))
_update_lattice_names(commands)
return commands
def _parse_array_value(parser):
# read off the end of the array value list
# parse values until a "&end" or "value =" is reached
#
# response[2] = %s.vhrm, %s.hvrm,
# distribution_type[0] = "gaussian", "gaussian",
# enforce_rms_values[0] = 1,1,1,
# distribution_type[0] = gaussian, gaussian, hard-edge,
# distribution_type[0] = 3*"gaussian",
# distribution_cutoff[0] = 3*3,
res = ''
index = parser.get_index()
while True:
value = parser.parse_value()
if value == '&end':
parser.reset_index(index)
break
parser.ignore_whitespace()
if parser.peek_char() == '=':
parser.reset_index(index)
break
if value:
res += value
else:
if parser.peek_char() == ',':
parser.assert_char(',')
res += ','
elif parser.peek_char() == '*':
parser.assert_char('*')
res += '*'
else:
parser.raise_error('expecting an array value')
index = parser.get_index()
if not res:
parser.raise_error('missing array value')
res = re.sub(r',$', '', res)
return res
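# Illustrative result (sketch): for an input such as "enforce_rms_values[0] = 1,1,1," the tokens are
# re-joined into "1,1,1" -- commas and "*" are kept, whitespace between tokens is not preserved, and
# the trailing comma is stripped.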
def _parse_line(parser, line, commands):
parser.set_line(line)
parser.ignore_whitespace()
parser.assert_char('&')
command = PKDict(
_id=parser.next_id(),
_type=parser.parse_value(r'\s+'),
)
if command['_type'] == 'stop':
return False
parser.ignore_whitespace()
while True:
value = parser.parse_value()
if not value:
if parser.peek_char() == ',':
parser.assert_char(',')
continue
parser.raise_error('expecting a command element')
if value == '&end':
break
if parser.peek_char() == '=':
parser.assert_char('=')
if re.search(r'\[', value):
command[value] = _parse_array_value(parser)
else:
command[value] = parser.parse_value(r'[\s,=\!)]')
else:
parser.raise_error('trailing input: {}'.format(value))
parser.assert_end_of_line()
if not command['_type'] in _SKIP_COMMANDS:
commands.append(command)
return True
def _update_lattice_names(commands):
    # preserve the name of the first run_setup.lattice
# others may map to previous save_lattice names
is_first_run_setup = True
save_lattices = []
for cmd in commands:
if cmd['_type'] == 'save_lattice':
name = re.sub(r'\%s', '', cmd['filename'])
save_lattices.append(name)
if cmd['_type'] == 'run_setup':
if is_first_run_setup:
is_first_run_setup = False
continue
for index in reversed(range(len(save_lattices))):
if re.search(re.escape(save_lattices[index]), cmd['lattice'], re.IGNORECASE):
cmd['lattice'] = 'save_lattice' if index == 0 else 'save_lattice{}'.format(index + 1)
break
else:
cmd['lattice'] = 'Lattice'
| apache-2.0 | -7,543,986,354,780,968,000 | 31.757353 | 105 | 0.54119 | false |
arindam31/Multi-File-Renamer | setup.py | 1 | 1168 | #Full working with icon working too.
from distutils.core import setup
import py2exe
import os
MFCDIR = r"C:\Python27\Lib\site-packages\pythonwin"
MFCFILES = ["mfc90.dll", "mfc90u.dll", "mfcm90.dll", "mfcm90u.dll",
"Microsoft.VC90.MFC.manifest"]
mfcfiles = map(lambda x: os.path.join(MFCDIR, x), MFCFILES)
data_files = mfcfiles
#Find details in py2exe\build_exe.py and __init__.py
setup(
# The first three parameters are not required, if at least a
# 'version' is given, then a versioninfo resource is built from
# them and added to the executables.
version = "1.0.0",
description = "An application to edit multiple files at once",
name = "BB Multi File Renamer",
data_files = data_files,
# targets to build
windows = [
{
"script":"Main.py",
"icon_resources":[(0,"desktop.ico")]
}
],
options = {"py2exe":
{
"dll_excludes":["MSVCP90.dll"],
"includes" : ["win32ui","win32con","win32print"]
}
}
)
| gpl-3.0 | 251,931,449,424,868,740 | 28.2 | 78 | 0.543664 | false |
smartboyathome/Wonderland-Engine | tests/WhiteRabbitTests/test_master.py | 1 | 2034 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
import time
from tests.WhiteRabbitTests import MasterTestCase
class TestMaster(MasterTestCase):
pass
'''def test_team_count(self):
assert len(self.master.checkers) == 3
def test_start_team_processes(self):
self.master.run_command('start')
for team_id in self.master.checkers:
assert self.master.checkers[team_id].process.is_alive()
def test_stop_team_processes(self):
self.master.run_command('start')
self.master.run_command('stop')
for team_id in self.master.checkers:
self.master.checkers[team_id].process.join(5)
assert not self.master.checkers[team_id].process.is_alive()
def test_default_team_processes_stopped(self):
for team_id in self.master.checkers:
assert not self.master.checkers[team_id].process.is_alive()
def test_shutdown_signal_emitted(self):
with self.assertRaises(SystemExit):
self.master.run_command('shutdown')
def test_check_scores(self):
self.master.run_command('start')
time.sleep(5)
scores = self.db_wrapper.get_scores_for_all_teams()
\'''for team in scores:
assert scores[team] == 5''' | agpl-3.0 | -5,287,805,744,160,156,000 | 36.685185 | 79 | 0.683382 | false |
rohithkodali/numword | numword/numword_en_gb.py | 1 | 1036 | #This file is part of numword. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
numword for EN_GB
'''
from .numword_en import NumWordEN
class NumWordENGB(NumWordEN):
'''
NumWord EN_GB
'''
def currency(self, val, longval=True):
'''
Convert to currency
'''
return self._split(val, hightxt=u"pound/s", lowtxt=u"pence",
jointxt=u"and", longval=longval)
_NW = NumWordENGB()
def cardinal(value):
'''
Convert to cardinal
'''
return _NW.cardinal(value)
def ordinal(value):
'''
Convert to ordinal
'''
return _NW.ordinal(value)
def ordinal_number(value):
'''
Convert to ordinal number
'''
return _NW.ordinal_number(value)
def currency(value, longval=True):
'''
Convert to currency
'''
return _NW.currency(value, longval=longval)
def year(value, longval=True):
'''
Convert to year
'''
return _NW.year(value, longval=longval)
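# Illustrative usage (sketch; the exact wording is produced by the shared NumWordEN logic):
# cardinal(42) might render as u'forty-two' and ordinal(3) as u'third', while currency()
# renders amounts using "pound/s" and "pence" joined by "and".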
| lgpl-2.1 | 786,346,942,397,314,000 | 17.175439 | 71 | 0.617761 | false |
ensemblr/llvm-project-boilerplate | include/llvm/tools/lld/docs/conf.py | 1 | 8301 | # -*- coding: utf-8 -*-
#
# lld documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lld'
copyright = u'2011-%d, LLVM Project' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4'
# The full version, including alpha/beta/rc tags.
release = '4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'llvm-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers use
# this as icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico), which is 16x16 or 32x32 pixels large. Default: None. The
# image file will be copied to the _static directory of the output HTML, but
# only if the file does not already exist there.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'llddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'lld.tex', u'lld Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'lld', u'lld Documentation',
[u'LLVM project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('contents', 'lld', u'lld Documentation',
u'LLVM project', 'lld', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
# -- Options for extensions ----------------------------------------------------
# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
| mit | 1,174,032,416,981,189,000 | 31.552941 | 80 | 0.702084 | false |
JensRantil/http-trigger-trigger | release.py | 1 | 1521 | """Script for creating releases."""
import os
import sys
import shutil
if len(sys.argv) != 2:
print "Usage: ./" + sys.argv[0] + " <tag/version>"
sys.exit(1)
version = sys.argv[1]
if 'GOPATH' not in os.environ:
print "GOPATH not set."
sys.exit(1)
VARIANTS = [('linux', ['386', 'amd64', 'arm']),
('darwin', ['amd64', '386'])]
releasepath = 'releases'
for opsystem, variants in VARIANTS:
for variant in variants:
variantdir = "http-trigger-trigger-{0}-{1}".format(opsystem, variant)
print "Building release for {0}...".format(variantdir)
variantpath = os.path.join(releasepath, variantdir)
os.makedirs(variantpath)
os.environ['GOOS'] = opsystem
os.environ['GOARCH'] = variant
exitcode = os.system('go build http-trigger-trigger.go')
if exitcode != 0:
print "Error building {0}. Exitting...".format(variantdir)
sys.exit(1)
shutil.move('http-trigger-trigger', variantpath)
shutil.copy('README.rst', variantpath)
shutil.copy('setup.ini.example', variantpath)
#os.system('tar czf {0}.tar.gz {1}'.format(variantdir, variantpath))
tarfile = os.path.join(releasepath,
variantdir + "-" + version + '.tar.gz')
os.system('tar -C {0} -czf {1} {2}'.format(releasepath,
tarfile,
variantdir))
shutil.rmtree(variantpath)
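# Illustrative result (sketch): invoked with version "1.0" this leaves archives such as
# releases/http-trigger-trigger-linux-amd64-1.0.tar.gz behind, one per (GOOS, GOARCH) pair above.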
| mit | -53,057,553,182,827,090 | 32.8 | 77 | 0.5595 | false |
NeilBryant/check_mk | web/htdocs/metrics.py | 1 | 47008 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Frequently used variable names:
# perf_data_string: Raw performance data as sent by the core, e.g. "foo=17M;1;2;4;5"
# perf_data: Split performance data, e.g. [("foo", "17", "M", "1", "2", "4", "5")]
# translated_metrics: Completely parsed and translated into metrics, e.g. { "foo" : { "value" : 17.0, "unit" : { "render" : ... }, ... } }
# color: RGB color representation ala HTML, e.g. "#ffbbc3" or "#FFBBC3", len() is always 7!
# color_rgb: RGB color split into triple (r, g, b), where r, g, b in (0.0 .. 1.0)
# unit_name: The ID of a unit, e.g. "%"
# unit: The definition-dict of a unit like in unit_info
# graph_template: Template for a graph. Essentially a dict with the key "metrics"
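# Illustrative data flow (sketch, names and numbers made up): parse_perf_data("fs_used=1024MB;2048;2560;0;4096", ...)
# yields perf_data entries like ("fs_used", "1024", "MB", "2048", "2560", "0", "4096"); translate_metrics()
# then builds translated_metrics["fs_used"] with keys "value", "unit", "color" and "scalar", where "unit"
# is the full definition dict from unit_info and "scalar" holds the scaled warn/crit/min/max values.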
import math, time, colorsys
import config, defaults, pagetypes, table
from lib import *
from valuespec import *
import livestatus
# .--Plugins-------------------------------------------------------------.
# | ____ _ _ |
# | | _ \| |_ _ __ _(_)_ __ ___ |
# | | |_) | | | | |/ _` | | '_ \/ __| |
# | | __/| | |_| | (_| | | | | \__ \ |
# | |_| |_|\__,_|\__, |_|_| |_|___/ |
# | |___/ |
# +----------------------------------------------------------------------+
# | Typical code for loading Multisite plugins of this module |
# '----------------------------------------------------------------------'
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
def load_plugins():
global loaded_with_language
if loaded_with_language == current_language:
return
global unit_info ; unit_info = {}
global metric_info ; metric_info = {}
global check_metrics ; check_metrics = {}
global perfometer_info ; perfometer_info = []
global graph_info ; graph_info = []
load_web_plugins("metrics", globals())
loaded_with_language = current_language
#.
# .--Constants-----------------------------------------------------------.
# | ____ _ _ |
# | / ___|___ _ __ ___| |_ __ _ _ __ | |_ ___ |
# | | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __| |
# | | |__| (_) | | | \__ \ || (_| | | | | |_\__ \ |
# | \____\___/|_| |_|___/\__\__,_|_| |_|\__|___/ |
# | |
# +----------------------------------------------------------------------+
# | Various constants to be used by the declarations of the plugins. |
# '----------------------------------------------------------------------'
KB = 1024
MB = KB * 1024
GB = MB * 1024
TB = GB * 1024
PB = TB * 1024
m = 0.001
K = 1000
M = K * 1000
G = M * 1000
T = G * 1000
P = T * 1000
scale_symbols = {
m : "m",
1 : "",
KB : "k",
MB : "M",
GB : "G",
TB : "T",
PB : "P",
K : "k",
M : "M",
G : "G",
T : "T",
P : "P",
}
scalar_colors = {
"warn" : "#ffff00",
"crit" : "#ff0000",
}
#.
# .--Helpers-------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___| |_ __ ___ _ __ ___ |
# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| |
# | | _ | __/ | |_) | __/ | \__ \ |
# | |_| |_|\___|_| .__/ \___|_| |___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Various helper functions |
# '----------------------------------------------------------------------'
# "45.0" -> 45.0, "45" -> 45
def float_or_int(v):
try:
return int(v)
except:
return float(v)
def metric_to_text(metric, value=None):
if value == None:
value = metric["value"]
return metric["unit"]["render"](value)
# A few helper functions to be used by the definitions
#.
# .--Colors--------------------------------------------------------------.
# | ____ _ |
# | / ___|___ | | ___ _ __ ___ |
# | | | / _ \| |/ _ \| '__/ __| |
# | | |__| (_) | | (_) | | \__ \ |
# | \____\___/|_|\___/|_| |___/ |
# | |
# +----------------------------------------------------------------------+
# | Functions and constants dealing with colors |
# '----------------------------------------------------------------------'
cmk_color_palette = {
# do not use:
# "0" : (0.33, 1, 1), # green
# "1" : (0.167, 1, 1), # yellow
# "2" : (0, 1, 1), # red
# red area
"11" : (0.775, 1, 1),
"12" : (0.8, 1, 1),
"13" : (0.83, 1, 1),
"14" : (0.05, 1, 1),
"15" : (0.08, 1, 1),
"16" : (0.105, 1, 1),
# yellow area
"21" : (0.13, 1, 1),
"22" : (0.14, 1, 1),
"23" : (0.155, 1, 1),
"24" : (0.185, 1, 1),
"25" : (0.21, 1, 1),
"26" : (0.25, 1, 1),
# green area
"31" : (0.45, 1, 1),
"32" : (0.5, 1, 1),
"33" : (0.515, 1, 1),
"34" : (0.53, 1, 1),
"35" : (0.55, 1, 1),
"36" : (0.57, 1, 1),
# blue area
"41" : (0.59, 1, 1),
"42" : (0.62, 1, 1),
"43" : (0.66, 1, 1),
"44" : (0.71, 1, 1),
"45" : (0.73, 1, 1),
"46" : (0.75, 1, 1),
# special colors
"51" : (0, 0, 0.5), # grey_50
"52" : (0.067, 0.7, 0.5), # brown 1
"53" : (0.083, 0.8, 0.55), # brown 2
}
def get_palette_color_by_index(i, shading='a'):
color_key = sorted(cmk_color_palette.keys())[i % len(cmk_color_palette)]
return "%s/%s" % (color_key, shading)
# 23/c -> #ff8040
# #ff8040 -> #ff8040
def parse_color_into_hexrgb(color_string):
if color_string[0] == "#":
return color_string
elif "/" in color_string:
cmk_color_index, color_shading = color_string.split("/")
hsv = list(cmk_color_palette[cmk_color_index])
# Colors of the yellow ("2") and green ("3") area need to be darkened (in third place of the hsv tuple),
# colors of the red and blue area need to be brightened (in second place of the hsv tuple).
# For both shadings we need different factors.
cmk_color_nuance_index = 1
cmk_color_nuance_factor = 0.6
if cmk_color_index[0] in ["2", "3"]:
cmk_color_nuance_index = 2
cmk_color_nuance_factor = 0.8
if color_shading == 'b':
hsv[cmk_color_nuance_index] *= cmk_color_nuance_factor
color_hexrgb = hsv_to_hexrgb(hsv)
return color_hexrgb
else:
return "#808080"
def hsv_to_hexrgb(hsv):
return render_color(colorsys.hsv_to_rgb(*hsv))
# "#ff0080" -> (1.0, 0.0, 0.5)
def parse_color(color):
try:
return tuple([ int(color[a:a+2], 16) / 255.0 for a in (1,3,5) ])
except Exception, e:
raise MKGeneralException(_("Invalid color specification '%s'") % color)
def render_color(color_rgb):
return "#%02x%02x%02x" % (
int(color_rgb[0] * 255),
int(color_rgb[1] * 255),
int(color_rgb[2] * 255),)
# Make a color darker. v ranges from 0 (not darker) to 1 (black)
def darken_color(rgb, v):
def darken(x, v):
return x * (1.0 - v)
return tuple([ darken(x, v) for x in rgb ])
# Make a color lighter. v ranges from 0 (not lighter) to 1 (white)
def lighten_color(rgb, v):
def lighten(x, v):
return x + ((1.0 - x) * v)
return tuple([ lighten(x, v) for x in rgb ])
def mix_colors(a, b):
return tuple([
(ca + cb) / 2.0
for (ca, cb)
in zip(a, b)
])
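# Illustrative round trip (sketch): parse_color("#ff0080") -> (1.0, 0.0, ~0.5) and
# render_color((1.0, 0.0, 0.5)) -> "#ff007f"; darken_color(), lighten_color() and mix_colors()
# all operate on such RGB triples with components in the range 0.0 .. 1.0.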
#.
# .--Evaluation----------------------------------------------------------.
# | _____ _ _ _ |
# | | ____|_ ____ _| |_ _ __ _| |_(_) ___ _ __ |
# | | _| \ \ / / _` | | | | |/ _` | __| |/ _ \| '_ \ |
# | | |___ \ V / (_| | | |_| | (_| | |_| | (_) | | | | |
# | |_____| \_/ \__,_|_|\__,_|\__,_|\__|_|\___/|_| |_| |
# | |
# +----------------------------------------------------------------------+
# | Parsing of performance data into metrics, evaluation of expressions |
# '----------------------------------------------------------------------'
# Convert perf_data_string into perf_data, extract check_command
def parse_perf_data(perf_data_string, check_command=None):
# Strip away arguments like in "check_http!-H mathias-kettner.de"
check_command = check_command.split("!")[0]
if not perf_data_string:
return None, check_command
parts = perf_data_string.split()
# Try if check command is appended to performance data
# in a PNP like style
if parts[-1].startswith("[") and parts[-1].endswith("]"):
check_command = parts[-1][1:-1]
del parts[-1]
# Python's isdigit() works only on str. We deal with unicode since
# we deal with data coming from Livestatus
def isdigit(x):
return x in [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]
# Parse performance data, at least try
try:
perf_data = []
for part in parts:
varname, values = part.split("=")
value_parts = values.split(";")
while len(value_parts) < 5:
value_parts.append(None)
value_text, warn, crit, min, max = value_parts[0:5]
if value_text == "":
continue # ignore useless empty variable
# separate value from unit
i = 0
while i < len(value_text) and (isdigit(value_text[i]) or value_text[i] in ['.', ',', '-']):
i += 1
unit_name = value_text[i:]
value = value_text[:i]
perf_data.append((varname, value, unit_name, warn, crit, min, max))
except:
if config.debug:
raise
perf_data = None
return perf_data, check_command
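# Illustrative call (sketch): parse_perf_data(u"user=12.3%;80;90;0;100 [check_mk-kernel.util]", "check_http!-H host")
# returns ([("user", "12.3", "%", "80", "90", "0", "100")], "check_mk-kernel.util"), because a trailing
# "[...]" part overrides the check command in PNP style and the unit is split off the value text.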
# Convert Ascii-based performance data as output from a check plugin
# into floating point numbers, do scaling if necessary.
# Simple example for perf_data: [(u'temp', u'48.1', u'', u'70', u'80', u'', u'')]
# Result for this example:
# { "temp" : "value" : 48.1, "warn" : 70, "crit" : 80, "unit" : { ... } }
def translate_metrics(perf_data, check_command):
cm = check_metrics.get(check_command, {})
translated_metrics = {}
color_index = 0
for nr, entry in enumerate(perf_data):
varname = entry[0]
value_text = entry[1]
translation_entry = {} # Default: no translation neccessary
if varname in cm:
translation_entry = cm[varname]
else:
for orig_varname, te in cm.items():
if orig_varname[0] == "~" and regex(orig_varname[1:]).match(varname): # Regex entry
translation_entry = te
break
# Translate name
metric_name = translation_entry.get("name", varname)
if metric_name in translated_metrics:
continue # ignore duplicate value
if metric_name not in metric_info:
color_index += 1
palette_color = get_palette_color_by_index(color_index)
mi = {
"title" : metric_name.title(),
"unit" : "",
"color" : parse_color_into_hexrgb(palette_color),
}
else:
mi = metric_info[metric_name].copy()
mi["color"] = parse_color_into_hexrgb(mi["color"])
# Optional scaling
scale = translation_entry.get("scale", 1.0)
new_entry = {
"value" : float_or_int(value_text) * scale,
"orig_name" : varname,
"scale" : scale, # needed for graph definitions
"scalar" : {},
}
# Do not create graphs for ungraphed metrics if listed here
new_entry["auto_graph"] = translation_entry.get("auto_graph", True)
# Add warn, crit, min, max
for index, key in [ (3, "warn"), (4, "crit"), (5, "min"), (6, "max") ]:
if len(entry) < index + 1:
break
elif entry[index]:
try:
value = float_or_int(entry[index])
new_entry["scalar"][key] = value * scale
except:
if config.debug:
raise
                    pass # empty or invalid number
new_entry.update(mi)
new_entry["unit"] = unit_info[new_entry["unit"]]
translated_metrics[metric_name] = new_entry
# TODO: warn, crit, min, max
# if entry[2]:
# # TODO: lower and upper levels
# translated_metrics[metric_name]["warn"] = float(entry[2])
return translated_metrics
# Evaluates an expression, returns a triple of value, unit and color.
# e.g. "fs_used:max" -> 12.455, "b", "#00ffc6",
# e.g. "fs_used(%)" -> 17.5, "%", "#00ffc6",
# e.g. "fs_used:max(%)" -> 100.0, "%", "#00ffc6",
# e.g. 123.4 -> 123.4, "", None
# e.g. "123.4#ff0000" -> 123.4, "", "#ff0000",
# Note:
# "fs_growth.max" is the same as fs_growth. The .max is just
# relevant when fetching RRD data and is used for selecting
# the consolidation function MAX.
def evaluate(expression, translated_metrics):
if type(expression) in (float, int):
return evaluate_literal(expression, translated_metrics)
else:
if "#" in expression:
expression, explicit_color = expression.rsplit("#", 1) # drop appended color information
else:
explicit_color = None
if "@" in expression:
expression, explicit_unit_name = expression.rsplit("@", 1) # appended unit name
else:
explicit_unit_name = None
value, unit, color = evaluate_rpn(expression, translated_metrics)
if explicit_color:
color = "#" + explicit_color
if explicit_unit_name:
unit = unit_info[explicit_unit_name]
return value, unit, color
# TODO: real unit computation!
def unit_mult(u1, u2):
if u1 == unit_info[""] or u1 == unit_info["count"]:
return u2
else:
return u1
unit_div = unit_mult
unit_add = unit_mult
unit_sub = unit_mult
def operator_minmax(a, b, func):
v = func(a[0], b[0])
# Use unit and color of the winner. If the winner
# has none (e.g. it is a scalar like 0), then take
# unit and color of the loser.
if v == a[0]:
winner = a
loser = b
else:
winner = b
loser = a
if winner[1] != unit_info[""]:
unit = winner[1]
else:
unit = loser[1]
return v, unit, winner[2] or loser[2]
# TODO: Do real unit computation, detect non-matching units
rpn_operators = {
"+" : lambda a, b: ((a[0] + b[0]), unit_mult(a[1], b[1]), choose_operator_color(a[2], b[2])),
"-" : lambda a, b: ((a[0] - b[0]), unit_sub(a[1], b[1]), choose_operator_color(a[2], b[2])),
"*" : lambda a, b: ((a[0] * b[0]), unit_add(a[1], b[1]), choose_operator_color(a[2], b[2])),
"/" : lambda a, b: ((a[0] / b[0]), unit_div(a[1], b[1]), choose_operator_color(a[2], b[2])),
">" : lambda a, b: ((a[0] > b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"<" : lambda a, b: ((a[0] < b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
">=" : lambda a, b: ((a[0] >= b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"<=" : lambda a, b: ((a[0] <= b[0] and 1.0 or 0.0), unit_info[""], "#000000"),
"MIN" : lambda a, b: operator_minmax(a, b, min),
"MAX" : lambda a, b: operator_minmax(a, b, max),
}
def choose_operator_color(a, b):
if a == None:
return b
elif b == None:
return a
else:
return render_color(mix_colors(parse_color(a), parse_color(b)))
def evaluate_rpn(expression, translated_metrics):
parts = expression.split(",")
stack = [] # stack tuples of (value, unit, color)
while parts:
operator_name = parts[0]
parts = parts[1:]
if operator_name in rpn_operators:
if len(stack) < 2:
raise MKGeneralException("Syntax error in expression '%s': too few operands" % expression)
op1 = stack[-2]
op2 = stack[-1]
result = rpn_operators[operator_name](op1, op2)
stack = stack[:-2] + [ result ]
else:
stack.append(evaluate_literal(operator_name, translated_metrics))
if len(stack) != 1:
raise MKGeneralException("Syntax error in expression '%s': too many operands left" % expression)
return stack[0]
def evaluate_literal(expression, translated_metrics):
if type(expression) == int:
return float(expression), unit_info["count"], None
elif type(expression) == float:
return expression, unit_info[""], None
elif expression[0].isdigit() or expression[0] == '-':
return float(expression), unit_info[""], None
if expression.endswith(".max") or expression.endswith(".min") or expression.endswith(".average"):
expression = expression.rsplit(".", 1)[0]
color = None
# TODO: Error handling with useful exceptions
if expression.endswith("(%)"):
percent = True
expression = expression[:-3]
else:
percent = False
if ":" in expression:
varname, scalarname = expression.split(":")
value = translated_metrics[varname]["scalar"].get(scalarname)
color = scalar_colors.get(scalarname)
else:
varname = expression
value = translated_metrics[varname]["value"]
if percent:
maxvalue = translated_metrics[varname]["scalar"]["max"]
if maxvalue != 0:
value = 100.0 * float(value) / maxvalue
else:
value = 0.0
unit = unit_info["%"]
else:
unit = translated_metrics[varname]["unit"]
if color == None:
color = parse_color_into_hexrgb(metric_info[varname]["color"])
return value, unit, color
# Replace expressions in strings like CPU Load - %(load1:max@count) CPU Cores"
def replace_expressions(text, translated_metrics):
def eval_to_string(match):
expression = match.group()[2:-1]
unit_name = None
if "@" in expression:
expression, unit_name = expression.split("@")
value, unit, color = evaluate(expression, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
if value != None:
return unit["render"](value)
else:
return _("n/a")
r = regex(r"%\([^)]*\)")
return r.sub(eval_to_string, text)
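# Illustrative call (sketch, assuming a translated "load1" metric whose "max" scalar is 8):
# replace_expressions("CPU Load - %(load1:max@count) CPU Cores", translated_metrics) renders the
# scalar with the "count" unit and yields something like "CPU Load - 8 CPU Cores".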
#.
# .--Perf-O-Meters-------------------------------------------------------.
# | ____ __ ___ __ __ _ |
# | | _ \ ___ _ __ / _| / _ \ | \/ | ___| |_ ___ _ __ ___ |
# | | |_) / _ \ '__| |_ _____| | | |_____| |\/| |/ _ \ __/ _ \ '__/ __| |
# | | __/ __/ | | _|_____| |_| |_____| | | | __/ || __/ | \__ \ |
# | |_| \___|_| |_| \___/ |_| |_|\___|\__\___|_| |___/ |
# | |
# +----------------------------------------------------------------------+
# | Implementation of Perf-O-Meters |
# '----------------------------------------------------------------------'
def get_perfometers(translated_metrics):
for perfometer in perfometer_info:
if perfometer_possible(perfometer, translated_metrics):
yield perfometer
# TODO: We will run into a performance problem here when we
# have more and more Perf-O-Meter definitions.
# TODO: remove all tuple-perfometers and use dicts
def perfometer_possible(perfometer, translated_metrics):
if type(perfometer) == dict:
if perfometer["type"] == "linear":
required = perfometer["segments"][:]
elif perfometer["type"] == "logarithmic":
required = [ perfometer["metric"] ]
else:
pass # TODO: dual, stacked?
if "label" in perfometer and perfometer["label"] != None:
required.append(perfometer["label"][0])
if "total" in perfometer:
required.append(perfometer["total"])
for req in required:
try:
evaluate(req, translated_metrics)
except:
return False
if "condition" in perfometer:
try:
value, color, unit = evaluate(perfometer["condition"], translated_metrics)
if value == 0.0:
return False
except:
return False
return True
perf_type, perf_args = perfometer
if perf_type == "logarithmic":
required = [ perf_args[0] ]
elif perf_type == "linear":
required = perf_args[0]
if perf_args[1]:
required = required + [perf_args[1]] # Reference value for 100%
if perf_args[2]:
required = required + [perf_args[2]] # Labelling value
elif perf_type in ("stacked", "dual"):
for sub_perf in perf_args:
if not perfometer_possible(sub_perf, translated_metrics):
return False
return True
else:
raise MKInternalError(_("Undefined Perf-O-Meter type '%s'") % perf_type)
for req in required:
try:
evaluate(req, translated_metrics)
except:
return False
return True
def metricometer_logarithmic(value, half_value, base, color):
# Negative values are printed like positive ones (e.g. time offset)
value = abs(float(value))
if value == 0.0:
pos = 0
else:
half_value = float(half_value)
h = math.log(half_value, base) # value to be displayed at 50%
pos = 50 + 10.0 * (math.log(value, base) - h)
if pos < 2:
pos = 2
if pos > 98:
pos = 98
return [ (pos, color), (100 - pos, "#ffffff") ]
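# Illustrative behaviour (sketch): with half_value=10 and base=10 a value of 10 fills 50% of the
# Perf-O-Meter, 100 fills 60% and 1 fills 40%; the position is clamped to 2..98 and returned as
# [ (filled_percentage, color), (rest, "#ffffff") ].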
def build_perfometer(perfometer, translated_metrics):
    # TODO: convert all non-dict Perf-O-Meters to the dict format
if type(perfometer) == dict:
if perfometer["type"] == "logarithmic":
value, unit, color = evaluate(perfometer["metric"], translated_metrics)
label = unit["render"](value)
stack = [ metricometer_logarithmic(value, perfometer["half_value"], perfometer["exponent"], color) ]
elif perfometer["type"] == "linear":
entry = []
stack = [entry]
summed = 0.0
for ex in perfometer["segments"]:
value, unit, color = evaluate(ex, translated_metrics)
summed += value
if "total" in perfometer:
total, unit, color = evaluate(perfometer["total"], translated_metrics)
else:
total = summed
if total == 0:
entry.append((100.0, "#ffffff"))
else:
for ex in perfometer["segments"]:
value, unit, color = evaluate(ex, translated_metrics)
entry.append((100.0 * value / total, color))
                # Paint the rest only if it is positive and larger than one per mille
if total - summed > 0.001:
entry.append((100.0 * (total - summed) / total, "#ffffff"))
            # Use the unit of the first metric for the output of the sum. We assume that all
            # stacked metrics have the same unit anyway
value, unit, color = evaluate(perfometer["segments"][0], translated_metrics)
label = unit["render"](summed)
# "label" option in all Perf-O-Meters overrides automatic label
if "label" in perfometer:
if perfometer["label"] == None:
label = ""
else:
expr, unit_name = perfometer["label"]
value, unit, color = evaluate(expr, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
label = unit["render"](value)
return label, stack
    # This stuff is deprecated and will be removed soon. Watch out!
perfometer_type, definition = perfometer
if perfometer_type == "logarithmic":
expression, median, exponent = definition
value, unit, color = evaluate(expression, translated_metrics)
label = unit["render"](value)
stack = [ metricometer_logarithmic(value, median, exponent, color) ]
    # TODO: this will be removed
elif perfometer_type == "linear":
entry = []
stack = [entry]
# NOTE: This might be converted to a dict later.
metrics_expressions, total_spec, label_expression = definition
summed = 0.0
for ex in metrics_expressions:
value, unit_name, color = evaluate(ex, translated_metrics)
summed += value
if total_spec == None:
total = summed
else:
total, unit_name, color = evaluate(total_spec, translated_metrics)
if total == 0:
entry.append((100.0, "#ffffff"))
else:
for ex in metrics_expressions:
value, unit_name, color = evaluate(ex, translated_metrics)
entry.append((100.0 * value / total, color))
            # Paint the rest only if it is positive and larger than one per mille
if total - summed > 0.001:
entry.append((100.0 * (total - summed) / total, "#ffffff"))
        # Use the unit of the first metric for the output of the sum. We assume that all
        # stacked metrics have the same unit anyway
if label_expression:
expr, unit_name = label_expression
value, unit, color = evaluate(expr, translated_metrics)
if unit_name:
unit = unit_info[unit_name]
label = unit["render"](summed)
else: # absolute
value, unit, color = evaluate(metrics_expressions[0], translated_metrics)
label = unit["render"](summed)
elif perfometer_type == "stacked":
stack = []
labels = []
for sub_perf in definition:
sub_label, sub_stack = build_perfometer(sub_perf, translated_metrics)
stack.append(sub_stack[0])
if sub_label:
labels.append(sub_label)
if labels:
label = " / ".join(labels)
else:
label = ""
return label, stack
elif perfometer_type == "dual":
labels = []
if len(definition) != 2:
raise MKInternalError(_("Perf-O-Meter of type 'dual' must contain exactly two definitions, not %d") % len(definition))
content = []
for nr, sub_perf in enumerate(definition):
sub_label, sub_stack = build_perfometer(sub_perf, translated_metrics)
if len(sub_stack) != 1:
raise MKInternalError(_("Perf-O-Meter of type 'dual' must only contain plain Perf-O-Meters"))
half_stack = [ (value/2, color) for (value, color) in sub_stack[0] ]
if nr == 0:
half_stack.reverse()
content += half_stack
if sub_label:
labels.append(sub_label)
if labels:
label = " / ".join(labels)
else:
label = ""
return label, [ content ]
else:
raise MKInternalError(_("Unsupported Perf-O-Meter type '%s'") % perfometer_type)
return label, stack
#.
# .--Graphs--------------------------------------------------------------.
# | ____ _ |
# | / ___|_ __ __ _ _ __ | |__ ___ |
# | | | _| '__/ _` | '_ \| '_ \/ __| |
# | | |_| | | | (_| | |_) | | | \__ \ |
# | \____|_| \__,_| .__/|_| |_|___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Implementation of time graphs - basic code, not the rendering |
# | Rendering of the graphs is done by PNP4Nagios, we just create PHP |
# | templates for PNP here.
# '----------------------------------------------------------------------'
def get_graph_templates(translated_metrics):
if not translated_metrics:
return []
explicit_templates = get_explicit_graph_templates(translated_metrics)
already_graphed_metrics = get_graphed_metrics(explicit_templates)
implicit_templates = get_implicit_graph_templates(translated_metrics, already_graphed_metrics)
return explicit_templates + implicit_templates
def get_explicit_graph_templates(translated_metrics):
templates = []
for graph_template in graph_info:
if graph_possible(graph_template, translated_metrics):
templates.append(graph_template)
elif graph_possible_without_optional_metrics(graph_template, translated_metrics):
templates.append(graph_without_missing_optional_metrics(graph_template, translated_metrics))
return templates
def get_implicit_graph_templates(translated_metrics, already_graphed_metrics):
templates = []
for metric_name, metric_entry in sorted(translated_metrics.items()):
if metric_entry["auto_graph"] and metric_name not in already_graphed_metrics:
templates.append(generic_graph_template(metric_name))
return templates
def get_graphed_metrics(graph_templates):
graphed_metrics = set([])
for graph_template in graph_templates:
graphed_metrics.update(metrics_used_by_graph(graph_template))
return graphed_metrics
def metrics_used_by_graph(graph_template):
used_metrics = []
for metric_definition in graph_template["metrics"]:
used_metrics += list(metrics_used_in_definition(metric_definition[0]))
return used_metrics
def metrics_used_in_definition(metric_definition):
without_unit = metric_definition.split("@")[0]
without_color = metric_definition.split("#")[0]
parts = without_color.split(",")
for part in parts:
metric_name = part.split(".")[0] # drop .min, .max, .average
if metric_name in metric_info:
yield metric_name
def graph_possible(graph_template, translated_metrics):
for metric_definition in graph_template["metrics"]:
try:
evaluate(metric_definition[0], translated_metrics)
except Exception, e:
return False
# Allow graphs to be disabled if certain (better) metrics
# are available
if "conflicting_metrics" in graph_template:
for var in graph_template["conflicting_metrics"]:
if var in translated_metrics:
return False
return True
def graph_possible_without_optional_metrics(graph_template, translated_metrics):
if "optional_metrics" in graph_template:
return graph_possible(graph_template,
add_fake_metrics(translated_metrics, graph_template["optional_metrics"]))
def graph_without_missing_optional_metrics(graph_template, translated_metrics):
working_metrics = []
for metric_definition in graph_template["metrics"]:
try:
evaluate(metric_definition[0], translated_metrics)
working_metrics.append(metric_definition)
except:
pass
reduced_graph_template = graph_template.copy()
reduced_graph_template["metrics"] = working_metrics
return reduced_graph_template
def add_fake_metrics(translated_metrics, metric_names):
with_fake = translated_metrics.copy()
for metric_name in metric_names:
with_fake[metric_name] = {
"value" : 1.0,
"scale" : 1.0,
"unit" : unit_info[""],
"color" : "#888888",
}
return with_fake
def generic_graph_template(metric_name):
return {
"metrics" : [
( metric_name, "area" ),
],
"scalars" : [
metric_name + ":warn",
metric_name + ":crit",
]
}
def get_graph_range(graph_template, translated_metrics):
if "range" in graph_template:
min_value, max_value = [
evaluate(r, translated_metrics)[0]
for r in graph_template["range"]
]
else:
# Compute range of displayed data points
max_value = None
min_value = None
return min_value, max_value
# Called with exactly one variable: the template ID. Example:
# "check_mk-kernel.util:guest,steal,system,user,wait".
def page_pnp_template():
template_id = html.var("id")
check_command, perf_var_string = template_id.split(":", 1)
perf_var_names = perf_var_string.split(",")
# Fake performance values in order to be able to find possible graphs
perf_data = [ ( varname, 1, "", 1, 1, 1, 1 ) for varname in perf_var_names ]
translated_metrics = translate_metrics(perf_data, check_command)
if not translated_metrics:
return # check not supported
    # Collect the output in a string, so that in case of an exception no
    # definitions are output at all
output = ""
for graph_template in get_graph_templates(translated_metrics):
graph_code = render_graph_pnp(graph_template, translated_metrics)
output += graph_code
html.write(output)
# TODO: some_value.max not yet working
def render_graph_pnp(graph_template, translated_metrics):
graph_title = None
vertical_label = None
rrdgraph_commands = ""
legend_precision = graph_template.get("legend_precision", 2)
legend_scale = graph_template.get("legend_scale", 1)
legend_scale_symbol = scale_symbols[legend_scale]
# Define one RRD variable for each of the available metrics.
# Note: We need to use the original name, not the translated one.
for var_name, metrics in translated_metrics.items():
rrd = "$RRDBASE$_" + pnp_cleanup(metrics["orig_name"]) + ".rrd"
scale = metrics["scale"]
unit = metrics["unit"]
render_scale = unit.get("render_scale", 1)
if scale != 1.0 or render_scale != 1.0:
rrdgraph_commands += "DEF:%s_UNSCALED=%s:1:MAX " % (var_name, rrd)
rrdgraph_commands += "CDEF:%s=%s_UNSCALED,%f,* " % (var_name, var_name, scale * render_scale)
else:
rrdgraph_commands += "DEF:%s=%s:1:MAX " % (var_name, rrd)
# Scaling for legend
rrdgraph_commands += "CDEF:%s_LEGSCALED=%s,%f,/ " % (var_name, var_name, legend_scale)
# Prepare negative variants for upside-down graph
rrdgraph_commands += "CDEF:%s_NEG=%s,-1,* " % (var_name, var_name)
rrdgraph_commands += "CDEF:%s_LEGSCALED_NEG=%s_LEGSCALED,-1,* " % (var_name, var_name)
# Compute width of columns in case of mirrored legend
total_width = 89 # characters
left_width = max([len(_("Average")), len(_("Maximum")), len(_("Last"))]) + 2
column_width = (total_width - left_width) / len(graph_template["metrics"]) - 2
# Now add areas and lines to the graph
graph_metrics = []
# Graph with upside down metrics? (e.g. for Disk IO)
have_upside_down = False
# Compute width of the right column of the legend
max_title_length = 0
for nr, metric_definition in enumerate(graph_template["metrics"]):
if len(metric_definition) >= 3:
title = metric_definition[2]
elif not "," in metric_definition:
metric_name = metric_definition[0].split("#")[0]
mi = translated_metrics[metric_name]
title = mi["title"]
else:
title = ""
max_title_length = max(max_title_length, len(title))
for nr, metric_definition in enumerate(graph_template["metrics"]):
metric_name = metric_definition[0]
line_type = metric_definition[1] # "line", "area", "stack"
# Optional title, especially for derived values
if len(metric_definition) >= 3:
title = metric_definition[2]
else:
title = ""
# Prefixed minus renders the metrics in negative direction
if line_type[0] == '-':
have_upside_down = True
upside_down = True
upside_down_factor = -1
line_type = line_type[1:]
upside_down_suffix = "_NEG"
else:
upside_down = False
upside_down_factor = 1
upside_down_suffix = ""
if line_type == "line":
draw_type = "LINE"
draw_stack = ""
elif line_type == "area":
draw_type = "AREA"
draw_stack = ""
elif line_type == "stack":
draw_type = "AREA"
draw_stack = ":STACK"
# User can specify alternative color using a suffixed #aabbcc
if '#' in metric_name:
metric_name, custom_color = metric_name.split("#", 1)
else:
custom_color = None
commands = ""
        # Derived value with RPN syntax (evaluated by RRDTool!).
if "," in metric_name:
# We evaluate just in order to get color and unit.
# TODO: beware of division by zero. All metrics are set to 1 here.
value, unit, color = evaluate(metric_name, translated_metrics)
# Choose a unique name for the derived variable and compute it
commands += "CDEF:DERIVED%d=%s " % (nr , metric_name)
if upside_down:
commands += "CDEF:DERIVED%d_NEG=DERIVED%d,-1,* " % (nr, nr)
metric_name = "DERIVED%d" % nr
# Scaling and upsidedown handling for legend
commands += "CDEF:%s_LEGSCALED%s=%s,%f,/ " % (metric_name, upside_down_suffix, metric_name, legend_scale * upside_down_factor)
else:
mi = translated_metrics[metric_name]
if not title:
title = mi["title"]
color = parse_color_into_hexrgb(mi["color"])
unit = mi["unit"]
if custom_color:
color = "#" + custom_color
# Paint the graph itself
        # TODO: Compute the width of the title intelligently. With legend = "mirrored"
        # the available width has to be determined and split among all titles.
right_pad = " " * (max_title_length - len(title))
commands += "%s:%s%s%s:\"%s%s\"%s " % (draw_type, metric_name, upside_down_suffix, color, title.replace(":", "\\:"), right_pad, draw_stack)
if line_type == "area":
commands += "LINE:%s%s%s " % (metric_name, upside_down_suffix, render_color(darken_color(parse_color(color), 0.2)))
unit_symbol = unit["symbol"]
if unit_symbol == "%":
unit_symbol = "%%"
else:
unit_symbol = " " + unit_symbol
graph_metrics.append((metric_name, unit_symbol, commands))
        # Use the title and label of this metric as defaults for the graph
if title and not graph_title:
graph_title = title
if not vertical_label:
vertical_label = unit["title"]
    # Now create the rrdgraph commands for all metrics - according to the chosen layout
for metric_name, unit_symbol, commands in graph_metrics:
rrdgraph_commands += commands
legend_symbol = unit_symbol
if unit_symbol and unit_symbol[0] == " ":
legend_symbol = " %s%s" % (legend_scale_symbol, unit_symbol[1:])
for what, what_title in [ ("AVERAGE", _("average")), ("MAX", _("max")), ("LAST", _("last")) ]:
rrdgraph_commands += "GPRINT:%%s_LEGSCALED:%%s:\"%%%%8.%dlf%%s %%s\" " % legend_precision % \
(metric_name, what, legend_symbol, what_title)
rrdgraph_commands += "COMMENT:\"\\n\" "
# For graphs with both up and down, paint a gray rule at 0
if have_upside_down:
rrdgraph_commands += "HRULE:0#c0c0c0 "
# Now compute the arguments for the command line of rrdgraph
rrdgraph_arguments = ""
graph_title = graph_template.get("title", graph_title)
vertical_label = graph_template.get("vertical_label", vertical_label)
rrdgraph_arguments += " --vertical-label %s --title %s " % (
quote_shell_string(vertical_label or " "),
quote_shell_string(graph_title))
min_value, max_value = get_graph_range(graph_template, translated_metrics)
if min_value != None and max_value != None:
rrdgraph_arguments += " -l %f -u %f" % (min_value, max_value)
else:
rrdgraph_arguments += " -l 0"
return graph_title + "\n" + rrdgraph_arguments + "\n" + rrdgraph_commands + "\n"
#.
# .--Hover-Graph---------------------------------------------------------.
# | _ _ ____ _ |
# | | | | | _____ _____ _ __ / ___|_ __ __ _ _ __ | |__ |
# | | |_| |/ _ \ \ / / _ \ '__|____| | _| '__/ _` | '_ \| '_ \ |
# | | _ | (_) \ V / __/ | |_____| |_| | | | (_| | |_) | | | | |
# | |_| |_|\___/ \_/ \___|_| \____|_| \__,_| .__/|_| |_| |
# | |_| |
# '----------------------------------------------------------------------'
def new_style_graphs_possible():
return browser_supports_canvas() and not html.is_mobile()
def browser_supports_canvas():
user_agent = html.get_user_agent()
if 'MSIE' in user_agent:
matches = regex('MSIE ([0-9]{1,}[\.0-9]{0,})').search(user_agent)
return not matches or float(matches.group(1)) >= 9.0
else:
return True
def page_show_graph():
site = html.var('site')
host_name = html.var('host_name')
service = html.var('service')
if new_style_graphs_possible():
        # FIXME HACK TODO We don't have the current perfdata and check command
        # here, but we only need them until metrics.render_svc_time_graph()
        # no longer needs this information.
if service == "_HOST_":
query = "GET hosts\n" \
"Filter: host_name = %s\n" \
"Columns: perf_data metrics check_command\n" % host_name
else:
query = "GET services\n" \
"Filter: host_name = %s\n" \
"Filter: service_description = %s\n" \
"Columns: perf_data metrics check_command\n" % (host_name, service)
html.live.set_only_sites([site])
try:
data = html.live.query_row(query)
except livestatus.MKLivestatusNotFoundError:
html.write('<div class="error">%s</div>' %
_('Failed to fetch data for graph. Maybe the site is not reachable?'))
return
html.live.set_only_sites(None)
if service == "_HOST_":
row = {
'site' : site,
'host_name' : host_name,
'host_perf_data' : data[0],
'host_metrics' : data[1],
'host_check_command' : data[2],
}
else:
row = {
'site' : site,
'host_name' : host_name,
'service_description' : service,
'service_perf_data' : data[0],
'service_metrics' : data[1],
'service_check_command' : data[2],
}
        # Now try to render the graph with our own graphing. If that is not
        # possible, add JS code to let the browser fetch the PNP graph instead.
try:
            # Currently always displaying the last 8 hours
end_time = time.time()
start_time = end_time - 8 * 3600
htmlcode = render_time_graph(row, start_time, end_time, size=(30, 10), font_size=8, show_legend=False, graph_id_prefix="hover")
if htmlcode:
html.write(htmlcode)
return
except NameError:
if config.debug:
raise
pass
# Fallback to PNP graph rendering
host = pnp_cleanup(host_name)
svc = pnp_cleanup(service)
site = html.site_status[site]["site"]
if html.mobile:
url = site["url_prefix"] + ("pnp4nagios/index.php?kohana_uri=/mobile/popup/%s/%s" % \
(html.urlencode(host), html.urlencode(svc)))
else:
url = site["url_prefix"] + ("pnp4nagios/index.php/popup?host=%s&srv=%s" % \
(html.urlencode(host), html.urlencode(svc)))
html.write(url)
| gpl-2.0 | 7,823,285,372,340,483,000 | 35.666927 | 147 | 0.495054 | false |
sostenibilidad-unam/posgrado | posgradmin/posgradmin/management/commands/exporta_cursos.py | 1 | 3459 | # coding: utf-8
from django.core.management.base import BaseCommand
from posgradmin.models import Curso
from django.template.loader import render_to_string
from os import path
from datetime import datetime
import random
from sh import mkdir
from django.utils.text import slugify
import argparse
class Command(BaseCommand):
help = u'Exporta cursos a formato markdown para la página'
def add_arguments(self, parser):
parser.add_argument('--cursos',
type=argparse.FileType('r'),
help='path a la pagina principal de cursos')
parser.add_argument('--outdir',
required=True,
help='path al directorio donde escribir')
def handle(self, *args, **options):
export(options['cursos'], options['outdir'])
def export(cursos, outdir):
mkdir('-p', outdir)
intersemestral = [True, False]
tipos = [
(u"Cursos obligatorios", 'Obligatoria'),
(u"Cursos obligatorios por campo", 'Obligatorias por campo'),
(u"Cursos optativos", 'Optativa'),
(u"Seminarios de Doctorado", u"Seminario de Doctorado")
]
sedes = [
'en línea',
'CDMX',
'Morelia',
u'León'
]
index = cursos.read()
cursos_path = cursos.name
cursos.close()
cursos_md = ""
for inter in intersemestral:
for tipo in tipos:
for sede in sedes:
cursos = Curso.objects.filter(
status='publicado').filter(
intersemestral=inter).filter(
asignatura__tipo=tipo[1]).filter(
sede=sede).order_by('asignatura__asignatura')
if cursos:
if inter:
cursos_md += "\n\n\n# Cursos Intersemestrales\n\n"
else:
cursos_md += "\n\n\n# Cursos Semestrales\n\n"
cursos_md += u"\n\n## %s %s\n\n" % (tipo[0], sede)
for c in cursos:
curso_slug = slugify(c.asignatura.asignatura
+ '_'
+ c.sede)
cursos_md += " - [%s](/cursos/%s/)\n" % (c.asignatura.asignatura, curso_slug)
index = index.replace("<!-- " + slugify("%s %s" % (tipo[0], sede)) + " -->",
cursos_md)
index = index.replace("<!-- cursos-siges -->", cursos_md)
with open(cursos_path, 'w') as f:
f.write(index)
    # create one markdown page per course
for c in Curso.objects.filter(status='publicado'):
# mkdir('-p', path.join(outdir, ''))
if c.sede is None:
sede = ""
else:
sede = c.sede
curso_slug = slugify(c.asignatura.asignatura
+ '_'
+ sede)
c_md = path.join(outdir,
'%s.md' % curso_slug)
with open(c_md, 'w') as f:
f.write(render_to_string(
'posgradmin/curso.md',
{'curso': c,
'curso_slug': curso_slug,
'academicos': ["<a href='mailto:%s'>%s</a>" % (p.user.email, p) for p in c.academicos.all()],
'pleca': random.randint(0, 19)
}))
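# Editor's note (illustrative, not part of the original command): as a Django
# management command this module would typically be run roughly as
#
#   python manage.py exporta_cursos --cursos content/cursos.md --outdir content/cursos/
#
# where --cursos points at the existing course index page that receives the generated
# markdown and --outdir receives one markdown file per published course; the paths
# shown are hypothetical.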
| gpl-3.0 | 7,672,059,377,457,798,000 | 31.904762 | 110 | 0.4822 | false |
balanced/balanced-python | setup.py | 1 | 2182 | """
Balanced Python client library.
See ``README.md`` for usage advice.
"""
import os
import re
try:
import setuptools
except ImportError:
import distutils.core
setup = distutils.core.setup
else:
setup = setuptools.setup
def _get_version():
path = os.path.join(PATH_TO_FILE, 'balanced', '__init__.py')
version_re = r".*__version__ = '(.*?)'"
fo = open(path)
try:
return re.compile(version_re, re.S).match(fo.read()).group(1)
finally:
fo.close()
def _get_long_description():
path = os.path.join(PATH_TO_FILE, 'README.md')
fo = open(path)
try:
return fo.read()
finally:
fo.close()
def parse_requirements(file_name):
requirements = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'(\s*#)|(\s*$)', line):
continue
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(file_name):
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
return dependency_links
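# Editor's note (illustrative, not part of the original setup script): given a
# hypothetical requirements.txt containing
#
#   requests>=1.0
#   -e git+https://example.com/repo.git#egg=somepkg
#   -f https://example.com/downloads
#
# parse_requirements() would yield ['requests>=1.0', 'somepkg'] (comment and blank
# lines skipped, -f lines ignored), while parse_dependency_links() would yield the
# two URLs with their -e/-f prefixes stripped.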
PATH_TO_FILE = os.path.dirname(__file__)
VERSION = _get_version()
LONG_DESCRIPTION = _get_long_description()
setup(
name='balanced',
version=VERSION,
url='https://balancedpayments.com/',
license='MIT License',
author='Balanced',
author_email='[email protected]',
description='Payments platform for marketplaces',
long_description=LONG_DESCRIPTION,
packages=['balanced'],
test_suite='nose.collector',
install_requires=parse_requirements('requirements.txt'),
dependency_links=parse_dependency_links('requirements.txt'),
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| mit | 4,966,423,551,292,434,000 | 24.670588 | 77 | 0.600825 | false |
zengchunyun/s12 | day10/homework/twisted_fram/EchoServer.py | 1 | 2339 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
from twisted.internet import protocol
from twisted.internet import reactor
class EchoServer(protocol.Protocol):  # define EchoServer as a subclass of Protocol
    def dataReceived(self, data):  # override the parent's dataReceived method; it is called whenever data arrives from a client, with the received data passed in
        self.transport.write(bytes(str(data), "utf8"))  # echo everything the client sent straight back through the parent transport's write method;
        # Python 3.x requires the outgoing data to be converted explicitly, so bytes() is used here to produce a bytes object
def main():  # the program's main function
    factory = protocol.ServerFactory()  # instantiate ServerFactory, which inherits from Factory
    factory.protocol = EchoServer  # override the factory's protocol attribute with the EchoServer class
reactor.listenTCP(8000, factory, interface="127.0.0.1")
    # print(type(reactor))  # print reactor's concrete class:
    # twisted.internet.selectreactor.SelectReactor
    # Going further, SelectReactor's base class twisted.internet.posixbase.PosixReactorBase provides
    # listenTCP(port, factory, backlog=50, interface=''), where backlog is the maximum listen queue (50)
    # listenTCP ends up creating a twisted.internet.tcp.Port instance
    # PosixReactorBase also inherits from twisted.internet.base._SignalReactorMixin and runs that class's run method
reactor.run()
    # run calls this class's own startRunning method, which in turn calls ReactorBase.startRunning
    # run then calls the class's mainLoop method
    # mainLoop keeps calling SelectReactor.doIteration(t), which drives the select.select event loop
    # when a readable event fires, self._doReadOrWrite runs and dispatches to twisted.internet.tcp.Connection.doRead;
    # doRead returns self._dataReceived(data), which calls self.protocol.dataReceived(data); that self.protocol is the
    # protocol.ServerFactory().protocol we assigned above, so the dataReceived(data) we overrode is what actually runs
    # i.e. factory.protocol.dataReceived(data) is equivalent to EchoServer().dataReceived(data)
if __name__ == "__main__":
main()
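# --- Editor's illustrative sketch below; not part of the original module. ---
# A minimal client for exercising the echo server above, using only the standard
# library instead of Twisted; host and port simply mirror the
# listenTCP(8000, ..., interface="127.0.0.1") call in main().
import socket
def example_echo_client(message=b"hello"):
    """Send one message to the running echo server and return the echoed bytes."""
    with socket.create_connection(("127.0.0.1", 8000)) as conn:
        conn.sendall(message)
        return conn.recv(1024)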
| gpl-2.0 | -6,804,515,873,428,824,000 | 44.394737 | 105 | 0.78087 | false |
mandeepdhami/neutron | neutron/tests/unit/api/test_extensions.py | 1 | 32843 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import routes
import six
import webob
import webob.exc as webexc
import webtest
import neutron
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron.db import db_base_plugin_v2
from neutron import manager
from neutron.plugins.common import constants
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron import quota
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import extension_stubs as ext_stubs
import neutron.tests.unit.extensions
from neutron.tests.unit.extensions import extendedattribute as extattr
from neutron.tests.unit import testlib_api
from neutron import wsgi
LOG = logging.getLogger(__name__)
_uuid = test_base._uuid
_get_path = test_base._get_path
extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
class ExtensionsTestApp(wsgi.Router):
def __init__(self, options={}):
mapper = routes.Mapper()
controller = ext_stubs.StubBaseAppController()
mapper.resource("dummy_resource", "/dummy_resources",
controller=controller)
super(ExtensionsTestApp, self).__init__(mapper)
class FakePluginWithExtension(db_base_plugin_v2.NeutronDbPluginV2):
"""A fake plugin used only for extension testing in this file."""
supported_extension_aliases = ["FOXNSOX"]
def method_to_support_foxnsox_extension(self, context):
self._log("method_to_support_foxnsox_extension", context)
class PluginInterfaceTest(base.BaseTestCase):
def test_issubclass_hook(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
self.assertTrue(issubclass(A, B))
def test_issubclass_hook_class_without_abstract_methods(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
def f(self):
pass
self.assertFalse(issubclass(A, B))
def test_issubclass_hook_not_all_methods_implemented(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
@abc.abstractmethod
def g(self):
pass
self.assertFalse(issubclass(A, B))
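# Editor's illustrative sketch, not part of the original tests: the issubclass
# behaviour exercised above can be approximated with an ABCMeta __subclasshook__
# that accepts any class implementing every abstract method (reusing the abc and
# six imports at the top of this module); the real extensions.PluginInterface
# implementation may differ in detail.
@six.add_metaclass(abc.ABCMeta)
class _SketchPluginInterface(object):
    @classmethod
    def __subclasshook__(cls, klass):
        if not cls.__abstractmethods__:
            return NotImplemented
        return all(any(name in base.__dict__ for base in klass.__mro__)
                   for name in cls.__abstractmethods__)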
class ResourceExtensionTest(base.BaseTestCase):
class ResourceExtensionController(wsgi.Controller):
def index(self, request):
return "resource index"
def show(self, request, id):
return {'data': {'id': id}}
def notimplemented_function(self, request, id):
return webob.exc.HTTPNotImplemented()
def custom_member_action(self, request, id):
return {'member_action': 'value'}
def custom_collection_action(self, request, **kwargs):
return {'collection': 'value'}
class DummySvcPlugin(wsgi.Controller):
def get_plugin_type(self):
return constants.DUMMY
def index(self, request, **kwargs):
return "resource index"
def custom_member_action(self, request, **kwargs):
return {'member_action': 'value'}
def collection_action(self, request, **kwargs):
return {'collection': 'value'}
def show(self, request, id):
return {'data': {'id': id}}
def test_exceptions_notimplemented(self):
controller = self.ResourceExtensionController()
member = {'notimplemented_function': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
# Ideally we would check for a 501 code here but webtest doesn't take
# anything that is below 200 or above 400 so we can't actually check
# it. It throws webtest.AppError instead.
try:
test_app.get("/tweedles/some_id/notimplemented_function")
# Shouldn't be reached
self.assertTrue(False)
except webtest.AppError as e:
self.assertIn('501', str(e))
def test_resource_can_be_added_as_extension(self):
res_ext = extensions.ResourceExtension(
'tweedles', self.ResourceExtensionController())
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/tweedles")
self.assertEqual(200, index_response.status_int)
self.assertEqual("resource index", index_response.body)
show_response = test_app.get("/tweedles/25266")
self.assertEqual({'data': {'id': "25266"}}, show_response.json)
def test_resource_gets_prefix_of_plugin(self):
class DummySvcPlugin(wsgi.Controller):
def index(self, request):
return ""
def get_plugin_type(self):
return constants.DUMMY
res_ext = extensions.ResourceExtension(
'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tweedles")
self.assertEqual(200, index_response.status_int)
def test_resource_extension_with_custom_member_action(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
controller = self.DummySvcPlugin()
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_plugin_prefix_with_parent_resource(self):
controller = self.DummySvcPlugin()
parent = dict(member_name="tenant",
collection_name="tenants")
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller, parent,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
self.assertEqual(200, index_response.status_int)
response = test_app.get("/dummy_svc/tenants/1/"
"tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tenants/2/"
"tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_resource_extension_for_get_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
LOG.debug(jsonutils.loads(response.body))
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_for_put_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "PUT"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.put("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_post_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "POST"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.post("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_delete_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "DELETE"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.delete("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action.json")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_ext_for_nested_resource_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
parent = dict(collection_name='beetles', member_name='beetle')
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections,
parent=parent)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/beetles/beetle_id"
"/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_with_custom_member_action_and_attr_map(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
params = {
'tweedles': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
}
}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member,
attr_map=params)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_returns_404_for_non_existent_extension(self):
test_app = _setup_extensions_test_app(SimpleExtensionManager(None))
response = test_app.get("/non_extistant_extension", status='*')
self.assertEqual(404, response.status_int)
class ActionExtensionTest(base.BaseTestCase):
def setUp(self):
super(ActionExtensionTest, self).setUp()
self.extension_app = _setup_extensions_test_app()
def test_extended_action_for_adding_extra_data(self):
action_name = 'FOXNSOX:add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post('/dummy_resources/1/action',
req_body,
content_type='application/json')
self.assertEqual("Tweedle Beetle Added.", response.body)
def test_extended_action_for_deleting_extra_data(self):
action_name = 'FOXNSOX:delete_tweedle'
action_params = dict(name='Bailey')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json')
self.assertEqual("Tweedle Bailey Deleted.", response.body)
def test_returns_404_for_non_existent_action(self):
non_existent_action = 'blah_action'
action_params = dict(name="test")
req_body = jsonutils.dumps({non_existent_action: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
def test_returns_404_for_non_existent_resource(self):
action_name = 'add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/asdf/1/action", req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
class RequestExtensionTest(base.BaseTestCase):
def test_headers_can_be_extended(self):
def extend_headers(req, res):
assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
return res
app = self._setup_app_with_request_handler(extend_headers, 'GET')
response = app.get("/dummy_resources/1",
headers={'X-NEW-REQUEST-HEADER': "sox"})
self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
"response_header_data")
def test_extend_get_resource_response(self):
def extend_response_data(req, res):
data = jsonutils.loads(res.body)
data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
res.body = jsonutils.dumps(data)
return res
app = self._setup_app_with_request_handler(extend_response_data, 'GET')
response = app.get("/dummy_resources/1?extended_key=extended_data")
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('extended_data',
response_data['FOXNSOX:extended_key'])
self.assertEqual('knox', response_data['fort'])
def test_get_resources(self):
app = _setup_extensions_test_app()
response = app.get("/dummy_resources/1?chewing=newblue")
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])
def test_edit_previously_uneditable_field(self):
def _update_handler(req, res):
data = jsonutils.loads(res.body)
data['uneditable'] = req.params['uneditable']
res.body = jsonutils.dumps(data)
return res
base_app = webtest.TestApp(setup_base_app(self))
response = base_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(response.json['uneditable'], "original_value")
ext_app = self._setup_app_with_request_handler(_update_handler,
'PUT')
ext_response = ext_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(ext_response.json['uneditable'], "new_value")
def _setup_app_with_request_handler(self, handler, verb):
req_ext = extensions.RequestExtension(verb,
'/dummy_resources/:(id)',
handler)
manager = SimpleExtensionManager(None, None, req_ext)
return _setup_extensions_test_app(manager)
class ExtensionManagerTest(base.BaseTestCase):
def test_invalid_extensions_are_not_registered(self):
class InvalidExtension(object):
"""Invalid extension.
This Extension doesn't implement extension methods :
get_name, get_description and get_updated
"""
def get_alias(self):
return "invalid_extension"
ext_mgr = extensions.ExtensionManager('')
ext_mgr.add_extension(InvalidExtension())
ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))
self.assertIn('valid_extension', ext_mgr.extensions)
self.assertNotIn('invalid_extension', ext_mgr.extensions)
class PluginAwareExtensionManagerTest(base.BaseTestCase):
def test_unsupported_extensions_are_not_loaded(self):
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
ext_mgr.add_extension(ext_stubs.StubExtension("e3"))
self.assertIn("e1", ext_mgr.extensions)
self.assertNotIn("e2", ext_mgr.extensions)
self.assertIn("e3", ext_mgr.extensions)
def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):
class ExtensionUnawarePlugin(object):
"""This plugin does not implement supports_extension method.
Extensions will not be loaded when this plugin is used.
"""
pass
plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_not_loaded_for_plugin_without_expected_interface(self):
class PluginWithoutExpectedIface(object):
"""Does not implement get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_are_loaded_for_plugin_with_expected_interface(self):
class PluginWithExpectedInterface(object):
"""Implements get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
def get_foo(self, bar=None):
pass
plugin_info = {constants.CORE: PluginWithExpectedInterface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertIn("supported_extension", ext_mgr.extensions)
def test_extensions_expecting_neutron_plugin_interface_are_loaded(self):
class ExtensionForQuamtumPluginInterface(ext_stubs.StubExtension):
"""This Extension does not implement get_plugin_interface method.
This will work with any plugin implementing NeutronPluginBase
"""
pass
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionForQuamtumPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extensions_without_need_for__plugin_interface_are_loaded(self):
class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
"""This Extension does not need any plugin interface.
This will work with any plugin implementing NeutronPluginBase
"""
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extension_loaded_for_non_core_plugin(self):
class NonCorePluginExtenstion(ext_stubs.StubExtension):
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.DUMMY: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(NonCorePluginExtenstion("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_unloaded_supported_extensions_raises_exception(self):
stub_plugin = ext_stubs.StubPlugin(
supported_extensions=["unloaded_extension"])
plugin_info = {constants.CORE: stub_plugin}
self.assertRaises(exceptions.ExtensionsNotFound,
extensions.PluginAwareExtensionManager,
'', plugin_info)
class ExtensionControllerTest(testlib_api.WebTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.test_app = _setup_extensions_test_app()
def test_index_gets_all_registerd_extensions(self):
response = self.test_app.get("/extensions." + self.fmt)
res_body = self.deserialize(response)
foxnsox = res_body["extensions"][0]
self.assertEqual(foxnsox["alias"], "FOXNSOX")
def test_extension_can_be_accessed_by_alias(self):
response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
foxnsox_extension = self.deserialize(response)
foxnsox_extension = foxnsox_extension['extension']
self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")
def test_show_returns_not_found_for_non_existent_extension(self):
response = self.test_app.get("/extensions/non_existent" + self.fmt,
status="*")
self.assertEqual(response.status_int, 404)
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return ExtensionsTestApp(conf)
def setup_base_app(test):
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return app
def setup_extensions_middleware(extension_manager=None):
extension_manager = (extension_manager or
extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: FakePluginWithExtension()}))
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
def _setup_extensions_test_app(extension_manager=None):
return webtest.TestApp(setup_extensions_middleware(extension_manager))
class SimpleExtensionManager(object):
def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_request_extensions(self):
request_extensions = []
if self.request_ext:
request_extensions.append(self.request_ext)
return request_extensions
class ExtensionExtendedAttributeTestPlugin(
ml2_plugin.Ml2Plugin):
supported_extension_aliases = [
'ext-obj-test', "extended-ext-attr"
]
def __init__(self, configfile=None):
super(ExtensionExtendedAttributeTestPlugin, self)
self.objs = []
self.objh = {}
def create_ext_test_resource(self, context, ext_test_resource):
obj = ext_test_resource['ext_test_resource']
id = _uuid()
obj['id'] = id
self.objs.append(obj)
self.objh.update({id: obj})
return obj
def get_ext_test_resources(self, context, filters=None, fields=None):
return self.objs
def get_ext_test_resource(self, context, id, fields=None):
return self.objh[id]
class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionExtendedAttributeTestCase, self).setUp()
plugin = (
"neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin"
)
# point config file to: neutron/tests/etc/neutron.conf
self.config_parse()
self.setup_coreplugin(plugin)
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: ExtensionExtendedAttributeTestPlugin}
)
ext_mgr.extend_resources("2.0", {})
extensions.PluginAwareExtensionManager._instance = ext_mgr
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.saved_attr_map[res] = attrs.copy()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
extattr.EXTENDED_ATTRIBUTES_2_0)
self.agentscheduler_dbMinxin = manager.NeutronManager.get_plugin()
self.addCleanup(self.restore_attribute_map)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _ext_test_resource_create(self, attr=None):
data = {
"ext_test_resource": {
"tenant_id": self._tenant_id,
"name": "test",
extattr.EXTENDED_ATTRIBUTE: attr
}
}
res = self._do_request('POST', _get_path('ext_test_resources'), data)
return res['ext_test_resource']
def test_ext_test_resource_create(self):
ext_test_resource = self._ext_test_resource_create()
attr = _uuid()
ext_test_resource = self._ext_test_resource_create(attr)
self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr)
def test_ext_test_resource_get(self):
attr = _uuid()
obj = self._ext_test_resource_create(attr)
obj_id = obj['id']
res = self._do_request('GET', _get_path(
'ext_test_resources/{0}'.format(obj_id)))
obj2 = res['ext_test_resource']
self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr)
| apache-2.0 | -2,762,023,589,588,214,000 | 39.951372 | 79 | 0.62135 | false |
AhmedHani/acmASCIS-ML-Hack-2017 | Session_1/dev/server/datasets_processing/manager.py | 1 | 7728 | ___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
import random
from dev.server.datasets_generator._sort import Sort
from dev.server.datasets_generator._matmul import Matmul
from dev.server.datasets_processing.validator import Validator
class Manager(object):
def __init__(self):
super(Manager, self).__init__()
@staticmethod
def make_default_sorting_dataset(dataset_size=10, array_length=5):
"""
Make a random generated dataset for checking the sorting algorithm correctness
:param dataset_size: (int) Number of arrays that would be created
:param array_length: (int) The array length
        :return: paths of the input file that contains the dataset and of its empty output file
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/sorting/sort" + str(file_index) + ".in"
output_file_path = "./datasets/sorting/sort" + str(file_index) + ".out"
_sort = Sort(array_length)
        with open(input_file_path, "w") as writer:
            for i in range(dataset_size):
                array_sample = _sort.generate_data()
                array_string = ' '.join(str(x) for x in array_sample)
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_custom_sorting_dataset(arrays):
"""
Establish the target dataset from the user.
:param arrays: (array of arrays) each array contains integer elements
        :return: paths of the input file that contains the dataset and of its empty output file
"""
Validator.validate_custom_sorting_dataset(arrays)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/sorting/sort" + str(file_index) + ".in"
output_file_path = "./datasets/sorting/sort" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
            for i in range(len(arrays)):
                _sort = Sort(len(arrays[i]))
                _sort.set_data(arrays[i])
                array_string = ' '.join(str(x) for x in arrays[i])
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_default_freq_dataset(dataset_size=10, array_length=5):
"""
Make a random generated dataset for checking the frequency calculation algorithm correctness
:param dataset_size: (int) Number of arrays that would be created
:param array_length: (int) The array length
        :return: paths of the input file that contains the dataset and of its empty output file
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/freq/freq" + str(file_index) + ".in"
output_file_path = "./datasets/freq/freq" + str(file_index) + ".out"
_sort = Sort(array_length)
        with open(input_file_path, "w") as writer:
            for i in range(dataset_size):
                array_sample = _sort.generate_data()
                array_string = ' '.join(str(x) for x in array_sample)
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_custom_freq_dataset(arrays):
"""
Establish the target dataset from the user.
:param arrays: (array of arrays) each array contains integer elements
        :return: paths of the input file that contains the dataset and of its empty output file
"""
Validator.validate_custom_freq_dataset(arrays)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/freq/freq" + str(file_index) + ".in"
output_file_path = "./datasets/freq/freq" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
            for i in range(len(arrays)):
                _sort = Sort(len(arrays[i]))
                _sort.set_data(arrays[i])
                array_string = ' '.join(str(x) for x in arrays[i])
                writer.write(array_string + "\n")
return input_file_path, output_file_path
@staticmethod
def make_default_matmul_dataset(dataset_size=10, matrix_a_size=(3, 3), matrix_b_size=(3, 3)):
"""
Make a random generated dataset for checking the matrix multiplication algorithm correctness
:param dataset_size: (int) an integer that specifies the number of test cases
:param matrix_a_size: (tuple) that specifies the first matrix size
:param matrix_b_size: (tuple) that specifies the second matrix size
        :return: paths of the input file that contains the dataset and of its empty output file
"""
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/arrays_multipliction/matmul" + str(file_index) + ".in"
output_file_path = "./datasets/freq/matmul" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
            writer.write(str(dataset_size) + "\n")
            for i in range(dataset_size):
                matmul = Matmul(matrix_a_size, matrix_b_size)
                matrix_a, matrix_b = matmul.get_matrices()
                writer.write(str(matrix_a_size[0]) + " " + str(matrix_a_size[1]) + " " + str(matrix_b_size[0]) + " " + str(matrix_b_size[1]) + "\n")
                for i in range(len(matrix_a)):
                    for j in range(len(matrix_a[0])):
                        if j < len(matrix_a[0]) - 1:
                            writer.write(str(matrix_a[i][j]) + " ")
                        else:
                            writer.write(str(matrix_a[i][j]) + "\n")
                writer.write("\n")
                for i in range(len(matrix_b)):
                    for j in range(len(matrix_b[0])):
                        if j < len(matrix_b[0]) - 1:
                            writer.write(str(matrix_b[i][j]) + " ")
                        else:
                            writer.write(str(matrix_b[i][j]) + "\n")
return input_file_path, output_file_path
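    # Editor's note (illustrative, not part of the original class): with the writes
    # above, a file generated for dataset_size=1 and two 2x2 matrices would look
    # roughly like
    #
    #   1
    #   2 2 2 2
    #   a11 a12
    #   a21 a22
    #   <blank line>
    #   b11 b12
    #   b21 b22
    #
    # i.e. the number of test cases, then per case the four dimensions, matrix A row
    # by row, a blank separator line, and matrix B row by row.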
@staticmethod
def make_custom_matmul_dataset(matrices_list):
"""
Establish the target dataset from the user.
        :param matrices_list: (list of tuples) each tuple holds a pair of matrices, i.e. (matrix_a, matrix_b)
        :return: paths of the input file that contains the dataset and of its empty output file
"""
Validator.validate_custom_matmul_dataset(matrices_list)
file_index = random.Random().randint(0, 10)
input_file_path = "./datasets/arrays_multipliction/matmul" + str(file_index) + ".in"
output_file_path = "./datasets/freq/matmul" + str(file_index) + ".out"
        with open(input_file_path, "w") as writer:
            writer.write(str(len(matrices_list)) + "\n")
            for item in matrices_list:
                writer.write(str(len(item[0])) + " " + str(len(item[0][0])) + " " + str(len(item[1])) + " " + str(len(item[1][0])) + "\n")
                matrix_a = item[0]
                matrix_b = item[1]
                for i in range(len(matrix_a)):
                    for j in range(len(matrix_a[0])):
                        if j < len(matrix_a[0]) - 1:
                            writer.write(str(matrix_a[i][j]) + " ")
                        else:
                            writer.write(str(matrix_a[i][j]) + "\n")
                writer.write("\n")
                for i in range(len(matrix_b)):
                    for j in range(len(matrix_b[0])):
                        if j < len(matrix_b[0]) - 1:
                            writer.write(str(matrix_b[i][j]) + " ")
                        else:
                            writer.write(str(matrix_b[i][j]) + "\n")
        return input_file_path, output_file_path
| mit | -4,601,814,935,475,399,700 | 42.421348 | 154 | 0.566123 | false |
sam-m888/gprime | gprime/filters/rules/citation/_matchespagesubstringof.py | 1 | 1934 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2011 Helge Herz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "Sources having a title that contain a substring"
#-------------------------------------------------------------------------
class MatchesPageSubstringOf(Rule):
"""Citation Volume/Page title containing <substring>"""
labels = [ _('Text:')]
name = _('Citations with Volume/Page containing <text>')
description = _("Matches citations whose Volume/Page contains a "
"certain substring")
category = _('General filters')
allow_regex = True
def apply(self, db, object):
""" Apply the filter """
return self.match_substring(0, object.get_page())
| gpl-2.0 | 2,667,130,032,348,129,000 | 36.921569 | 79 | 0.537229 | false |
Hitachi-Data-Systems/org-chart-builder | pptx/dml/line.py | 1 | 2249 | # encoding: utf-8
"""
DrawingML objects related to line formatting
"""
from __future__ import absolute_import, print_function, unicode_literals
from ..enum.dml import MSO_FILL
from .fill import FillFormat
from ..util import Emu, lazyproperty
class LineFormat(object):
"""
Provides access to line properties such as line color, style, and width.
Typically accessed via the ``.line`` property of a shape such as |Shape|
or |Picture|.
"""
def __init__(self, parent):
super(LineFormat, self).__init__()
self._parent = parent
@lazyproperty
def color(self):
"""
The |ColorFormat| instance that provides access to the color settings
for this line. Essentially a shortcut for ``line.fill.fore_color``.
As a side-effect, accessing this property causes the line fill type
to be set to ``MSO_FILL.SOLID``. If this sounds risky for your use
case, use ``line.fill.type`` to non-destructively discover the
existing fill type.
"""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color
@lazyproperty
def fill(self):
"""
|FillFormat| instance for this line, providing access to fill
properties such as foreground color.
"""
ln = self._get_or_add_ln()
return FillFormat.from_fill_parent(ln)
@property
def width(self):
"""
The width of the line expressed as an integer number of :ref:`English
Metric Units <EMU>`. The returned value is an instance of
|BaseLength|, a value class having properties such as `.inches`,
`.cm`, and `.pt` for converting the value into convenient units.
"""
ln = self._ln
if ln is None:
return Emu(0)
return ln.w
@width.setter
def width(self, emu):
if emu is None:
emu = 0
ln = self._get_or_add_ln()
ln.w = emu
def _get_or_add_ln(self):
"""
Return the ``<a:ln>`` element containing the line format properties
in the XML.
"""
return self._parent.get_or_add_ln()
@property
def _ln(self):
return self._parent.ln
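# --- Editor's illustrative sketch below; not part of the original module. ---
# A hypothetical helper showing how LineFormat is normally reached from a shape
# object (``shape.line``); the import paths are assumptions and may vary between
# python-pptx versions.
def _example_set_line(shape):
    from pptx.util import Pt
    from pptx.dml.color import RGBColor
    line = shape.line                            # a LineFormat instance as defined above
    line.width = Pt(2.5)                         # stored as an EMU-based Length
    line.color.rgb = RGBColor(0xFF, 0x00, 0x00)  # side effect: fill type becomes MSO_FILL.SOLID
    return line.width.pt                         # reads back as 2.5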
| apache-2.0 | -2,358,455,342,825,472,500 | 28.592105 | 77 | 0.600711 | false |
ChristianTremblay/BAC0 | BAC0/core/proprietary_objects/jci.py | 1 | 6165 | #!/usr/bin/env python
"""
Johnson Controls Proprietary Objects for FX/FEC Line
"""
from bacpypes.primitivedata import (
Real,
Boolean,
CharacterString,
Enumerated,
Unsigned,
Atomic,
)
from bacpypes.object import (
Object,
DeviceObject,
AnalogValueObject,
AnalogInputObject,
AnalogOutputObject,
BinaryValueObject,
Property,
register_object_type,
)
#
# Proprietary Objects and their attributes
#
JCIDeviceObject = {
"name": "JCIDeviceObject",
"vendor_id": 5,
"objectType": "device",
"bacpypes_type": DeviceObject,
"properties": {
"SupervisorOnline": {"obj_id": 3653, "primitive": Boolean, "mutable": True},
"Model": {"obj_id": 1320, "primitive": CharacterString, "mutable": True},
"ModelType": {"obj_id": 32527, "primitive": CharacterString, "mutable": True},
"State": {"obj_id": 2390, "primitive": CharacterString, "mutable": False},
"MemoryUsage": {"obj_id": 2581, "primitive": Real, "mutable": False},
"ObjectMemoryUsage": {"obj_id": 2582, "primitive": Real, "mutable": False},
"CPU": {"obj_id": 2583, "primitive": Real, "mutable": False},
"FlashUsage": {"obj_id": 2584, "primitive": Real, "mutable": False},
"JCISystemStatus": {"obj_id": 847, "primitive": Enumerated, "mutable": False},
"SABusPerformance": {
"obj_id": 12157,
"primitive": Enumerated,
"mutable": False,
},
"SABusTokenLoopTime": {
"obj_id": 12158,
"primitive": Unsigned,
"mutable": False,
},
"SABusCOVRcvPerMinute": {
"obj_id": 12159,
"primitive": Unsigned,
"mutable": False,
},
"SABusCOVWritesPerMinute": {
"obj_id": 12160,
"primitive": Unsigned,
"mutable": False,
},
"CPU Idle": {"obj_id": 30082, "primitive": Real, "mutable": False},
"alarm": {"obj_id": 673, "primitive": Boolean, "mutable": False},
"end_of_line": {"obj_id": 603, "primitive": Boolean, "mutable": False},
"objectStatus": {"obj_id": 512, "primitive": Enumerated, "mutable": False},
},
}
# EOL ?
# MEMORY ?
JCIAnalogValueObject = {
"name": "JCIAnalogValueObject",
"vendor_id": 5,
"objectType": "analogValue",
"bacpypes_type": AnalogValueObject,
"properties": {
"FLOW-SP_EEPROM": {"obj_id": 3113, "primitive": Real, "mutable": True},
"Offset": {"obj_id": 956, "primitive": Real, "mutable": True},
"Offline": {"obj_id": 913, "primitive": Boolean, "mutable": False},
"SABusAddr": {"obj_id": 3645, "primitive": Unsigned, "mutable": False},
"PeerToPeer": {"obj_id": 748, "primitive": Atomic, "mutable": False},
"P2P_ErrorStatus": {"obj_id": 746, "primitive": Enumerated, "mutable": False},
},
}
JCIAnalogInputObject = {
"name": "JCIAnalogInputObject",
"vendor_id": 5,
"objectType": "analogInput",
"bacpypes_type": AnalogInputObject,
"properties": {
"Offset": {"obj_id": 956, "primitive": Real, "mutable": True},
"Offline": {"obj_id": 913, "primitive": Boolean, "mutable": False},
"SABusAddr": {"obj_id": 3645, "primitive": Unsigned, "mutable": False},
"InputRangeLow": {"obj_id": 1293, "primitive": Real, "mutable": True},
"InputRangeHigh": {"obj_id": 1294, "primitive": Real, "mutable": True},
"OutputRangeLow": {"obj_id": 1295, "primitive": Real, "mutable": True},
"OutputRangeHigh": {"obj_id": 1296, "primitive": Real, "mutable": True},
},
}
JCIAnalogOutputObject = {
"name": "JCIAnalogOutputObject",
"vendor_id": 5,
"objectType": "analogOutput",
"bacpypes_type": AnalogOutputObject,
"properties": {
"Offline": {"obj_id": 913, "primitive": Boolean, "mutable": False},
"SABusAddr": {"obj_id": 3645, "primitive": Unsigned, "mutable": False},
"InputRangeLow": {"obj_id": 1293, "primitive": Real, "mutable": True},
"InputRangeHigh": {"obj_id": 1294, "primitive": Real, "mutable": True},
"OutputRangeLow": {"obj_id": 1295, "primitive": Real, "mutable": True},
"OutputRangeHigh": {"obj_id": 1296, "primitive": Real, "mutable": True},
"polarity": {"obj_id": "polarity", "primitive": Enumerated, "mutable": True},
"stroketime": {"obj_id": 3478, "primitive": Real, "mutable": True},
},
}
def tec_short_point_list():
return [
("binaryInput", 30827),
("binaryInput", 30828),
("binaryOutput", 86908),
("binaryOutput", 86909),
("binaryOutput", 86910),
("binaryOutput", 86911),
("binaryOutput", 86912),
("binaryOutput", 87101),
("binaryOutput", 87102),
("multiStateValue", 29501),
("multiStateValue", 29500),
("multiStateValue", 29509),
("multiStateValue", 29517),
("multiStateValue", 29518),
("multiStateValue", 29519),
("multiStateValue", 29520),
("multiStateValue", 29524),
("multiStateValue", 29525),
("multiStateValue", 29527),
("multiStateValue", 29712),
("multiStateValue", 29700),
("multiStateValue", 29709),
("multiStateValue", 29708),
("analogValue", 29505),
("analogValue", 29502),
("analogValue", 29503),
("analogValue", 29504),
("analogValue", 29506),
("analogValue", 29507),
("analogValue", 29508),
("analogValue", 29515),
("analogValue", 29522),
("analogValue", 29529),
("analogValue", 29530),
("analogValue", 29532),
("analogValue", 29701),
("analogValue", 29703),
("analogValue", 29705),
("analogValue", 29706),
("analogValue", 29707),
("analogValue", 29714),
("analogValue", 29717),
("analogValue", 29725),
("analogValue", 29726),
("analogValue", 29727),
("analogOutput", 86905),
("analogOutput", 86914),
("analogOutput", 86915),
("multiStateValue", 6),
("trendLog", 101010),
]
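# --- Editor's illustrative sketch below; not part of the original module. ---
# One way the point list above might be turned into human-readable identifiers;
# any actual BACnet read (e.g. through a connected BAC0 network object) is left
# out on purpose because its exact API is not assumed here.
def format_tec_points(device_address):
    """Return strings like '<address> analogValue 29505' for every TEC point."""
    return ["{} {} {}".format(device_address, obj_type, instance)
            for obj_type, instance in tec_short_point_list()]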
| lgpl-3.0 | 5,717,510,765,979,427,000 | 34.431034 | 86 | 0.560908 | false |
tf198/pycart | pycart/git_repo.py | 1 | 7009 | from jinja2 import Environment, FileSystemLoader
import os
from datetime import datetime
import git, renderer, utils, settings
import web, logging
import cPickle as pickle
from cache import cache
logger = logging.getLogger(__name__)
def render_template(template_name, **context):
extensions = context.pop('extensions', [])
g = context.pop('globals', {})
jinja_env = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=extensions,
)
jinja_env.globals.update(g)
# jinja_env.update_template_context(context)
return jinja_env.get_template(template_name).render(context)
ACTION_ICONS = {'add': 'file',
'modify': 'align-left',
'delete': 'trash'}
# explicitly defined repos
repos = settings.REPOS.copy()
# add repo directories
logger.info("Searching for repos")
for d in getattr(settings, "REPO_DIRS", []):
for directory, subdirs, files in os.walk(d):
root, ext = os.path.splitext(directory)
if ext == '.git':
repos[root[len(d) + 1:]] = directory
# remove excluded repos
for x in getattr(settings, "REPO_EXCLUDE", []):
if x in repos:
del(repos[x])
logger.info("{0} repos found".format(len(repos)))
class RepoMixin(object):
template = None
sha_type = None
def GET(self, *args):
self.cache_key = str(':'.join(args))
d = self.get_context(*args)
helpers = {'author_link': utils.author_link,
'author_gravatar': utils.author_gravatar,
'timesince': utils.timesince}
return render_template(self.template, globals=helpers, **d)
def get_repo(self, repo):
try:
repo_path = repos[repo]
except KeyError:
raise web.notfound("No repo named {0}".format(repo))
return git.repo(repo_path)
def get_base_context(self, repo, sha, path):
d = {}
self.repo = self.get_repo(repo)
try:
if sha in self.repo: # explicit sha
d['ref_name'] = sha[:10]
d['ref_link'] = sha
self.sha = self.repo.get_object(sha)
else:
d['ref_name'] = d['ref_link'] = sha
self.sha = git.get_branch(self.repo, sha)
except KeyError:
logger.exception("Failed to find sha: {0}".format(sha))
raise web.notfound('Bad SHA: {0}'.format(sha))
d['repo'] = repo
d['sha'] = self.sha.id
d['branches'] = git.branches(self.repo)
d['tags'] = git.tags(self.repo)
d['sha_type'] = self.sha_type
d['path'] = path.strip('/')
d['breadcrumbs'] = d['path'].split('/') if path else []
return d
class ListView(object):
def GET(self):
return render_template('list.html', repos=repos.keys())
class TreeView(RepoMixin):
template = "tree.html"
sha_type = 'branch'
def get_listing(self, node, path):
listing_key = self.cache_key + ':listing'
if self.cache_key in cache:
if cache[self.cache_key] == self.sha.id:
logger.info("Using cached data for /%s", path)
d = pickle.loads(cache[listing_key])
d['commit'] = self.repo.get_object(d['commit'])
return d
else:
logger.info("Expiring cache for /%s", path)
try:
del(cache[listing_key])
except KeyError:
pass
d = {'data': None,
'filename': None,
'listing': [],
'commit': None}
last_commit = None
for e in node.items():
commit = git.get_commit(self.repo, self.sha, os.path.join(path, e.path))
is_file = e.mode & 0100000
icon = 'file' if is_file else 'folder-open'
mode = utils.filemode(e.mode) if is_file else ""
d['listing'].append((icon,
e.path,
commit.message ,
mode,
datetime.fromtimestamp(commit.commit_time)))
if last_commit is None or commit.commit_time > last_commit.commit_time:
last_commit = commit
if e.path.lower().startswith('readme'):
d['data'] = e.sha
d['filename'] = "{0}/{1}".format(path, e.path)
d['commit'] = last_commit.id
cache[self.cache_key] = self.sha.id
cache[listing_key] = pickle.dumps(d)
d['commit'] = last_commit
return d
def get_context(self, repo, sha, path):
d = self.get_base_context(repo, sha, path)
path = d['path']
try:
node = git.get_by_path(self.repo, self.sha, d['breadcrumbs'])
except IndexError:
d['error'] = "{0} does not exist in this branch".format(path)
return d
if hasattr(node, 'items'): # is directory
d.update(self.get_listing(node, path))
else: # is a file
d['data'] = node.id
d['commit'] = git.get_commit(self.repo, self.sha, path)
d['filename'] = path
if d['data'] is not None:
text, meta = renderer.render_file(d['filename'], self.repo.get_object(d['data']).data)
d['data'] = text
d['language'] = meta.get('language', 'Unknown')
d['inline_style'] = renderer.get_style()
d['cache_trigger'] = d['commit'].id
return d
class CommitView(RepoMixin):
template = "commit.html"
sha_type = 'commit'
def get_context(self, repo, sha):
d = self.get_base_context(repo, sha, "")
try:
commit = self.repo.get_object(sha)
except KeyError:
raise web.notfound("No such commit")
if commit.__class__.__name__ != "Commit":
raise web.notfound("Not a valid commit")
files = []
for change in git.get_changes(self.repo, commit):
if change.type == 'delete':
files.append((ACTION_ICONS.get('delete'), change.old.path, commit.parents[0], 'Deleted'))
else:
diff = git.unified_diff(self.repo, change.old, change.new)
html = renderer.render_diff(diff)
files.append((ACTION_ICONS.get(change.type, 'fire'), change.new.path, commit.id, html))
d['inline_style'] = renderer.get_style()
d['files'] = files
d['branch'] = commit.id
d['commit'] = commit
d['branch_name'] = commit.id[:10]
return d
class HistoryView(RepoMixin):
template = "history.html"
sha_type = 'commit'
def get_context(self, repo, sha, path):
d = self.get_base_context(repo, sha, path)
walker = self.repo.get_walker(include=[self.sha.id], paths=[d['path']])
d['history'] = [ entry.commit for entry in walker ]
return d
| gpl-3.0 | 3,639,235,360,990,462,000 | 29.081545 | 105 | 0.54002 | false |
reclaro/castel | castel/advcounter.py | 1 | 5902 | """ Advanced counter.
This script is used to get some statistics from a text file.
The script parse a file and returns the number of words, line,
the most commom letter and the average number of letters per word.
The script has a mandatory argument which is the file to parse.
It is possible to pass different options to set a different
configuration file, the number of decimal digit returned in the
calculation and the encoding of the file
"""
import argparse
import logging
import sys
from configparser import SafeConfigParser
from configparser import NoOptionError
from configparser import NoSectionError
from stevedore import driver
def get_config_value(config_file, section, key):
"""
Parse a configuration file and return the value associated
to the given key.
args:
config_file: name of the configuration file
        section: name of the section in the configuration file
            where the key is defined
        key: the name of the key to look up in the configuration
            file
    ret:
        the value associated with the given key
"""
try:
config = SafeConfigParser()
config.read(config_file)
return config.get(section, key)
except NoOptionError:
print("No Option %s in the section %s" % (key, section))
sys.exit(1)
except NoSectionError:
print("No section %s defined " % (section))
sys.exit(1)
def get_driver(config_file):
"""
Load the backend driver according to the value specified in the
configuration file.
args:
config_file: The name of the configuration file
ret:
The class to use as defined in the configuration file
"""
driver_name = get_config_value(config_file, 'default', 'driver')
mgr = driver.DriverManager(namespace='advcounter.plugin',
name=driver_name,
invoke_on_load=True,
)
return mgr.driver
def get_iowrapper(engine_driver, stream_name, encoding):
"""
Call the open method of the configured engine driver to
open the input file specifying the encoding type of the file
args:
engine_driver: the class of the configured driver used to
perform the statistics
stream_name: the name of the file to open
encoding: the encoding to use for reading the file
ret:
The TextIOWrapper returned by the open file
"""
try:
return engine_driver.open_file(stream_name, encoding=encoding)
except FileNotFoundError:
print("File \'%s\' not found" % stream_name)
sys.exit(1)
def configure_logging(config_file):
"""
Configure the logging details according to the values
defined in the configuration file.
args:
config_file: the name of the configuration file
"""
debug_levels = {'debug': logging.DEBUG,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'fatal': logging.FATAL,
'info': logging.INFO,
'warning': logging.WARNING}
log_file = get_config_value(config_file, 'default', 'log_file')
log_level = get_config_value(config_file, 'default', 'log_level')
logging.basicConfig(filename=log_file, level=debug_levels[log_level])
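# Editor's note (illustrative, not part of the original script): a minimal
# configuration file matching the keys read above could look like the following;
# the driver name is hypothetical and must match an entry point registered under
# the 'advcounter.plugin' stevedore namespace.
#
#   [default]
#   driver = simple
#   log_file = advcounter.log
#   log_level = info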
def parse_options():
""" This function manage the options passed to the script
The method uses the argparse library to parse the input
options defined for the script
"""
parser = argparse.ArgumentParser()
parser.add_argument("file", help="Name of the file to parse")
parser.add_argument("-d",
"--decimal",
metavar="integer",
default=1,
type=int,
help="Number of decimal digits returned by"
" calculations, default is 1")
parser.add_argument("-c",
"--config",
default="advcounter.conf",
help="Path for the config file, default"
" is advcounter.conf")
parser.add_argument("-e",
"--encoding",
default="utf-8",
help="Encoding of the input file")
return parser.parse_args()
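# Editorial sketch of an invocation and of the configuration keys read above
# ('driver', 'log_file' and 'log_level' in the 'default' section); the driver
# name and the file names are illustrative assumptions:
#
#     $ python advcounter.py myfile.txt -d 2 -c advcounter.conf -e utf-8
#
#     # advcounter.conf
#     [default]
#     driver = textengine
#     log_file = advcounter.log
#     log_level = info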
def get_and_print_results(engine_driver, file_obj):
"""Call the engine to get and print the results
    This method calls the different methods exposed by the driver
    engine to get back the results.
    The results are printed to the standard output.
args:
engine_driver: the driver configured to parse the file
file_obj: the TextIoWrapper to pass to the engine methods
"""
print("number of lines",
engine_driver.get_total_lines(file_obj))
file_obj.seek(0)
print("number of words",
engine_driver.get_total_words(file_obj))
file_obj.seek(0)
print("most common letter",
engine_driver.most_common_letter(file_obj))
file_obj.seek(0)
print("average letter per word",
engine_driver.get_avg_letters_per_word(file_obj))
def main():
"""
Main function which parses the options defined and call the
methods to the engine driver configured to get the statistics
results
"""
args = parse_options()
engine_driver = get_driver(args.config)
engine_driver.precision = args.decimal
configure_logging(args.config)
file_obj = get_iowrapper(engine_driver, args.file, args.encoding)
try:
get_and_print_results(engine_driver, file_obj)
except UnicodeDecodeError:
print("File \'%s\' is not in the %s format" %
(args.file, args.encoding))
sys.exit(1)
finally:
file_obj.close()
if __name__ == '__main__':
main()
| gpl-2.0 | -5,990,204,540,903,453,000 | 32.344633 | 73 | 0.618096 | false |
NicolasHug/Surprise | surprise/prediction_algorithms/random_pred.py | 1 | 1319 | """ Algorithm predicting a random rating.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .algo_base import AlgoBase
class NormalPredictor(AlgoBase):
"""Algorithm predicting a random rating based on the distribution of the
training set, which is assumed to be normal.
    The prediction :math:`\\hat{r}_{ui}` is generated from a normal distribution
    :math:`\\mathcal{N}(\\hat{\\mu}, \\hat{\\sigma}^2)` where :math:`\\hat{\\mu}` and
    :math:`\\hat{\\sigma}` are estimated from the training data using Maximum
    Likelihood Estimation:
    .. math::
        \\hat{\\mu} &= \\frac{1}{|R_{train}|} \\sum_{r_{ui} \\in R_{train}}
        r_{ui}\\\\
        \\hat{\\sigma} &= \\sqrt{\\sum_{r_{ui} \\in R_{train}}
        \\frac{(r_{ui} - \\hat{\\mu})^2}{|R_{train}|}}
"""
def __init__(self):
AlgoBase.__init__(self)
def fit(self, trainset):
AlgoBase.fit(self, trainset)
num = sum((r - self.trainset.global_mean)**2
for (_, _, r) in self.trainset.all_ratings())
denum = self.trainset.n_ratings
self.sigma = np.sqrt(num / denum)
return self
def estimate(self, *_):
return np.random.normal(self.trainset.global_mean, self.sigma)
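# A minimal usage sketch (editorial note; assumes an installed scikit-surprise
# package exposing Dataset, cross_validate and the bundled 'ml-100k' sample):
#
#     from surprise import Dataset, NormalPredictor
#     from surprise.model_selection import cross_validate
#
#     data = Dataset.load_builtin('ml-100k')
#     cross_validate(NormalPredictor(), data, measures=['RMSE', 'MAE'], cv=5)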
| bsd-3-clause | 7,401,184,946,207,124,000 | 28.311111 | 79 | 0.582259 | false |
franciscomoura/data-science-and-bigdata | introducao-linguagens-estatisticas/mineracao-dados-python/codigo-fonte/code-06.py | 1 | 2285 | # -*- coding: utf-8 -*-
# code-06.py
"""
Dependência: Matplotlib, NumPy
Executar no prompt: pip install matplotlib
Executar no prompt: pip install numpy
Executar no prompt: pip install scikit-learn
Executar no prompt: pip install scipy
*** Atenção:
Este arquivo deverá executado no mesmo diretório do arquivo iris.csv
"""
import numpy as np
# read the first 4 columns
data = np.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
# read the fifth (last) column
target_names = np.genfromtxt('iris.csv', delimiter=',', usecols=(4), dtype=str)
# convert the vector of strings containing the class into integers
target = np.zeros(len(target_names), dtype=np.int)
target[target_names == 'setosa'] = 0
target[target_names == 'versicolor'] = 1
target[target_names == 'virginica'] = 2
# part 1
from sklearn.cluster import KMeans
# fixed initialization so the clustering shows the same result on every run
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=3425)
kmeans.fit(data)
# part 2
clusters = kmeans.predict(data)
# part 3
print("Completeness and homogeneity:")
from sklearn.metrics import completeness_score, homogeneity_score
print(completeness_score(target, clusters))
# Output: 0.764986151449
print(homogeneity_score(target, clusters))
# Output: 0.751485402199
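# Added example (not part of the original script): the adjusted Rand index is
# another common way to compare the k-means labels with the true classes.
from sklearn.metrics import adjusted_rand_score
print("Adjusted Rand index:", adjusted_rand_score(target, clusters))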
# part 4 - revised
print("Generating the scatter plot")
import pylab as pl
pl.figure()
pl.subplot(211) # top: figure with the true classes
pl.plot(data[target == 0, 2], data[target == 0, 3], 'bo', alpha=.7) # 0 setosa
pl.plot(data[target == 1, 2], data[target == 1, 3], 'ro', alpha=.7) # 1 versicolor
pl.plot(data[target == 2, 2], data[target == 2, 3], 'go', alpha=.7) # 2 virginica
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.subplot(212) # bottom: figure with the automatically assigned classes
pl.plot(data[clusters == 0, 2], data[clusters == 0, 3], 'go', alpha=.7) # clusters 0 virginica
pl.plot(data[clusters == 1, 2], data[clusters == 1, 3], 'bo', alpha=.7) # clusters 1 setosa
pl.plot(data[clusters == 2, 2], data[clusters == 2, 3], 'ro', alpha=.7) # clusters 2 versicolor
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.show()
| apache-2.0 | -8,049,449,182,533,556,000 | 31.371429 | 96 | 0.700353 | false |
mozaik-association/mozaik | odoo_addons/mozaik_sample_accounting/__openerp__.py | 1 | 1795 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_sample_accounting, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_sample_accounting is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_sample_accounting is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_sample_accounting.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MOZAIK: Sample Accounting Localization',
'version': '8.0.1.0.0',
"author": "ACSONE SA/NV",
"maintainer": "ACSONE SA/NV",
"website": "http://www.acsone.eu",
'category': 'Political Association',
'depends': [
'mozaik_sample_customization',
'l10n_mozaik',
],
'description': """
MOZAIK Sample Accounting Localization
=====================================
""",
'images': [
],
'data': [
'../mozaik_account/tests/data/account_installer.xml',
'data/retrocession_validation.xml'
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'sequence': 150,
'auto_install': False,
'installable': True,
}
| agpl-3.0 | 5,638,437,834,411,976,000 | 31.053571 | 78 | 0.558217 | false |
ctb/pygr | tests/oldtests/annotation_hg18_megatest.py | 1 | 51599 |
import ConfigParser, sys, os, string
from pygr.mapping import Collection
import pygr.Data
try:
import hashlib
except ImportError:
import md5 as hashlib
config = ConfigParser.ConfigParser({'testOutputBaseDir' : '.', 'smallSampleKey': ''})
config.read([ os.path.join(os.path.expanduser('~'), '.pygrrc'), os.path.join(os.path.expanduser('~'), 'pygr.cfg'), '.pygrrc', 'pygr.cfg' ])
msaDir = config.get('megatests_hg18', 'msaDir')
seqDir = config.get('megatests_hg18', 'seqDir')
smallSampleKey = config.get('megatests_hg18', 'smallSampleKey')
testInputDB = config.get('megatests', 'testInputDB')
testInputDir = config.get('megatests', 'testInputDir')
testOutputBaseDir = config.get('megatests', 'testOutputBaseDir')
if smallSampleKey:
smallSamplePostfix = '_' + smallSampleKey
else:
smallSamplePostfix = ''
## msaDir CONTAINS PRE-BUILT NLMSA
## seqDir CONTAINS GENOME ASSEMBLIES AND THEIR SEQDB FILES
## TEST INPUT/OUPTUT FOR COMPARISON, THESE FILES SHOULD BE IN THIS DIRECTORY
## exonAnnotFileName = 'Annotation_ConservedElement_Exons_hg18.txt'
## intronAnnotFileName = 'Annotation_ConservedElement_Introns_hg18.txt'
## stopAnnotFileName = 'Annotation_ConservedElement_Stop_hg18.txt'
## testDir = os.path.join(testOutputBaseDir, 'TEST_' + ''.join(tmpList)) SHOULD BE DELETED IF YOU WANT TO RUN IN '.'
# DICTIONARY FOR DOC STRINGS OF SEQDB
docStringDict = {
    'anoCar1':'Lizard Genome (January 2007)',
'bosTau3':'Cow Genome (August 2006)',
'canFam2':'Dog Genome (May 2005)',
'cavPor2':'Guinea Pig (October 2005)',
'danRer4':'Zebrafish Genome (March 2006)',
'dasNov1':'Armadillo Genome (May 2005)',
'echTel1':'Tenrec Genome (July 2005)',
    'eriEur1':'European Hedgehog (January 2006)',
'equCab1':'Horse Genome (January 2007)',
'felCat3':'Cat Genome (March 2006)',
'fr2':'Fugu Genome (October 2004)',
'galGal3':'Chicken Genome (May 2006)',
'gasAcu1':'Stickleback Genome (February 2006)',
'hg18':'Human Genome (May 2006)',
'loxAfr1':'Elephant Genome (May 2005)',
'mm8':'Mouse Genome (March 2006)',
'monDom4':'Opossum Genome (January 2006)',
'ornAna1':'Platypus Genome (March 2007)',
'oryCun1':'Rabbit Genome (May 2005)',
'oryLat1':'Medaka Genome (April 2006)',
'otoGar1':'Bushbaby Genome (December 2006)',
'panTro2':'Chimpanzee Genome (March 2006)',
'rheMac2':'Rhesus Genome (January 2006)',
'rn4':'Rat Genome (November 2004)',
    'sorAra1':'Shrew (January 2006)',
'tetNig1':'Tetraodon Genome (February 2004)',
'tupBel1':'Tree Shrew (December 2006)',
'xenTro2':'X. tropicalis Genome (August 2005)'
}
# GENOME ASSEMBLY LIST FOR HG18 MULTIZ28WAY
msaSpeciesList = ['anoCar1', 'bosTau3', 'canFam2', 'cavPor2', 'danRer4', 'dasNov1', 'echTel1', \
'equCab1', 'eriEur1', 'felCat3', 'fr2', 'galGal3', 'gasAcu1', 'hg18', 'loxAfr1', \
'mm8', 'monDom4', 'ornAna1', 'oryCun1', 'oryLat1', 'otoGar1', 'panTro2', 'rheMac2', \
'rn4', 'sorAra1', 'tetNig1', 'tupBel1', 'xenTro2']
class PygrBuildNLMSAMegabase(object):
'restrict megatest to an initially empty directory, need large space to perform'
def __init__(self, testDir = None):
import random
tmpList = [c for c in 'PygrBuildNLMSAMegabase']
random.shuffle(tmpList)
testDir = os.path.join(testOutputBaseDir, 'TEST_' + ''.join(tmpList)) # FOR TEST, SHOULD BE DELETED
if testDir is None: testDir = 'TEST_' + ''.join(tmpList) # NOT SPECIFIED, USE CURRENT DIRECTORY
try:
os.mkdir(testDir)
testDir = os.path.realpath(testDir)
except:
raise IOError
self.path = testDir
try:
tmpFileName = os.path.join(testDir, 'DELETE_THIS_TEMP_FILE')
open(tmpFileName, 'w').write('A'*1024*1024) # WRITE 1MB FILE FOR TESTING
except:
raise IOError
pygr.Data.update(self.path)
from pygr import seqdb
for orgstr in msaSpeciesList:
genome = seqdb.BlastDB(os.path.join(seqDir, orgstr))
genome.__doc__ = docStringDict[orgstr]
pygr.Data.addResource('TEST.Seq.Genome.' + orgstr, genome)
pygr.Data.save()
def copyFile(self, filename): # COPY A FILE INTO TEST DIRECTORY
newname = os.path.join(self.path, os.path.basename(filename))
open(newname, 'w').write(open(filename, 'r').read())
return newname
def teardown(self):
'delete the temporary directory and files'
for dirpath, subdirs, files in os.walk(self.path, topdown = False): # SHOULD BE DELETED BOTTOM-UP FASHION
# THIS PART MAY NOT WORK IN NFS MOUNTED DIRECTORY DUE TO .nfsXXXXXXXXX CREATION
# IN NFS MOUNTED DIRECTORY, IT CANNOT BE DELETED UNTIL CLOSING PYGRDATA
for filename in files:
os.remove(os.path.join(dirpath, filename))
os.rmdir(dirpath)
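# Editorial sketch of the tab-separated rows consumed by the flat-file loaders
# below, as implied by their sliceAttrDict mappings (id, exon_id/splice_id/
# cds_id/ucsc_id, orientation, gene_id, start, stop); the values shown are
# illustrative, not taken from the real data files:
#
#     chr1<TAB>1<TAB>1<TAB>NM_021170<TAB>934343<TAB>935552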
class Build_Test(PygrBuildNLMSAMegabase):
def seqdb_test(self): # CHECK PYGR.DATA CONTENTS
l = pygr.Data.dir('TEST')
preList = ['TEST.Seq.Genome.' + orgstr for orgstr in msaSpeciesList]
assert l == preList
def collectionannot_test(self): # BUILD ANNOTATION DB FROM FILE
from pygr import seqdb, cnestedlist, sqlgraph
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS
exon_slices = Collection(filename = os.path.join(self.path, 'refGene_exonAnnot_hg18.cdb'), \
intKeys = True, mode = 'c', writeback = False) # ONLY C
exon_db = seqdb.AnnotationDB(exon_slices, hg18,
sliceAttrDict = dict(id = 0, exon_id = 1, orientation = 2,
gene_id = 3, start = 4, stop = 5))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_exonAnnot_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for lines in open(os.path.join(testInputDir, 'refGene_exonAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
exon_slices[row[1]] = row
exon = exon_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(exon) # SAVE IT TO GENOME MAPPING
exon_db.clear_cache() # not really necessary; cache should autoGC
exon_slices.close() # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
exon_db.__doc__ = 'Exon Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.exons', exon_db)
msa.__doc__ = 'NLMSA Exon for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.exons', msa)
exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db, bindAttrs = ('exon1',))
exon_schema.__doc__ = 'Exon Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.exons', exon_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES
splice_slices = Collection(filename = os.path.join(self.path, 'refGene_spliceAnnot_hg18.cdb'), \
intKeys = True, mode = 'c', writeback = False) # ONLY C
splice_db = seqdb.AnnotationDB(splice_slices, hg18,
sliceAttrDict = dict(id = 0, splice_id = 1, orientation = 2,
gene_id = 3, start = 4, stop = 5))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_spliceAnnot_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for lines in open(os.path.join(testInputDir, 'refGene_spliceAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
splice_slices[row[1]] = row
splice = splice_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(splice) # SAVE IT TO GENOME MAPPING
splice_db.clear_cache() # not really necessary; cache should autoGC
splice_slices.close() # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
splice_db.__doc__ = 'Splice Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.splices', splice_db)
msa.__doc__ = 'NLMSA Splice for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.splices', msa)
splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db, bindAttrs = ('splice1',))
splice_schema.__doc__ = 'Splice Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.splices', splice_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS
cds_slices = Collection(filename = os.path.join(self.path, 'refGene_cdsAnnot_hg18.cdb'), \
intKeys = True, mode = 'c', writeback = False) # ONLY C
cds_db = seqdb.AnnotationDB(cds_slices, hg18,
sliceAttrDict = dict(id = 0, cds_id = 1, orientation = 2,
gene_id = 3, start = 4, stop = 5))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_cdsAnnot_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for lines in open(os.path.join(testInputDir, 'refGene_cdsAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
cds_slices[row[1]] = row
cds = cds_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(cds) # SAVE IT TO GENOME MAPPING
cds_db.clear_cache() # not really necessary; cache should autoGC
cds_slices.close() # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
cds_db.__doc__ = 'CDS Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.cdss', cds_db)
msa.__doc__ = 'NLMSA CDS for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.cdss', msa)
cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db, bindAttrs = ('cds1',))
cds_schema.__doc__ = 'CDS Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.cdss', cds_schema)
# BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC
ucsc_slices = Collection(filename = os.path.join(self.path, 'phastConsElements28way_hg18.cdb'), \
intKeys = True, mode = 'c', writeback = False) # ONLY C
ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18,
sliceAttrDict = dict(id = 0, ucsc_id = 1, orientation = 2,
gene_id = 3, start = 4, stop = 5))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'phastConsElements28way_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for lines in open(os.path.join(testInputDir, 'phastConsElements28way%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
ucsc_slices[row[1]] = row
ucsc = ucsc_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(ucsc) # SAVE IT TO GENOME MAPPING
ucsc_db.clear_cache() # not really necessary; cache should autoGC
ucsc_slices.close() # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
ucsc_db.__doc__ = 'Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.hg18.mostconserved', ucsc_db)
msa.__doc__ = 'NLMSA for Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved', msa)
ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db, bindAttrs = ('element1',))
ucsc_schema.__doc__ = 'Schema for UCSC Most Conserved Elements for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved', ucsc_schema)
# BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC
snp_slices = Collection(filename = os.path.join(self.path, 'snp126_hg18.cdb'), \
intKeys = True, protocol = 2, mode = 'c', writeback = False) # ONLY C
snp_db = seqdb.AnnotationDB(snp_slices, hg18,
sliceAttrDict = dict(id = 0, snp_id = 1, orientation = 2, gene_id = 3, start = 4,
stop = 5, score = 6, ref_NCBI = 7, ref_UCSC = 8, observed = 9,
molType = 10, myClass = 11, myValid = 12, avHet = 13, avHetSE = 14,
myFunc = 15, locType = 16, myWeight = 17))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for lines in open(os.path.join(testInputDir, 'snp126%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
snp_slices[row[1]] = row
snp = snp_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(snp) # SAVE IT TO GENOME MAPPING
snp_db.clear_cache() # not really necessary; cache should autoGC
snp_slices.close() # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
snp_db.__doc__ = 'SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.hg18.snp126', snp_db)
msa.__doc__ = 'NLMSA for SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126', msa)
snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db, bindAttrs = ('snp1',))
snp_schema.__doc__ = 'Schema for UCSC SNP126 for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.snp126', snp_schema)
pygr.Data.save()
pygr.Data.clear_cache()
# QUERY TO EXON AND SPLICES ANNOTATION DATABASE
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.exons')
splicemsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.splices')
conservedmsa = pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved')
snpmsa = pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126')
cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.cdss')
exons = pygr.Data.getResource('TEST.Annotation.hg18.exons')
splices = pygr.Data.getResource('TEST.Annotation.hg18.splices')
mostconserved = pygr.Data.getResource('TEST.Annotation.UCSC.hg18.mostconserved')
snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.hg18.snp126')
cdss = pygr.Data.getResource('TEST.Annotation.hg18.cdss')
# OPEN hg18_MULTIZ28WAY NLMSA
msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r', trypath = [seqDir])
exonAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Exons%s_hg18.txt' % smallSamplePostfix)
intronAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Introns%s_hg18.txt' % smallSamplePostfix)
stopAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Stop%s_hg18.txt' % smallSamplePostfix)
newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt')
newintronAnnotFileName = os.path.join(self.path, 'new_Introns_hg18.txt')
newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt')
tmpexonAnnotFileName = self.copyFile(exonAnnotFileName)
tmpintronAnnotFileName = self.copyFile(intronAnnotFileName)
tmpstopAnnotFileName = self.copyFile(stopAnnotFileName)
if smallSampleKey:
chrList = [ smallSampleKey ]
else:
chrList = hg18.seqLenDict.keys()
chrList.sort()
outfile = open(newexonAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# EXON ANNOTATION DATABASE
try:
ex1 = exonmsa[slice]
except:
continue
else:
exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()]
exlist1.sort()
for ixx, exon in exlist1:
saveList = []
tmp = exon.sequence
tmpexon = exons[exon.exon_id]
tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE
wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100: continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100: continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
tmpslice2 = tmpelement.sequence # FOR REAL ELEMENT COORDINATE
wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start), min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0: sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100: continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8: continue
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpexonAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newexonAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
outfile = open(newintronAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# SPLICE ANNOTATION DATABASE
try:
sp1 = splicemsa[slice]
except:
continue
else:
splist1 = [(ix.splice_id, ix) for ix in sp1.keys()]
splist1.sort()
for ixx, splice in splist1:
saveList = []
tmp = splice.sequence
tmpsplice = splices[splice.splice_id]
tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE
wlist1 = 'INTRON', chrid, tmpsplice.splice_id, tmpsplice.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100: continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100: continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
tmpslice2 = tmpelement.sequence # FOR REAL ELEMENT COORDINATE
wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start), min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0: sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100: continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8: continue
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
# SNP IN SPLICE SITES
saveList = []
gt = tmpslice[:2]
ag = tmpslice[-2:]
try:
gtout = snpmsa[gt]
agout = snpmsa[ag]
except KeyError:
pass
else:
gtlist = gtout.keys()
aglist = agout.keys()
for snp in gtlist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP5', chrid, tmpsplice.gene_id, gt.start, gt.stop, str(gt)) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(gt.start):abs(gt.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or dest.stop - dest.start != 2: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
for snp in aglist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP3', chrid, tmpsplice.gene_id, ag.start, ag.stop, str(ag)) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(ag.start):abs(ag.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or dest.stop - dest.start != 2: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpintronAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newintronAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
outfile = open(newstopAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# STOP ANNOTATION DATABASE
try:
cds1 = cdsmsa[slice]
except:
continue
else:
cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()]
cdslist1.sort()
for ixx, cds in cdslist1:
saveList = []
tmp = cds.sequence
tmpcds = cdss[cds.cds_id]
tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE
wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, tmpslice.start, tmpslice.stop
if tmpslice.start < 0:
stopstart, stopend = -tmpslice.stop, -tmpslice.start
stop = -hg18[chrid][stopstart:stopstart+3]
else:
stopstart, stopend = tmpslice.start, tmpslice.stop
stop = hg18[chrid][stopend-3:stopend]
if str(stop).upper() not in ('TAA', 'TAG', 'TGA'): continue
try:
snp1 = snpmsa[stop]
except KeyError:
pass
else:
snplist = [(ix.snp_id, ix) for ix in snp1.keys()]
snplist.sort()
for iyy, snp in snplist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = wlist1 + (str(stop), stop.start, stop.stop) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
if tmpslice.start < 0:
tmp1 = -msa.seqDict['hg18.' + chrid][stopstart:stopstart+3]
else:
tmp1 = msa.seqDict['hg18.' + chrid][stopend-3:stopend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 3 or dest.stop - dest.start != 3: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
if str(dest).upper() not in ('TAA', 'TAG', 'TGA'): nonstr = 'NONSENSE'
else: nonstr = 'STOP'
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident, nonstr)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpstopAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newstopAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
def mysqlannot_test(self): # BUILD ANNOTATION DB FROM MYSQL
from pygr import seqdb, cnestedlist, sqlgraph
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION
exon_slices = sqlgraph.SQLTableClustered('%s.pygr_refGene_exonAnnot%s_hg18' % ( testInputDB, smallSamplePostfix ),
clusterKey = 'chromosome', maxCache = 0)
exon_db = seqdb.AnnotationDB(exon_slices, hg18, sliceAttrDict = dict(id = 'chromosome', \
gene_id = 'name', exon_id = 'exon_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_exonAnnot_SQL_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for id in exon_db:
msa.addAnnotation(exon_db[id])
exon_db.clear_cache() # not really necessary; cache should autoGC
exon_slices.clear_cache()
msa.build()
exon_db.__doc__ = 'SQL Exon Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.exons', exon_db)
msa.__doc__ = 'SQL NLMSA Exon for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.exons', msa)
exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db, bindAttrs = ('exon2',))
exon_schema.__doc__ = 'SQL Exon Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.exons', exon_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES: MYSQL VERSION
splice_slices = sqlgraph.SQLTableClustered('%s.pygr_refGene_spliceAnnot%s_hg18' % ( testInputDB, smallSamplePostfix ),
clusterKey = 'chromosome', maxCache = 0)
splice_db = seqdb.AnnotationDB(splice_slices, hg18, sliceAttrDict = dict(id = 'chromosome', \
gene_id = 'name', splice_id = 'splice_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_spliceAnnot_SQL_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for id in splice_db:
msa.addAnnotation(splice_db[id])
splice_db.clear_cache() # not really necessary; cache should autoGC
splice_slices.clear_cache()
msa.build()
splice_db.__doc__ = 'SQL Splice Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.splices', splice_db)
msa.__doc__ = 'SQL NLMSA Splice for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.splices', msa)
splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db, bindAttrs = ('splice2',))
splice_schema.__doc__ = 'SQL Splice Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.splices', splice_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION
cds_slices = sqlgraph.SQLTableClustered('%s.pygr_refGene_cdsAnnot%s_hg18' % ( testInputDB, smallSamplePostfix ),
clusterKey = 'chromosome', maxCache = 0)
cds_db = seqdb.AnnotationDB(cds_slices, hg18, sliceAttrDict = dict(id = 'chromosome', \
gene_id = 'name', cds_id = 'cds_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_cdsAnnot_SQL_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for id in cds_db:
msa.addAnnotation(cds_db[id])
cds_db.clear_cache() # not really necessary; cache should autoGC
cds_slices.clear_cache()
msa.build()
cds_db.__doc__ = 'SQL CDS Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.cdss', cds_db)
msa.__doc__ = 'SQL NLMSA CDS for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.cdss', msa)
cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db, bindAttrs = ('cds2',))
cds_schema.__doc__ = 'SQL CDS Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.cdss', cds_schema)
# BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC: MYSQL VERSION
ucsc_slices = sqlgraph.SQLTableClustered('%s.pygr_phastConsElements28way%s_hg18' % ( testInputDB, smallSamplePostfix ),
clusterKey = 'chromosome', maxCache = 0)
ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18, sliceAttrDict = dict(id = 'chromosome', \
gene_id = 'name', ucsc_id = 'ucsc_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'phastConsElements28way_SQL_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for id in ucsc_db:
msa.addAnnotation(ucsc_db[id])
ucsc_db.clear_cache() # not really necessary; cache should autoGC
ucsc_slices.clear_cache()
msa.build()
ucsc_db.__doc__ = 'SQL Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved', ucsc_db)
msa.__doc__ = 'SQL NLMSA for Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', msa)
ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db, bindAttrs = ('element2',))
ucsc_schema.__doc__ = 'SQL Schema for UCSC Most Conserved Elements for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', ucsc_schema)
# BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC: MYSQL VERSION
snp_slices = sqlgraph.SQLTableClustered('%s.pygr_snp126%s_hg18' % ( testInputDB, smallSamplePostfix ),
clusterKey = 'clusterKey', maxCache = 0)
snp_db = seqdb.AnnotationDB(snp_slices, hg18, sliceAttrDict = dict(id = 'chromosome', gene_id = 'name',
snp_id = 'snp_id', score = 'score', ref_NCBI = 'ref_NCBI', ref_UCSC = 'ref_UCSC',
observed = 'observed', molType = 'molType', myClass = 'myClass', myValid = 'myValid',
avHet = 'avHet', avHetSE = 'avHetSE', myFunc = 'myFunc', locType = 'locType',
myWeight = 'myWeight'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_SQL_hg18'), 'w', \
pairwiseMode = True, bidirectional = False)
for id in snp_db:
msa.addAnnotation(snp_db[id])
snp_db.clear_cache() # not really necessary; cache should autoGC
snp_slices.clear_cache()
msa.build()
snp_db.__doc__ = 'SQL SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.snp126', snp_db)
msa.__doc__ = 'SQL NLMSA for SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126', msa)
snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db, bindAttrs = ('snp2',))
snp_schema.__doc__ = 'SQL Schema for UCSC SNP126 for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126', snp_schema)
pygr.Data.save()
pygr.Data.clear_cache()
# QUERY TO EXON AND SPLICES ANNOTATION DATABASE
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.exons')
splicemsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.splices')
conservedmsa = pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved')
snpmsa = pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126')
cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.cdss')
exons = pygr.Data.getResource('TEST.Annotation.SQL.hg18.exons')
splices = pygr.Data.getResource('TEST.Annotation.SQL.hg18.splices')
mostconserved = pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved')
snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.snp126')
cdss = pygr.Data.getResource('TEST.Annotation.SQL.hg18.cdss')
# OPEN hg18_MULTIZ28WAY NLMSA
msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r', trypath = [seqDir])
exonAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Exons%s_hg18.txt' % smallSamplePostfix)
intronAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Introns%s_hg18.txt' % smallSamplePostfix)
stopAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Stop%s_hg18.txt' % smallSamplePostfix)
newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt')
newintronAnnotFileName = os.path.join(self.path, 'new_Introns_hg18.txt')
newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt')
tmpexonAnnotFileName = self.copyFile(exonAnnotFileName)
tmpintronAnnotFileName = self.copyFile(intronAnnotFileName)
tmpstopAnnotFileName = self.copyFile(stopAnnotFileName)
if smallSampleKey:
chrList = [ smallSampleKey ]
else:
chrList = hg18.seqLenDict.keys()
chrList.sort()
outfile = open(newexonAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# EXON ANNOTATION DATABASE
try:
ex1 = exonmsa[slice]
except:
continue
else:
exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()]
exlist1.sort()
for ixx, exon in exlist1:
saveList = []
tmp = exon.sequence
tmpexon = exons[exon.exon_id]
tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE
wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100: continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100: continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
tmpslice2 = tmpelement.sequence # FOR REAL ELEMENT COORDINATE
wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start), min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0: sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100: continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8: continue
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpexonAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newexonAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
outfile = open(newintronAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# SPLICE ANNOTATION DATABASE
try:
sp1 = splicemsa[slice]
except:
continue
else:
splist1 = [(ix.splice_id, ix) for ix in sp1.keys()]
splist1.sort()
for ixx, splice in splist1:
saveList = []
tmp = splice.sequence
tmpsplice = splices[splice.splice_id]
tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE
wlist1 = 'INTRON', chrid, tmpsplice.splice_id, tmpsplice.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100: continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100: continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
tmpslice2 = tmpelement.sequence # FOR REAL ELEMENT COORDINATE
wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start), min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0: sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100: continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8: continue
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
# SNP IN SPLICE SITES
saveList = []
gt = tmpslice[:2]
ag = tmpslice[-2:]
try:
gtout = snpmsa[gt]
agout = snpmsa[ag]
except KeyError:
pass
else:
gtlist = gtout.keys()
aglist = agout.keys()
for snp in gtlist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP5', chrid, tmpsplice.gene_id, gt.start, gt.stop, str(gt)) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(gt.start):abs(gt.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or dest.stop - dest.start != 2: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
for snp in aglist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP3', chrid, tmpsplice.gene_id, ag.start, ag.stop, str(ag)) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(ag.start):abs(ag.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or dest.stop - dest.start != 2: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpintronAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newintronAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
outfile = open(newstopAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# STOP ANNOTATION DATABASE
try:
cds1 = cdsmsa[slice]
except:
continue
else:
cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()]
cdslist1.sort()
for ixx, cds in cdslist1:
saveList = []
tmp = cds.sequence
tmpcds = cdss[cds.cds_id]
tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE
wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, tmpslice.start, tmpslice.stop
if tmpslice.start < 0:
stopstart, stopend = -tmpslice.stop, -tmpslice.start
stop = -hg18[chrid][stopstart:stopstart+3]
else:
stopstart, stopend = tmpslice.start, tmpslice.stop
stop = hg18[chrid][stopend-3:stopend]
if str(stop).upper() not in ('TAA', 'TAG', 'TGA'): continue
try:
snp1 = snpmsa[stop]
except KeyError:
pass
else:
snplist = [(ix.snp_id, ix) for ix in snp1.keys()]
snplist.sort()
for iyy, snp in snplist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = wlist1 + (str(stop), stop.start, stop.stop) \
+ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, \
str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, \
annsnp.observed, annsnp.molType, \
annsnp.myClass, annsnp.myValid)
if tmpslice.start < 0:
tmp1 = -msa.seqDict['hg18.' + chrid][stopstart:stopstart+3]
else:
tmp1 = msa.seqDict['hg18.' + chrid][stopend-3:stopend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 3 or dest.stop - dest.start != 3: continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' % pident
if str(dest).upper() not in ('TAA', 'TAG', 'TGA'): nonstr = 'NONSENSE'
else: nonstr = 'STOP'
wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, \
(~msa.seqDict)[dest], \
str(dest), dest.start, dest.stop, palign, pident, nonstr)
saveList.append('\t'.join(map(str, wlist3)) + '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpstopAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newstopAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest() # MD5 COMPARISON INSTEAD OF COMPARING EACH CONTENTS
| bsd-3-clause | 5,233,328,099,987,706,000 | 58.998837 | 139 | 0.532646 | false |
wengzhilai/family | iSoft/dal/QueryDal.py | 1 | 3612 | from iSoft.entity.model import db, FaQuery
import math
import json
from iSoft.model.AppReturnDTO import AppReturnDTO
from iSoft.core.Fun import Fun
import re
class QueryDal(FaQuery):
def __init__(self):
pass
def query_findall(self, pageIndex, pageSize, criterion, where):
relist, is_succ = Fun.model_findall(FaQuery, pageIndex, pageSize,
criterion, where)
return relist, is_succ
def query_Save(self, in_dict, saveKeys):
jsonStr = re.sub(r'\r|\n| ', "", in_dict["QUERY_CFG_JSON"])
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
try:
x = json.loads(jsonStr)
except :
            return None, AppReturnDTO(False, "Invalid column configuration JSON")
relist, is_succ = Fun.model_save(FaQuery, self, in_dict, saveKeys)
return relist, is_succ
def query_delete(self, key):
is_succ = Fun.model_delete(FaQuery, key)
return is_succ
def query_single(self, key):
relist, is_succ = Fun.model_single(FaQuery, key)
return relist, is_succ
def query_singleByCode(self, code):
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
            return db_ent, AppReturnDTO(False, "Code does not exist")
return db_ent, AppReturnDTO(True)
    # query and view data
def query_queryByCode(self, code, pageIndex, pageSize, criterion, where):
sql, cfg, msg = self.query_GetSqlByCode(code, criterion, where)
if not msg.IsSuccess:
return sql, msg
relist = db.session.execute(sql)
num = relist.rowcount
relist.close()
        if pageIndex < 1:
            pageIndex = 1
        if pageSize < 1:
            pageSize = 10
        # maximum page number
        max_page = math.ceil(num / pageSize)  # round up
if pageIndex > max_page:
return None, AppReturnDTO(True, num)
pageSql = "{0} LIMIT {1},{2}".format(sql, (pageIndex - 1) * pageSize,
pageSize)
allData, msg = Fun.sql_to_dict(pageSql)
if msg.IsSuccess:
msg.Msg = num
# relist = relist.paginate(pageIndex, per_page=pageSize).items
return allData, msg
def query_GetSqlByCode(self, code, criterion, where):
"""
        Build the query SQL from the stored query code.
        Used for exporting data and for managing the configured SQL in one place.
        Returns the SQL and the column configuration.
"""
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
return "", "", AppReturnDTO(False, "代码不存在")
sql = db_ent.QUERY_CONF
orderArr = []
for order in criterion:
orderArr.append("T.%(Key)s %(Value)s" % order)
whereArr = []
for search in where:
if search["Type"] == "like":
whereArr.append("T.%(Key)s like ('%%%(Value)s%%')" % search)
else:
whereArr.append("T.%(Key)s %(Type)s %(Value)s " % search)
sql = "SELECT * FROM ({0}) T{1}{2}".format(
sql,
" WHERE " + " AND ".join(whereArr) if len(whereArr) > 0 else "",
" ORDER BY " + " , ".join(orderArr) if len(orderArr) > 0 else "",
)
jsonStr = re.sub(r'\r|\n| ', "", db_ent.QUERY_CFG_JSON)
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
return sql, json.loads(jsonStr), AppReturnDTO(True)
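    # Editorial sketch of the 'criterion' and 'where' arguments used by
    # query_queryByCode/query_GetSqlByCode above (the key names follow the
    # string templates in the code; column names and values are illustrative):
    #
    #     criterion = [{"Key": "ID", "Value": "DESC"}]
    #     where = [{"Key": "NAME", "Type": "like", "Value": "Wang"},
    #              {"Key": "AGE", "Type": ">=", "Value": 18}]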
| bsd-3-clause | 4,524,635,679,267,799,000 | 32.557692 | 79 | 0.547564 | false |
heromod/migrid | mig/mig-xsss/jobmanager.py | 1 | 4660 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobmanager - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import time
import pickle
import os
G_WEEKDICTFILE = './data/weekdict.dat'
G_ACTIVELOGFILE = './log/active'
# Returns a tuple of the form: ("Year", "Month", "MonthDay", "WeekNr", "WeekDay", "Hour", "Minutes")
def getTimeTuppel():
year = time.strftime('%Y', time.localtime())
month = time.strftime('%m', time.localtime())
monthday = time.strftime('%d', time.localtime())
weeknr = time.strftime('%U', time.localtime())
weekday = time.strftime('%w', time.localtime())
hour = time.strftime('%H', time.localtime())
minutes = time.strftime('%M', time.localtime())
return (
year,
month,
monthday,
weeknr,
weekday,
hour,
minutes,
)
# Get the dictionary with estimated times
def getWeekDict():
input = open(G_WEEKDICTFILE, 'r')
weekDict = pickle.load(input)
input.close()
return weekDict
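# Editorial note: the pickled structure is indexed as weekDict[weekday][hour]
# elsewhere in this module (weekday 0-6 from %w, hour 0-23), i.e. a 7x24 table
# of expected active minutes per weekday/hour slot.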
# Write the dictionary with estimated times
def writeWeekDict(param_WeekDict):
output = open(G_WEEKDICTFILE, 'w')
pickle.dump(param_WeekDict, output)
output.close()
# Log when the screensaver was activated,
# how long it was expected to be active and how long it actually was active.
# log syntax: YEAR MONTH MONTHDAY WEEKNR WEEKDAY HOURS MINUTES ACTIVE_MINUTES EXPECTED_ACTIVE_MINUTES
def writeActiveLog(param_tStartTime, param_iNumOfMinutes,
param_iExpectedTime):
logline = '' + param_tStartTime[0] + '\t' + param_tStartTime[1]\
+ '\t' + param_tStartTime[2] + '\t' + param_tStartTime[3]\
+ '\t' + param_tStartTime[4] + '\t' + param_tStartTime[5]\
+ '\t' + param_tStartTime[6] + '\t' + str(param_iNumOfMinutes)\
+ '\t' + str(param_iExpectedTime) + '\n'
output = open(G_ACTIVELOGFILE, 'a')
output.write(logline)
output.close()
# Returns the expected number of minutes the screensaver will
# be active.
#
# param_tActivated[4]: Weekday
# param_tActivated[5]: Hour
def getExpectedActiveMinutes(param_tActivated):
weekDict = getWeekDict()
return weekDict[int(param_tActivated[4])][int(param_tActivated[5])]
# Get the time difference in minutes between the
# time tuples param_tStartTime and param_tEndTime
def getTimeDiff(param_tStartTime, param_tEndTime):
iNumOfWeeks = int(param_tEndTime[3]) - int(param_tStartTime[3])
iNumOfDays = int(param_tEndTime[4]) - int(param_tStartTime[4])
iNumOfHours = int(param_tEndTime[5]) - int(param_tStartTime[5])
iNumOfMinutes = int(param_tEndTime[6]) - int(param_tStartTime[6])
if iNumOfWeeks < 0:
iNumOfWeeks = 53 + iNumOfWeeks
if iNumOfDays < 0:
iNumOfWeeks = iNumOfWeeks - 1
iNumOfDays = 7 + iNumOfDays
if iNumOfHours < 0:
iNumOfDays = iNumOfDays - 1
iNumOfHours = 24 + iNumOfHours
if iNumOfMinutes < 0:
iNumOfHours = iNumOfHours - 1
iNumOfMinutes = 60 + iNumOfMinutes
iNumOfMinutes = ((iNumOfWeeks * 7 + iNumOfDays) * 24 + iNumOfHours)\
* 60 + iNumOfMinutes
return iNumOfMinutes
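# Worked example (editorial note): going from (weeknr 10, weekday 5, 23:50) to
# (weeknr 10, weekday 6, 00:20) gives weeks=0, days=1, hours=-23, minutes=-30;
# the borrows cascade (minutes -> 30 with hours=-24, then hours -> 0 with
# days=0), so the function returns ((0*7+0)*24+0)*60+30 = 30 minutes.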
# Log the time the screensaver has been active
def logTimeActive(param_tActivated, param_tDeActivated,
param_fExpectedTimeFactor):
iNumOfMinutes = getTimeDiff(param_tActivated, param_tDeActivated)
weekDict = getWeekDict()
iLastExpectedTime = \
weekDict[int(param_tActivated[4])][int(param_tActivated[5])]
writeActiveLog(param_tActivated, iNumOfMinutes, iLastExpectedTime)
iThisExpectedTime = param_fExpectedTimeFactor * iNumOfMinutes + (1
- param_fExpectedTimeFactor) * iLastExpectedTime
weekDict[int(param_tActivated[4])][int(param_tActivated[5])] = \
iThisExpectedTime
writeWeekDict(weekDict)
| gpl-2.0 | 4,537,091,169,991,236,600 | 27.242424 | 101 | 0.674893 | false |
rjfarmer/mesaTest | main.py | 1 | 1816 | #!/usr/bin/env python
#Note: this is both python2.7 and 3 compatible (other versions may work)
#Copyright (c) 2015, Robert Farmer [email protected]
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function,unicode_literals
import inputCfg
import config
import checkout as c
import log as l
import build as b
import test as t
cfg=config.config()
inp=inputCfg.inputProcess(cfg)
#test suites to run
cfg.test_names=['0.001M_tau1_atm','15M_dynamo']
#cfg.version_list=["cabecd188bb18003ada7c9470d005ac007d1be2c","597e4d662bb9f56cc9f1005d00210293072b5066"]
#List of versions
cfg.version_list=["7518","7525"]
#Results
cfg.log_file='/home/rob/Desktop/mesaTest.log'
#Somewhere to build MESA
cfg.temp_fold='/media/data/mesa/temp/'
cfg.mesasdk_root='/media/data/mesa/sdk/mesasdk-20141212'
cfg.omp_num_threads='8'
#Ignore for now
cfg.vcs_mode='svn'
cfg.vcs_git_base_folder='/media/data/mesa/mesa/dev/'
for cfg.version in cfg.version_list:
print("Running "+cfg.version)
cfg.setDefaults()
cfg.setPaths()
log=l.logger(cfg)
check=c.checkout(cfg)
gb=b.build(cfg)
tt=t.test(cfg)
log.writeLog(cfg)
cfg.cleanup()
print("Done "+cfg.version)
| gpl-2.0 | -4,912,147,528,989,903,000 | 28.290323 | 105 | 0.764317 | false |
YACOWS/opps-polls | opps/polls/admin.py | 1 | 3227 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import (Poll, Choice, PollPost, PollBox,
PollBoxPolls, PollConfig)
from opps.core.admin import PublishableAdmin
from redactor.widgets import RedactorEditor
class PollAdminForm(forms.ModelForm):
class Meta:
model = Poll
widgets = {"headline": RedactorEditor()}
class ChoiceInline(admin.TabularInline):
model = Choice
fk_name = 'poll'
raw_id_fields = ['image']
action = None
extra = 1
fieldsets = [(None, {'fields': ('choice', 'image', 'order', 'votes')})]
class PollPostInline(admin.TabularInline):
model = PollPost
fk_name = 'poll'
raw_id_fields = ['post']
actions = None
extra = 1
classes = ('collapse',)
class PollAdmin(PublishableAdmin):
form = PollAdminForm
prepopulated_fields = {"slug": ["question"]}
list_display = ['question', 'channel', 'date_available', 'date_end', 'published']
list_filter = ["date_end", "date_available", "published", "channel"]
search_fields = ["question", "headline"]
exclude = ('user',)
raw_id_fields = ['main_image', 'channel']
inlines = [ChoiceInline, PollPostInline]
fieldsets = (
(_(u'Identification'), {
'fields': ('question', 'slug')}),
(_(u'Content'), {
'fields': ('headline', 'main_image', 'tags')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
            'classes': ('extrapretty',),
'fields': ('published', ('date_available', 'date_end'),
'order', 'multiple_choices', ('min_multiple_choices',
'max_multiple_choices'), 'display_choice_images',
'show_results')}),
)
class PollBoxPollsInline(admin.TabularInline):
model = PollBoxPolls
fk_name = 'pollbox'
raw_id_fields = ['poll']
actions = None
extra = 1
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('poll', 'order')})]
class PollBoxAdmin(PublishableAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['name', 'date_available', 'published']
list_filter = ['date_available', 'published']
inlines = [PollBoxPollsInline]
exclude = ('user',)
raw_id_fields = ['channel', 'article']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'name', 'slug')}),
(_(u'Relationships'), {
'fields': (('channel', 'article'),)}),
(_(u'Publication'), {
            'classes': ('extrapretty',),
'fields': ('published', 'date_available')}),
)
class PollConfigAdmin(PublishableAdmin):
list_display = ['key', 'key_group', 'channel', 'date_insert',
'date_available', 'published']
list_filter = ["key", 'key_group', "channel", "published"]
search_fields = ["key", "key_group", "value"]
raw_id_fields = ['poll', 'channel', 'article']
exclude = ('user',)
admin.site.register(Poll, PollAdmin)
admin.site.register(PollBox, PollBoxAdmin)
admin.site.register(PollConfig, PollConfigAdmin)
| mit | 2,595,985,477,295,994,400 | 29.733333 | 85 | 0.579795 | false |
softwaresaved/SSINetworkGraphics | Fellows/Python/map_fellows_network.py | 1 | 3548 | import os
import ast
import requests, gspread
import numpy as np
import matplotlib.pyplot as plt
from oauth2client.client import SignedJwtAssertionCredentials
from mpl_toolkits.basemap import Basemap
#Google Authorisation section and getting a worksheet from Google Spreadsheet
def authenticate_google_docs():
f = file(os.path.join('SSI Network Graphics-3357cb9f30de.p12'), 'rb')
SIGNED_KEY = f.read()
f.close()
scope = ['https://spreadsheets.google.com/feeds', 'https://docs.google.com/feeds']
credentials = SignedJwtAssertionCredentials('[email protected]', SIGNED_KEY, scope)
data = {
'refresh_token' : '1/NM56uCG7uFT6VVAAYX3B5TbcMk43wn1xE8Wr-7dsb7lIgOrJDtdun6zK6XiATCKT',
'client_id' : '898367260-pmm78rtfct8af7e0utis686bv78eqmqs.apps.googleusercontent.com',
'client_secret' : 'Cby-rjWDg_wWTSQw_8DDKb3v',
'grant_type' : 'refresh_token',
}
r = requests.post('https://accounts.google.com/o/oauth2/token', data = data)
credentials.access_token = ast.literal_eval(r.text)['access_token']
gc = gspread.authorize(credentials)
return gc
gc_ret = authenticate_google_docs()
sh = gc_ret.open_by_url('https://docs.google.com/spreadsheets/d/13_ZIdeF7oS0xwp_nhGRoVTv7PaXvfLMwVxvgt_hNOkg/edit#gid=383409775')
worksheet_list = sh.worksheets() # Get list of worksheets
#Print the names of first and second worksheets
print "First 2 worksheets of Fellows data Google spreadsheet are:", worksheet_list[0], worksheet_list[1]
# Get all values from the first, seventh and eight columns of Sample datset
values_list_names = worksheet_list[0].col_values(1)
destination_lat_values = worksheet_list[0].col_values(7)
destination_lon_values = worksheet_list[0].col_values(8)
print "Names of SSI fellows are:",values_list_names
print "Destination Latitude values are:",destination_lat_values
print "Destination Longitude values are:", destination_lon_values
# get all values from first, fourth and fifth columns of Home Institutions worksheet
fellows_list_names = worksheet_list[1].col_values(1)
home_lat_values = worksheet_list[1].col_values(4)
home_lon_values = worksheet_list[1].col_values(5)
print "Names of SSI fellows are:",fellows_list_names
print "Home Institution Latitude values are:",home_lat_values
print "Home Institution Longitude values are:", home_lon_values
# create new figure, axes instances.
fig=plt.figure()
ax=fig.add_axes([0.1,0.1,0.8,0.8])
# setup mercator map projection.
m = Basemap(llcrnrlon=-150.,llcrnrlat=-40.,urcrnrlon=150.,urcrnrlat=80.,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=40.,lon_0=-20.,lat_ts=20.)
#Plotting fellows routes on map
print "No. of unique fellows are:", (len(worksheet_list[1].col_values(1))-1)
colcode = ['b','r','g','y','m','c','k','w']
i = 1
j = 1
print "No. of destination entries in the Sample datasheet:", (len(worksheet_list[0].col_values(7))-1)
while i < len(worksheet_list[1].col_values(1)):
    j = 1  # reset the destination index for each fellow; otherwise only the first fellow's home gets any routes drawn
    while j < len(worksheet_list[0].col_values(7)):
        m.drawgreatcircle(float(home_lon_values[i]),float(home_lat_values[i]),float(destination_lon_values[j]),float(destination_lat_values[j]),linewidth=2,color=colcode[i-1])
        j = j + 1
    i = i + 1
#label=fellows_list_names[i]
m.drawcoastlines()
m.fillcontinents()
# draw parallels
m.drawparallels(np.arange(10,90,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
ax.set_title('SSI Fellows Impact')
plt.legend()
plt.show()
| bsd-3-clause | 2,741,477,577,881,294,300 | 35.958333 | 176 | 0.721251 | false |
klahnakoski/JsonSchemaToMarkdown | vendor/mo_hg/hg_mozilla_org.py | 1 | 28441 | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from collections import Mapping
from copy import copy
import mo_threads
from mo_dots import set_default, Null, coalesce, unwraplist, listwrap, wrap, Data
from mo_future import text_type, binary_type
from mo_hg.parse import diff_to_json, diff_to_moves
from mo_hg.repos.changesets import Changeset
from mo_hg.repos.pushs import Push
from mo_hg.repos.revisions import Revision, revision_schema
from mo_json import json2value
from mo_kwargs import override
from mo_logs import Log, strings, machine_metadata
from mo_logs.exceptions import Explanation, assert_no_exception, Except, suppress_exception
from mo_logs.strings import expand_template
from mo_math.randoms import Random
from mo_threads import Thread, Lock, Queue, THREAD_STOP, Till
from mo_times.dates import Date
from mo_times.durations import SECOND, Duration, HOUR, MINUTE, DAY
from pyLibrary.env import http, elasticsearch
from pyLibrary.meta import cache
_hg_branches = None
_OLD_BRANCH = None
def _count(values):
return len(list(values))
def _late_imports():
global _hg_branches
global _OLD_BRANCH
from mo_hg import hg_branches as _hg_branches
from mo_hg.hg_branches import OLD_BRANCH as _OLD_BRANCH
_ = _hg_branches
_ = _OLD_BRANCH
DEFAULT_LOCALE = "en-US"
DEBUG = False
DAEMON_DEBUG = False
DAEMON_HG_INTERVAL = 30 * SECOND # HOW LONG TO WAIT BETWEEN HG REQUESTS (MAX)
DAEMON_WAIT_AFTER_TIMEOUT = 10 * MINUTE # IF WE SEE A TIMEOUT, THEN WAIT
WAIT_AFTER_NODE_FAILURE = 10 * MINUTE # IF WE SEE A NODE FAILURE OR CLUSTER FAILURE, THEN WAIT
WAIT_AFTER_CACHE_MISS = 30 * SECOND # HOW LONG TO WAIT BETWEEN CACHE MISSES
DAEMON_DO_NO_SCAN = ["try"] # SOME BRANCHES ARE NOT WORTH SCANNING
DAEMON_QUEUE_SIZE = 2 ** 15
DAEMON_RECENT_HG_PULL = 2 * SECOND # DETERMINE IF WE GOT DATA FROM HG (RECENT), OR ES (OLDER)
MAX_TODO_AGE = DAY # THE DAEMON WILL NEVER STOP SCANNING; DO NOT ADD OLD REVISIONS TO THE todo QUEUE
MIN_ETL_AGE = Date("03may2018").unix # ARTIFACTS OLDER THAN THIS IN ES ARE REPLACED
UNKNOWN_PUSH = "Unknown push {{revision}}"
MAX_DIFF_SIZE = 1000
DIFF_URL = "{{location}}/raw-rev/{{rev}}"
FILE_URL = "{{location}}/raw-file/{{rev}}{{path}}"
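# Example (sketch): these moustache templates are filled in with expand_template, e.g.
#   expand_template(DIFF_URL, {"location": "https://hg.mozilla.org/mozilla-central", "rev": "b44a8c68fc60"})
# gives "https://hg.mozilla.org/mozilla-central/raw-rev/b44a8c68fc60"; the location shown here is
# illustrative, in the code below it always comes from revision.branch.url.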
last_called_url = {}
class HgMozillaOrg(object):
"""
USE hg.mozilla.org FOR REPO INFORMATION
USE ES AS A FASTER CACHE FOR THE SAME
"""
@override
def __init__(
self,
hg=None, # CONNECT TO hg
repo=None, # CONNECTION INFO FOR ES CACHE
branches=None, # CONNECTION INFO FOR ES CACHE
use_cache=False, # True IF WE WILL USE THE ES FOR DOWNLOADING BRANCHES
timeout=30 * SECOND,
kwargs=None
):
if not _hg_branches:
_late_imports()
self.es_locker = Lock()
self.todo = mo_threads.Queue("todo for hg daemon", max=DAEMON_QUEUE_SIZE)
self.settings = kwargs
self.timeout = Duration(timeout)
# VERIFY CONNECTIVITY
with Explanation("Test connect with hg"):
response = http.head(self.settings.hg.url)
if branches == None:
self.branches = _hg_branches.get_branches(kwargs=kwargs)
self.es = None
return
self.last_cache_miss = Date.now()
set_default(repo, {"schema": revision_schema})
self.es = elasticsearch.Cluster(kwargs=repo).get_or_create_index(kwargs=repo)
def setup_es(please_stop):
with suppress_exception:
self.es.add_alias()
with suppress_exception:
self.es.set_refresh_interval(seconds=1)
Thread.run("setup_es", setup_es)
self.branches = _hg_branches.get_branches(kwargs=kwargs)
self.timeout = timeout
Thread.run("hg daemon", self._daemon)
def _daemon(self, please_stop):
while not please_stop:
with Explanation("looking for work"):
try:
branch, revisions = self.todo.pop(till=please_stop)
except Exception as e:
if please_stop:
break
else:
raise e
if branch.name in DAEMON_DO_NO_SCAN:
continue
revisions = set(revisions)
            # FIND THE REVISIONS ON THIS BRANCH
for r in list(revisions):
try:
rev = self.get_revision(Revision(branch=branch, changeset={"id": r}))
if DAEMON_DEBUG:
Log.note("found revision with push date {{date|datetime}}", date=rev.push.date)
revisions.discard(r)
if rev.etl.timestamp > Date.now()-DAEMON_RECENT_HG_PULL:
# SOME PUSHES ARE BIG, RUNNING THE RISK OTHER MACHINES ARE
# ALSO INTERESTED AND PERFORMING THE SAME SCAN. THIS DELAY
# WILL HAVE SMALL EFFECT ON THE MAJORITY OF SMALL PUSHES
# https://bugzilla.mozilla.org/show_bug.cgi?id=1417720
Till(seconds=Random.float(DAEMON_HG_INTERVAL).seconds*2).wait()
except Exception as e:
Log.warning(
"Scanning {{branch}} {{revision|left(12)}}",
branch=branch.name,
revision=r,
cause=e
)
if "Read timed out" in e:
Till(seconds=DAEMON_WAIT_AFTER_TIMEOUT.seconds).wait()
# FIND ANY BRANCH THAT MAY HAVE THIS REVISION
for r in list(revisions):
self._find_revision(r)
@cache(duration=HOUR, lock=True)
def get_revision(self, revision, locale=None, get_diff=False, get_moves=True):
"""
EXPECTING INCOMPLETE revision OBJECT
RETURNS revision
"""
rev = revision.changeset.id
if not rev:
return Null
elif rev == "None":
return Null
elif revision.branch.name == None:
return Null
locale = coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)
output = self._get_from_elasticsearch(revision, locale=locale, get_diff=get_diff)
if output:
if not get_diff: # DIFF IS BIG, DO NOT KEEP IT IF NOT NEEDED
output.changeset.diff = None
if not get_moves:
output.changeset.moves = None
DEBUG and Log.note("Got hg ({{branch}}, {{locale}}, {{revision}}) from ES", branch=output.branch.name, locale=locale, revision=output.changeset.id)
if output.push.date >= Date.now()-MAX_TODO_AGE:
self.todo.add((output.branch, listwrap(output.parents)))
self.todo.add((output.branch, listwrap(output.children)))
if output.push.date:
return output
# RATE LIMIT CALLS TO HG (CACHE MISSES)
next_cache_miss = self.last_cache_miss + (Random.float(WAIT_AFTER_CACHE_MISS.seconds * 2) * SECOND)
self.last_cache_miss = Date.now()
if next_cache_miss > self.last_cache_miss:
Log.note("delaying next hg call for {{seconds|round(decimal=1)}}", seconds=next_cache_miss - self.last_cache_miss)
Till(till=next_cache_miss.unix).wait()
found_revision = copy(revision)
if isinstance(found_revision.branch, (text_type, binary_type)):
lower_name = found_revision.branch.lower()
else:
lower_name = found_revision.branch.name.lower()
if not lower_name:
Log.error("Defective revision? {{rev|json}}", rev=found_revision.branch)
b = found_revision.branch = self.branches[(lower_name, locale)]
if not b:
b = found_revision.branch = self.branches[(lower_name, DEFAULT_LOCALE)]
if not b:
Log.warning("can not find branch ({{branch}}, {{locale}})", branch=lower_name, locale=locale)
return Null
if Date.now() - Date(b.etl.timestamp) > _OLD_BRANCH:
self.branches = _hg_branches.get_branches(kwargs=self.settings)
push = self._get_push(found_revision.branch, found_revision.changeset.id)
url1 = found_revision.branch.url.rstrip("/") + "/json-info?node=" + found_revision.changeset.id[0:12]
url2 = found_revision.branch.url.rstrip("/") + "/json-rev/" + found_revision.changeset.id[0:12]
with Explanation("get revision from {{url}}", url=url1, debug=DEBUG):
raw_rev2 = Null
try:
raw_rev1 = self._get_raw_json_info(url1, found_revision.branch)
raw_rev2 = self._get_raw_json_rev(url2, found_revision.branch)
except Exception as e:
if "Hg denies it exists" in e:
raw_rev1 = Data(node=revision.changeset.id)
else:
raise e
output = self._normalize_revision(set_default(raw_rev1, raw_rev2), found_revision, push, get_diff, get_moves)
if output.push.date >= Date.now()-MAX_TODO_AGE:
self.todo.add((output.branch, listwrap(output.parents)))
self.todo.add((output.branch, listwrap(output.children)))
if not get_diff: # DIFF IS BIG, DO NOT KEEP IT IF NOT NEEDED
output.changeset.diff = None
if not get_moves:
output.changeset.moves = None
return output
def _get_from_elasticsearch(self, revision, locale=None, get_diff=False, get_moves=True):
rev = revision.changeset.id
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"term": {"changeset.id12": rev[0:12]}},
{"term": {"branch.name": revision.branch.name}},
{"term": {"branch.locale": coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 20
}
else:
query = {
"query": {"bool": {"must": [
{"term": {"changeset.id12": rev[0:12]}},
{"term": {"branch.name": revision.branch.name}},
{"term": {"branch.locale": coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 20
}
for attempt in range(3):
try:
with self.es_locker:
docs = self.es.search(query).hits.hits
if len(docs) == 0:
return None
best = docs[0]._source
if len(docs) > 1:
for d in docs:
if d._id.endswith(d._source.branch.locale):
best = d._source
Log.warning("expecting no more than one document")
return best
except Exception as e:
e = Except.wrap(e)
if "EsRejectedExecutionException[rejected execution (queue capacity" in e:
(Till(seconds=Random.int(30))).wait()
continue
else:
Log.warning("Bad ES call, waiting for {{num}} seconds", num=WAIT_AFTER_NODE_FAILURE.seconds, cause=e)
Till(seconds=WAIT_AFTER_NODE_FAILURE.seconds).wait()
continue
Log.warning("ES did not deliver, fall back to HG")
return None
@cache(duration=HOUR, lock=True)
def _get_raw_json_info(self, url, branch):
raw_revs = self._get_and_retry(url, branch)
if "(not in 'served' subset)" in raw_revs:
Log.error("Tried {{url}}. Hg denies it exists.", url=url)
if isinstance(raw_revs, text_type) and raw_revs.startswith("unknown revision '"):
Log.error("Tried {{url}}. Hg denies it exists.", url=url)
if len(raw_revs) != 1:
Log.error("do not know what to do")
return raw_revs.values()[0]
@cache(duration=HOUR, lock=True)
def _get_raw_json_rev(self, url, branch):
raw_rev = self._get_and_retry(url, branch)
return raw_rev
@cache(duration=HOUR, lock=True)
def _get_push(self, branch, changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"term": {"branch.name": branch.name}},
{"prefix": {"changeset.id": changeset_id[0:12]}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"term": {"branch.name": branch.name}},
{"prefix": {"changeset.id": changeset_id[0:12]}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
json_push = response.hits.hits[0]._source.push
if json_push:
return json_push
except Exception:
pass
url = branch.url.rstrip("/") + "/json-pushes?full=1&changeset=" + changeset_id
with Explanation("Pulling pushlog from {{url}}", url=url, debug=DEBUG):
Log.note(
"Reading pushlog from {{url}}",
url=url,
changeset=changeset_id
)
data = self._get_and_retry(url, branch)
# QUEUE UP THE OTHER CHANGESETS IN THE PUSH
self.todo.add((branch, [c.node for cs in data.values().changesets for c in cs]))
pushes = [
Push(id=int(index), date=_push.date, user=_push.user)
for index, _push in data.items()
]
if len(pushes) == 0:
return Null
elif len(pushes) == 1:
return pushes[0]
else:
Log.error("do not know what to do")
def _normalize_revision(self, r, found_revision, push, get_diff, get_moves):
new_names = set(r.keys()) - KNOWN_TAGS
if new_names and not r.tags:
Log.warning(
"hg is returning new property names {{names|quote}} for {{changeset}} from {{url}}",
names=new_names,
changeset=r.node,
url=found_revision.branch.url
)
changeset = Changeset(
id=r.node,
id12=r.node[0:12],
author=r.user,
description=strings.limit(coalesce(r.description, r.desc), 2000),
date=parse_hg_date(r.date),
files=r.files,
backedoutby=r.backedoutby if r.backedoutby else None,
bug=self._extract_bug_id(r.description)
)
rev = Revision(
branch=found_revision.branch,
index=r.rev,
changeset=changeset,
parents=unwraplist(list(set(r.parents))),
children=unwraplist(list(set(r.children))),
push=push,
phase=r.phase,
bookmarks=unwraplist(r.bookmarks),
landingsystem=r.landingsystem,
etl={"timestamp": Date.now().unix, "machine": machine_metadata}
)
r.pushuser = None
r.pushdate = None
r.pushid = None
r.node = None
r.user = None
r.desc = None
r.description = None
r.date = None
r.files = None
r.backedoutby = None
r.parents = None
r.children = None
r.bookmarks = None
r.landingsystem = None
set_default(rev, r)
# ADD THE DIFF
if get_diff:
rev.changeset.diff = self._get_json_diff_from_hg(rev)
if get_moves:
rev.changeset.moves = self._get_moves_from_hg(rev)
try:
_id = coalesce(rev.changeset.id12, "") + "-" + rev.branch.name + "-" + coalesce(rev.branch.locale, DEFAULT_LOCALE)
with self.es_locker:
self.es.add({"id": _id, "value": rev})
except Exception as e:
e = Except.wrap(e)
Log.warning("Did not save to ES, waiting {{duration}}", duration=WAIT_AFTER_NODE_FAILURE, cause=e)
Till(seconds=WAIT_AFTER_NODE_FAILURE.seconds).wait()
if "FORBIDDEN/12/index read-only" in e:
pass # KNOWN FAILURE MODE
return rev
def _get_and_retry(self, url, branch, **kwargs):
"""
requests 2.5.0 HTTPS IS A LITTLE UNSTABLE
"""
kwargs = set_default(kwargs, {"timeout": self.timeout.seconds})
try:
output = _get_url(url, branch, **kwargs)
return output
except Exception as e:
if UNKNOWN_PUSH in e:
Log.error("Tried {{url}} and failed", {"url": url}, cause=e)
try:
(Till(seconds=5)).wait()
return _get_url(url.replace("https://", "http://"), branch, **kwargs)
            except Exception as f:
                retry_error = f  # keep a reference; Python 3 clears the name bound by "except ... as f" after the block
path = url.split("/")
if path[3] == "l10n-central":
# FROM https://hg.mozilla.org/l10n-central/tr/json-pushes?full=1&changeset=a6eeb28458fd
# TO https://hg.mozilla.org/mozilla-central/json-pushes?full=1&changeset=a6eeb28458fd
path = path[0:3] + ["mozilla-central"] + path[5:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[5] == "mozilla-aurora":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-aurora/pt-PT/json-pushes?full=1&changeset=b44a8c68fc60
# TO https://hg.mozilla.org/releases/mozilla-aurora/json-pushes?full=1&changeset=b44a8c68fc60
path = path[0:4] + ["mozilla-aurora"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[5] == "mozilla-beta":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-beta/lt/json-pushes?full=1&changeset=03fbf7556c94
# TO https://hg.mozilla.org/releases/mozilla-beta/json-pushes?full=1&changeset=b44a8c68fc60
path = path[0:4] + ["mozilla-beta"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 7 and path[5] == "mozilla-release":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-release/en-GB/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
# TO https://hg.mozilla.org/releases/mozilla-release/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
path = path[0:4] + ["mozilla-release"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[4] == "autoland":
# FROM https://hg.mozilla.org/build/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
# TO https://hg.mozilla.org/integration/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
path = path[0:3] + ["try"] + path[5:]
return self._get_and_retry("/".join(path), branch, **kwargs)
Log.error("Tried {{url}} twice. Both failed.", {"url": url}, cause=[e, f])
@cache(duration=HOUR, lock=True)
def _find_revision(self, revision):
please_stop = False
locker = Lock()
output = []
queue = Queue("branches", max=2000)
queue.extend(b for b in self.branches if b.locale == DEFAULT_LOCALE and b.name in ["try", "mozilla-inbound", "autoland"])
queue.add(THREAD_STOP)
problems = []
def _find(please_stop):
for b in queue:
if please_stop:
return
try:
url = b.url + "json-info?node=" + revision
rev = self.get_revision(Revision(branch=b, changeset={"id": revision}))
with locker:
output.append(rev)
Log.note("Revision found at {{url}}", url=url)
except Exception as f:
problems.append(f)
threads = []
for i in range(3):
threads.append(Thread.run("find changeset " + text_type(i), _find, please_stop=please_stop))
for t in threads:
with assert_no_exception:
t.join()
return output
def _extract_bug_id(self, description):
"""
LOOK INTO description to FIND bug_id
"""
if description == None:
return None
match = re.findall(r'[Bb](?:ug)?\s*([0-9]{5,7})', description)
if match:
return int(match[0])
return None
def _get_json_diff_from_hg(self, revision):
"""
:param revision: INCOMPLETE REVISION OBJECT
:return:
"""
@cache(duration=MINUTE, lock=True)
def inner(changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
json_diff = response.hits.hits[0]._source.changeset.diff
if json_diff:
return json_diff
except Exception as e:
pass
url = expand_template(DIFF_URL, {"location": revision.branch.url, "rev": changeset_id})
DEBUG and Log.note("get unified diff from {{url}}", url=url)
try:
response = http.get(url)
diff = response.content.decode("utf8")
json_diff = diff_to_json(diff)
num_changes = _count(c for f in json_diff for c in f.changes)
if json_diff:
if revision.changeset.description.startswith("merge "):
return None # IGNORE THE MERGE CHANGESETS
elif num_changes < MAX_DIFF_SIZE:
return json_diff
else:
Log.warning("Revision at {{url}} has a diff with {{num}} changes, ignored", url=url, num=num_changes)
for file in json_diff:
file.changes = None
return json_diff
except Exception as e:
Log.warning("could not get unified diff from {{url}}", url=url, cause=e)
return inner(revision.changeset.id)
def _get_moves_from_hg(self, revision):
"""
:param revision: INCOMPLETE REVISION OBJECT
:return:
"""
@cache(duration=MINUTE, lock=True)
def inner(changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
moves = response.hits.hits[0]._source.changeset.moves
if moves:
return moves
except Exception as e:
pass
url = expand_template(DIFF_URL, {"location": revision.branch.url, "rev": changeset_id})
DEBUG and Log.note("get unified diff from {{url}}", url=url)
try:
moves = http.get(url).content.decode('latin1') # THE ENCODING DOES NOT MATTER BECAUSE WE ONLY USE THE '+', '-' PREFIXES IN THE DIFF
return diff_to_moves(text_type(moves))
except Exception as e:
Log.warning("could not get unified diff from {{url}}", url=url, cause=e)
return inner(revision.changeset.id)
def _get_source_code_from_hg(self, revision, file_path):
response = http.get(expand_template(FILE_URL, {"location": revision.branch.url, "rev": revision.changeset.id, "path": file_path}))
return response.content.decode("utf8", "replace")
def _trim(url):
return url.split("/json-pushes?")[0].split("/json-info?")[0].split("/json-rev/")[0]
def _get_url(url, branch, **kwargs):
with Explanation("get push from {{url}}", url=url, debug=DEBUG):
response = http.get(url, **kwargs)
data = json2value(response.content.decode("utf8"))
if isinstance(data, (text_type, str)) and data.startswith("unknown revision"):
Log.error(UNKNOWN_PUSH, revision=strings.between(data, "'", "'"))
branch.url = _trim(url) # RECORD THIS SUCCESS IN THE BRANCH
return data
def parse_hg_date(date):
if isinstance(date, text_type):
return Date(date)
elif isinstance(date, list):
        # FIRST ITEM OF THE (timestamp, time_zone) TUPLE, WHERE timestamp IS GMT
return Date(date[0])
else:
Log.error("Can not deal with date like {{date|json}}", date=date)
def minimize_repo(repo):
"""
RETURN A MINIMAL VERSION OF THIS CHANGESET
"""
if repo == None:
return Null
output = wrap(_copy_but(repo, _exclude_from_repo))
output.changeset.description = strings.limit(output.changeset.description, 1000)
return output
_exclude_from_repo = Data()
for k in [
"changeset.files",
"changeset.diff",
"changeset.moves",
"etl",
"branch.last_used",
"branch.description",
"branch.etl",
"branch.parent_name",
"children",
"parents",
"phase",
"bookmarks",
"tags"
]:
_exclude_from_repo[k] = True
_exclude_from_repo = _exclude_from_repo
def _copy_but(value, exclude):
output = {}
for k, v in value.items():
e = exclude.get(k, {})
if e!=True:
if isinstance(v, Mapping):
v2 = _copy_but(v, e)
if v2 != None:
output[k] = v2
elif v != None:
output[k] = v
return output if output else None
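# Worked example (sketch): _copy_but({"a": 1, "b": {"c": 2}}, {"b": {"c": True}}) returns {"a": 1};
# "b" is dropped entirely because everything under it is excluded. minimize_repo uses this with
# _exclude_from_repo to strip diffs, moves and other bulky fields from a changeset.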
KNOWN_TAGS = {
"rev",
"node",
"user",
"description",
"desc",
"date",
"files",
"backedoutby",
"parents",
"children",
"branch",
"tags",
"pushuser",
"pushdate",
"pushid",
"phase",
"bookmarks",
"landingsystem"
}
| mpl-2.0 | -8,912,115,382,508,336,000 | 37.800819 | 159 | 0.53451 | false |
matthewbentley/teenlink | callhandler.py | 1 | 5899 | import webapp2
import jinja2
import os
from twilio import twiml
from twilio.rest import TwilioRestClient
from twilio.util import RequestValidator
from google.appengine.ext import ndb
import logging
import json
from private import account_sid, auth_token
from common import make_template
class Call(ndb.Model):
"""Model for the calls db"""
calls = ndb.StringProperty(indexed=True)
class User(ndb.Model):
"""Model for the user db"""
fullname = ndb.StringProperty(indexed=True)
phone_number = ndb.StringProperty(indexed=True)
phone_worker = ndb.BooleanProperty()
can_text = ndb.BooleanProperty()
PAB = ndb.BooleanProperty()
class Group(ndb.Model):
"""Model for groups db"""
groupname = ndb.StringProperty(indexed=True)
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
from common import add_header
class StartHere(webapp2.RequestHandler):
"""XML response for the initial recording call"""
def get(self):
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
except:
twilio_signature = ""
r = twiml.Response()
if validator.validate(url, params, twilio_signature):
logging.debug(self.request.get('to_call'))
r.record(action="/handle-recording?to_call=%s" % (self.request.get('to_call')), method="GET")
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
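        # The TwiML written above looks roughly like (sketch):
        #   <Response><Record action="/handle-recording?to_call=..." method="GET"/></Response>
        # so Twilio records the caller and then requests /handle-recording with the recording URL.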
class HandleRecording(webapp2.RedirectHandler):
"""Makes calls to everyone who needs to get the call, points twilio to make-calls"""
def get(self):
client = TwilioRestClient(account_sid, auth_token)
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
logging.debug(twilio_signature)
except:
twilio_signature = ""
if validator.validate(url, params, twilio_signature):
logging.debug("Validated")
call_id = self.request.get('to_call')
print call_id
infos = Call.query(Call.key==ndb.Key(Call, int(call_id))).fetch()
print infos
for info in infos:
print info
for i in json.loads(info.calls):
print i
call_out = client.calls.create(to=i, from_="2065576875",
url="https://teen-link.appspot.com/make-calls?RecordingUrl=" + self.request.get("RecordingUrl"),
method="GET",
if_machine="Continue")
print call_out
else:
self.response.headers['Content-Type'] = 'text/html'
self.response.write("Please don't try to hack me.")
class MakeCalls(webapp2.RedirectHandler):
"""Returns XML to play the recording"""
def get(self):
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
except:
twilio_signature = ""
if validator.validate(url, params, twilio_signature):
r = twiml.Response()
r.play(self.request.get("RecordingUrl"))
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
else:
self.response.headers['Content-Type'] = 'text/html'
self.response.write("Please don't try to hack me.")
class MainPage(webapp2.RequestHandler):
"""Main landing page with links to different pages"""
def get(self):
template_values = make_template(self)
pages={"Add User":"/users/manage", "List and Edit Users":"/users/list", "Make New Call":"/action/newcall"}
template_values['pages'] = pages
template = JINJA_ENVIRONMENT.get_template('home.jinja')
self.response.write(template.render(template_values))
#class Test(webapp2.RequestHandler):
# def get(self):
## if self.request.get('group'):
## group=Group(key_name=self.request.get('group'))
## group.groupname = self.request.get('group')
## group.put()
# if self.request.get('user') and self.request.get('group'):
## print self.request.get('user')
## print self.request.get('group')
# info = ndb.gql("SELECT * FROM User WHERE fullname=:1", self.request.get('user'))
# group=Group(parent=info.next().key())
# group.groupname = self.request.get('group')
# group.put()
## info = ndb.GqlQuery("SELECT * FROM Group WHERE groupname=:1", self.request.get('group'))
## print info.next().parent().fullname
## print info.next()
## info = ndb.GqlQuery("SELECT * FROM User WHERE fullname=:1", self.request.get('user'))
## key = info.next()
## infog = Group.all().next().parent()
## info = User.all().filter("fullname ==", self.request.get('user'))
## info2 = info
## print infog.fullname
## print dir(infog.ancestor(key).next())
app = webapp2.WSGIApplication([
('/twiml', StartHere),
('/handle-recording', HandleRecording),
('/make-calls', MakeCalls),
('/index', MainPage),
('/', MainPage)],
debug=True)
# Test comment please ignore
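# Call flow recap (descriptive note): /twiml records a message, /handle-recording dials every number
# stored in the matching Call entity through the Twilio REST client, and /make-calls returns TwiML
# that plays the recording back to each callee.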
| mit | 6,147,632,002,842,502,000 | 39.129252 | 149 | 0.577556 | false |
gusnaughton/CTFd | CTFd/admin.py | 1 | 24338 | from flask import render_template, request, redirect, abort, jsonify, url_for, session
from CTFd.utils import sha512, is_safe_url, authed, admins_only, is_admin, unix_time, unix_time_millis, get_config, set_config, get_digitalocean, sendmail, rmdir
from CTFd.models import db, Teams, Solves, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config
from itsdangerous import TimedSerializer, BadTimeSignature
from werkzeug.utils import secure_filename
from socket import inet_aton, inet_ntoa
from passlib.hash import bcrypt_sha256
from flask import current_app as app
import logging
import hashlib
import time
import re
import os
import json
def init_admin(app):
@app.route('/admin', methods=['GET', 'POST'])
def admin():
if request.method == 'POST':
username = request.form.get('name')
password = request.form.get('password')
admin = Teams.query.filter_by(name=request.form['name'], admin=True).first()
if admin and bcrypt_sha256.verify(request.form['password'], admin.password):
try:
session.regenerate() # NO SESSION FIXATION FOR YOU
except:
pass # TODO: Some session objects dont implement regenerate :(
session['username'] = admin.name
session['id'] = admin.id
session['admin'] = True
session['nonce'] = sha512(os.urandom(10))
db.session.close()
return redirect('/admin/graphs')
if is_admin():
return redirect('/admin/graphs')
return render_template('admin/login.html')
@app.route('/admin/graphs')
@admins_only
def admin_graphs():
return render_template('admin/graphs.html')
@app.route('/admin/config', methods=['GET', 'POST'])
@admins_only
def admin_config():
if request.method == "POST":
try:
start = int(request.form['start'])
end = int(request.form['end'])
except (ValueError, TypeError):
start = None
end = None
try:
view_challenges_unregistered = bool(request.form.get('view_challenges_unregistered', None))
prevent_registration = bool(request.form.get('prevent_registration', None))
prevent_name_change = bool(request.form.get('prevent_name_change', None))
view_after_ctf = bool(request.form.get('view_after_ctf', None))
except (ValueError, TypeError):
view_challenges_unregistered = None
prevent_registration = None
prevent_name_change = None
view_after_ctf = None
finally:
view_challenges_unregistered = set_config('view_challenges_unregistered', view_challenges_unregistered)
prevent_registration = set_config('prevent_registration', prevent_registration)
prevent_name_change = set_config('prevent_name_change', prevent_name_change)
view_after_ctf = set_config('view_after_ctf', view_after_ctf)
ctf_name = set_config("ctf_name", request.form.get('ctf_name', None))
mg_api_key = set_config("mg_api_key", request.form.get('mg_api_key', None))
do_api_key = set_config("do_api_key", request.form.get('do_api_key', None))
max_tries = set_config("max_tries", request.form.get('max_tries', None))
db_start = Config.query.filter_by(key='start').first()
db_start.value = start
db_end = Config.query.filter_by(key='end').first()
db_end.value = end
db.session.add(db_start)
db.session.add(db_end)
db.session.commit()
return redirect('/admin/config')
ctf_name = get_config('ctf_name')
if not ctf_name:
set_config('do_api_key', None)
mg_api_key = get_config('do_api_key')
if not mg_api_key:
set_config('do_api_key', None)
do_api_key = get_config('do_api_key')
if not do_api_key:
set_config('do_api_key', None)
max_tries = get_config('max_tries')
if not max_tries:
set_config('max_tries', 0)
max_tries = 0
view_after_ctf = get_config('view_after_ctf') == '1'
if not view_after_ctf:
set_config('view_after_ctf', 0)
view_after_ctf = 0
start = get_config('start')
if not start:
set_config('start', None)
end = get_config('end')
if not end:
set_config('end', None)
view_challenges_unregistered = get_config('view_challenges_unregistered') == '1'
if not view_challenges_unregistered:
set_config('view_challenges_unregistered', None)
prevent_registration = get_config('prevent_registration') == '1'
if not prevent_registration:
set_config('prevent_registration', None)
prevent_name_change = get_config('prevent_name_change') == '1'
if not prevent_name_change:
set_config('prevent_name_change', None)
db.session.commit()
db.session.close()
return render_template('admin/config.html', ctf_name=ctf_name, start=start, end=end,
max_tries=max_tries,
view_challenges_unregistered=view_challenges_unregistered,
prevent_registration=prevent_registration, do_api_key=do_api_key, mg_api_key=mg_api_key,
prevent_name_change=prevent_name_change,
view_after_ctf=view_after_ctf)
@app.route('/admin/pages', defaults={'route': None}, methods=['GET', 'POST'])
@app.route('/admin/pages/<route>', methods=['GET', 'POST'])
@admins_only
def admin_pages(route):
if route and request.method == 'GET':
page = Pages.query.filter_by(route=route).first()
return render_template('admin/editor.html', page=page)
if route and request.method == 'POST':
page = Pages.query.filter_by(route=route).first()
errors = []
html = request.form['html']
route = request.form['route']
if not route:
errors.append('Missing URL route')
if errors:
page = Pages(html, "")
return render_template('/admin/editor.html', page=page)
if page:
page.route = route
page.html = html
db.session.commit()
return redirect('/admin/pages')
page = Pages(route, html)
db.session.add(page)
db.session.commit()
return redirect('/admin/pages')
if not route and request.method == 'POST':
return render_template('admin/editor.html')
pages = Pages.query.all()
return render_template('admin/pages.html', routes=pages)
@app.route('/admin/page/<pageroute>/delete', methods=['POST'])
@admins_only
def delete_page(pageroute):
page = Pages.query.filter_by(route=pageroute).first()
db.session.delete(page)
db.session.commit()
return '1'
@app.route('/admin/hosts', methods=['GET'])
@admins_only
def admin_hosts():
m = get_digitalocean()
errors = []
if not m:
errors.append("Your Digital Ocean API key is not set")
return render_template('admin/hosts.html', errors=errors)
hosts = m.get_all_droplets()
slugs = m.get_all_sizes()
images = m.get_all_images()
regions = m.get_all_regions()
return render_template('admin/hosts.html', hosts=hosts, slugs=slugs, images=images, regions=regions)
@app.route('/admin/chals', methods=['POST', 'GET'])
@admins_only
def admin_chals():
if request.method == 'POST':
chals = Challenges.query.add_columns('id', 'name', 'value', 'description', 'category').order_by(Challenges.value).all()
json = {'game':[]}
for x in chals:
json['game'].append({'id':x[1], 'name':x[2], 'value':x[3], 'description':x[4], 'category':x[5]})
db.session.close()
return jsonify(json)
else:
return render_template('admin/chals.html')
@app.route('/admin/keys/<chalid>', methods=['POST', 'GET'])
@admins_only
def admin_keys(chalid):
if request.method == 'GET':
keys = Keys.query.filter_by(chal=chalid).all()
json = {'keys':[]}
for x in keys:
json['keys'].append({'id':x.id, 'key':x.flag, 'type':x.key_type})
return jsonify(json)
elif request.method == 'POST':
keys = Keys.query.filter_by(chal=chalid).all()
for x in keys:
db.session.delete(x)
newkeys = request.form.getlist('keys[]')
newvals = request.form.getlist('vals[]')
for flag, val in zip(newkeys, newvals):
key = Keys(chalid, flag, val)
db.session.add(key)
db.session.commit()
db.session.close()
return '1'
@app.route('/admin/tags/<chalid>', methods=['GET', 'POST'])
@admins_only
def admin_tags(chalid):
if request.method == 'GET':
tags = Tags.query.filter_by(chal=chalid).all()
json = {'tags':[]}
for x in tags:
json['tags'].append({'id':x.id, 'chal':x.chal, 'tag':x.tag})
return jsonify(json)
elif request.method == 'POST':
newtags = request.form.getlist('tags[]')
for x in newtags:
tag = Tags(chalid, x)
db.session.add(tag)
db.session.commit()
db.session.close()
return '1'
@app.route('/admin/tags/<tagid>/delete', methods=['POST'])
@admins_only
def admin_delete_tags(tagid):
if request.method == 'POST':
tag = Tags.query.filter_by(id=tagid).first_or_404()
db.session.delete(tag)
db.session.commit()
db.session.close()
return "1"
@app.route('/admin/files/<chalid>', methods=['GET', 'POST'])
@admins_only
def admin_files(chalid):
if request.method == 'GET':
files = Files.query.filter_by(chal=chalid).all()
json = {'files':[]}
for x in files:
json['files'].append({'id':x.id, 'file':x.location})
return jsonify(json)
if request.method == 'POST':
if request.form['method'] == "delete":
f = Files.query.filter_by(id=request.form['file']).first_or_404()
if os.path.isfile(f.location):
os.unlink(f.location)
db.session.delete(f)
db.session.commit()
db.session.close()
return "1"
elif request.form['method'] == "upload":
files = request.files.getlist('files[]')
for f in files:
filename = secure_filename(f.filename)
if len(filename) <= 0:
continue
md5hash = hashlib.md5(os.urandom(64)).hexdigest()
# BUG NEEDS TO GO TO S3
base = os.path.dirname(os.path.dirname(__file__))
## mod_wsgi does some sad things with cwd so the upload directory needs to be shifted a bit
if not os.path.exists(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash)):
os.makedirs(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash))
f.save(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash, filename))
                ## This needs to be relative to CTFd so it doesn't need base.
db_f = Files(chalid, os.path.join(app.config['UPLOAD_FOLDER'], md5hash, filename))
db.session.add(db_f)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
@app.route('/admin/teams')
@admins_only
def admin_teams():
teams = Teams.query.all()
return render_template('admin/teams.html', teams=teams)
@app.route('/admin/team/<teamid>', methods=['GET', 'POST'])
@admins_only
def admin_team(teamid):
user = Teams.query.filter_by(id=teamid).first()
solves = Solves.query.filter_by(teamid=teamid).all()
addrs = Tracking.query.filter_by(team=teamid).group_by(Tracking.ip).all()
score = user.score()
place = user.place()
if request.method == 'GET':
return render_template('admin/team.html', solves=solves, team=user, addrs=addrs, score=score, place=place)
elif request.method == 'POST':
admin = request.form.get('admin', "false")
admin = 1 if admin == "true" else 0
if admin:
user.admin = 1
db.session.commit()
return jsonify({'data': ['success']})
name = request.form.get('name', None)
password = request.form.get('password', None)
email = request.form.get('email', None)
website = request.form.get('website', None)
affiliation = request.form.get('affiliation', None)
country = request.form.get('country', None)
errors = []
name_used = Teams.query.filter(Teams.name == name).first()
if name_used and int(name_used.id) != int(teamid):
errors.append('That name is taken')
email_used = Teams.query.filter(Teams.email == email).first()
if email_used and int(email_used.id) != int(teamid):
errors.append('That email is taken')
if errors:
db.session.close()
return jsonify({'data':errors})
else:
user.name = name
user.email = email
if password:
user.password = bcrypt_sha256.encrypt(password)
user.website = website
user.affiliation = affiliation
user.country = country
db.session.commit()
db.session.close()
return jsonify({'data':['success']})
@app.route('/admin/team/<teamid>/mail', methods=['POST'])
@admins_only
def email_user(teamid):
message = request.form.get('msg', None)
team = Teams.query.filter(Teams.id == teamid).first()
if message and team:
if sendmail(team.email, message):
return "1"
return "0"
@app.route('/admin/team/<teamid>/ban', methods=['POST'])
@admins_only
def ban(teamid):
user = Teams.query.filter_by(id=teamid).first()
user.banned = 1
db.session.commit()
return redirect('/admin/scoreboard')
@app.route('/admin/team/<teamid>/unban', methods=['POST'])
@admins_only
def unban(teamid):
user = Teams.query.filter_by(id=teamid).first()
user.banned = None
db.session.commit()
return redirect('/admin/scoreboard')
@app.route('/admin/team/<teamid>/delete', methods=['POST'])
@admins_only
def delete_team(teamid):
user = Teams.query.filter_by(id=teamid).first()
db.session.delete(user)
db.session.commit()
return '1'
@app.route('/admin/graphs/<graph_type>')
@admins_only
def admin_graph(graph_type):
if graph_type == 'categories':
categories = db.session.query(Challenges.category, db.func.count(Challenges.category)).group_by(Challenges.category).all()
json = {'categories':[]}
for category, count in categories:
json['categories'].append({'category':category, 'count':count})
return jsonify(json)
elif graph_type == "solves":
solves = Solves.query.add_columns(db.func.count(Solves.chalid)).group_by(Solves.chalid).all()
json = {}
for chal, count in solves:
json[chal.chal.name] = count
return jsonify(json)
@app.route('/admin/scoreboard')
@admins_only
def admin_scoreboard():
score = db.func.sum(Challenges.value).label('score')
quickest = db.func.max(Solves.date).label('quickest')
teams = db.session.query(Solves.teamid, Teams.name, Teams.banned, score).join(Teams).join(Challenges).group_by(Solves.teamid).order_by(score.desc(), quickest)
db.session.close()
return render_template('admin/scoreboard.html', teams=teams)
@app.route('/admin/scores')
@admins_only
def admin_scores():
score = db.func.sum(Challenges.value).label('score')
quickest = db.func.max(Solves.date).label('quickest')
teams = db.session.query(Solves.teamid, Teams.name, score).join(Teams).join(Challenges).filter(Teams.banned == None).group_by(Solves.teamid).order_by(score.desc(), quickest)
db.session.close()
json = {'teams':[]}
for i, x in enumerate(teams):
json['teams'].append({'place':i+1, 'id':x.teamid, 'name':x.name,'score':int(x.score)})
return jsonify(json)
@app.route('/admin/solves/<teamid>', methods=['GET'])
@admins_only
def admin_solves(teamid="all"):
if teamid == "all":
solves = Solves.query.all()
else:
solves = Solves.query.filter_by(teamid=teamid).all()
db.session.close()
json = {'solves':[]}
for x in solves:
json['solves'].append({'id':x.id, 'chal':x.chal.name, 'chalid':x.chalid,'team':x.teamid, 'value': x.chal.value, 'category':x.chal.category, 'time':unix_time(x.date)})
return jsonify(json)
@app.route('/admin/solves/<teamid>/<chalid>/delete', methods=['POST'])
@admins_only
def delete_solve(teamid, chalid):
solve = Solves.query.filter_by(teamid=teamid, chalid=chalid).first()
db.session.delete(solve)
db.session.commit()
return '1'
@app.route('/admin/statistics', methods=['GET'])
@admins_only
def admin_stats():
db.session.commit()
teams_registered = db.session.query(db.func.count(Teams.id)).first()[0]
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
challenge_count = db.session.query(db.func.count(Challenges.id)).first()[0]
most_solved_chal = Solves.query.add_columns(db.func.count(Solves.chalid).label('solves')).group_by(Solves.chalid).order_by('solves DESC').first()
least_solved_chal = Challenges.query.add_columns(db.func.count(Solves.chalid).label('solves')).outerjoin(Solves).group_by(Challenges.id).order_by('solves ASC').first()
db.session.close()
return render_template('admin/statistics.html', team_count=teams_registered,
wrong_count=wrong_count,
solve_count=solve_count,
challenge_count=challenge_count,
most_solved=most_solved_chal,
least_solved=least_solved_chal
)
@app.route('/admin/wrong_keys/<page>', methods=['GET'])
@admins_only
def admin_wrong_key(page='1'):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * ( page - 1 )
page_end = results_per_page * ( page - 1 ) + results_per_page
wrong_keys = WrongKeys.query.add_columns(WrongKeys.flag, WrongKeys.team, WrongKeys.date,\
Challenges.name.label('chal_name'), Teams.name.label('team_name')).\
join(Challenges).join(Teams).order_by('team_name ASC').slice(page_start, page_end).all()
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
pages = int(wrong_count / results_per_page) + (wrong_count % results_per_page > 0)
return render_template('admin/wrong_keys.html', wrong_keys=wrong_keys, pages=pages)
@app.route('/admin/correct_keys/<page>', methods=['GET'])
@admins_only
def admin_correct_key(page='1'):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
solves = Solves.query.add_columns(Solves.chalid, Solves.teamid, Solves.date, Solves.flag, \
Challenges.name.label('chal_name'), Teams.name.label('team_name')).\
join(Challenges).join(Teams).order_by('team_name ASC').slice(page_start, page_end).all()
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
pages = int(solve_count / results_per_page) + (solve_count % results_per_page > 0)
return render_template('admin/correct_keys.html', solves=solves, pages=pages)
@app.route('/admin/fails/<teamid>', methods=['GET'])
@admins_only
def admin_fails(teamid='all'):
if teamid == "all":
fails = WrongKeys.query.count()
solves = Solves.query.count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
else:
fails = WrongKeys.query.filter_by(team=teamid).count()
solves = Solves.query.filter_by(teamid=teamid).count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
    @app.route('/admin/chal/new', methods=['POST'])
    @admins_only
    def admin_create_chal():
files = request.files.getlist('files[]')
# Create challenge
chal = Challenges(request.form['name'], request.form['desc'], request.form['value'], request.form['category'])
db.session.add(chal)
db.session.commit()
# Add keys
key = Keys(chal.id, request.form['key'], request.form['key_type[0]'])
db.session.add(key)
db.session.commit()
for f in files:
filename = secure_filename(f.filename)
if len(filename) <= 0:
continue
md5hash = hashlib.md5(filename).hexdigest()
if not os.path.exists(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash)):
os.makedirs(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash))
f.save(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash, filename))
db_f = Files(chal.id, os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash, filename))
db.session.add(db_f)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
    @app.route('/admin/chal/delete', methods=['POST'])
    @admins_only
    def admin_delete_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first()
if challenge:
WrongKeys.query.filter_by(chal=challenge.id).delete()
Solves.query.filter_by(chalid=challenge.id).delete()
Keys.query.filter_by(chal=challenge.id).delete()
files = Files.query.filter_by(chal=challenge.id).all()
Files.query.filter_by(chal=challenge.id).delete()
for file in files:
folder = os.path.dirname(file.location)
rmdir(folder)
Tags.query.filter_by(chal=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
db.session.commit()
db.session.close()
return '1'
    @app.route('/admin/chal/update', methods=['POST'])
    @admins_only
    def admin_update_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first()
challenge.name = request.form['name']
challenge.description = request.form['desc']
challenge.value = request.form['value']
challenge.category = request.form['category']
db.session.add(challenge)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
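# Example wiring (sketch; the real app factory lives elsewhere in CTFd):
#   from flask import Flask
#   app = Flask(__name__)
#   init_admin(app)  # registers all of the /admin/* routes defined above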
| apache-2.0 | 8,646,510,302,047,544,000 | 39.631052 | 181 | 0.567138 | false |
BeyondTheClouds/rome | test/nova/tests/compute_nodes_tests.py | 1 | 10676 | __author__ = 'jonathan'
from misc import ModelsObjectComparatorMixin
from nova import test
from oslo.serialization import jsonutils
from test.nova import _fixtures as models
from lib.rome.core.orm.query import Query as RomeQuery
from lib.rome.core.session.session import Session as RomeSession
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from nova import exception
from nova import db
import copy
import unittest
from nova import context
from oslo.config import cfg
CONF = cfg.CONF
from lib.rome.core.orm.query import Query
import test.nova._fixtures as models
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
map(lambda x: x.delete(), Query(models.Service).all())
map(lambda x: x.delete(), Query(models.ComputeNode).all())
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=CONF.compute_topic, report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
extra_resources='',
stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
# self.flags(reserved_host_memory_mb=0)
# self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
# def test_compute_node_create(self):
# self._assertEqualObjects(self.compute_node_dict, self.item,
# ignored_keys=self._ignored_keys + ['stats'])
# new_stats = jsonutils.loads(self.item['stats'])
# self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all(self):
date_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
for no_date_fields in [False, True]:
nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
node_fields = set(node.keys())
if no_date_fields:
self.assertFalse(date_fields & node_fields)
else:
self.assertTrue(date_fields <= node_fields)
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
# def test_compute_node_get_all_deleted_compute_node(self):
# # Create a service and compute node and ensure we can find its stats;
# # delete the service and compute node when done and loop again
# for x in range(2, 5):
# # Create a service
# service_data = self.service_dict.copy()
# service_data['host'] = 'host-%s' % x
# service = db.service_create(self.ctxt, service_data)
#
# # Create a compute node
# compute_node_data = self.compute_node_dict.copy()
# compute_node_data['service_id'] = service['id']
# compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
# compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
# node = db.compute_node_create(self.ctxt, compute_node_data)
#
# # Ensure the "new" compute node is found
# nodes = db.compute_node_get_all(self.ctxt, False)
# self.assertEqual(2, len(nodes))
# found = None
# for n in nodes:
# if n['id'] == node['id']:
# found = n
# break
# self.assertIsNotNone(found)
# # Now ensure the match has stats!
# self.assertNotEqual(jsonutils.loads(found['stats']), {})
#
# # Now delete the newly-created compute node to ensure the related
# # compute node stats are wiped in a cascaded fashion
# db.compute_node_delete(self.ctxt, node['id'])
#
# # Clean up the service
# db.service_destroy(self.ctxt, service['id'])
#
# def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
# service_data = self.service_dict.copy()
# service_data['host'] = 'host2'
# service = db.service_create(self.ctxt, service_data)
#
# existing_node = dict(self.item.iteritems())
# existing_node['service'] = dict(self.service.iteritems())
# expected = [existing_node]
#
# for name in ['bm_node1', 'bm_node2']:
# compute_node_data = self.compute_node_dict.copy()
# compute_node_data['service_id'] = service['id']
# compute_node_data['stats'] = jsonutils.dumps(self.stats)
# compute_node_data['hypervisor_hostname'] = 'bm_node_1'
# node = db.compute_node_create(self.ctxt, compute_node_data)
#
# node = dict(node.iteritems())
# node['service'] = dict(service.iteritems())
#
# expected.append(node)
#
# result = sorted(db.compute_node_get_all(self.ctxt, False),
# key=lambda n: n['hypervisor_hostname'])
#
# self._assertEqualListsOfObjects(expected, result,
# ignored_keys=['stats'])
#
# def test_compute_node_get(self):
# compute_node_id = self.item['id']
# node = db.compute_node_get(self.ctxt, compute_node_id)
# self._assertEqualObjects(self.compute_node_dict, node,
# ignored_keys=self._ignored_keys + ['stats', 'service'])
# new_stats = jsonutils.loads(node['stats'])
# self.assertEqual(self.stats, new_stats)
#
# def test_compute_node_update(self):
# compute_node_id = self.item['id']
# stats = jsonutils.loads(self.item['stats'])
# # change some values:
# stats['num_instances'] = 8
# stats['num_tribbles'] = 1
# values = {
# 'vcpus': 4,
# 'stats': jsonutils.dumps(stats),
# }
# item_updated = db.compute_node_update(self.ctxt, compute_node_id,
# values)
# self.assertEqual(4, item_updated['vcpus'])
# new_stats = jsonutils.loads(item_updated['stats'])
# self.assertEqual(stats, new_stats)
#
# def test_compute_node_delete(self):
# compute_node_id = self.item['id']
# db.compute_node_delete(self.ctxt, compute_node_id)
# nodes = db.compute_node_get_all(self.ctxt)
# self.assertEqual(len(nodes), 0)
#
# def test_compute_node_search_by_hypervisor(self):
# nodes_created = []
# new_service = copy.copy(self.service_dict)
# for i in xrange(3):
# new_service['binary'] += str(i)
# new_service['topic'] += str(i)
# service = db.service_create(self.ctxt, new_service)
# self.compute_node_dict['service_id'] = service['id']
# self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
# self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
# node = db.compute_node_create(self.ctxt, self.compute_node_dict)
# nodes_created.append(node)
# nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
# self.assertEqual(3, len(nodes))
# self._assertEqualListsOfObjects(nodes_created, nodes,
# ignored_keys=self._ignored_keys + ['stats', 'service'])
#
# def test_compute_node_statistics(self):
# stats = db.compute_node_statistics(self.ctxt)
# self.assertEqual(stats.pop('count'), 1)
# for k, v in stats.iteritems():
# self.assertEqual(v, self.item[k])
#
# def test_compute_node_statistics_disabled_service(self):
# serv = db.service_get_by_host_and_topic(
# self.ctxt, 'host1', CONF.compute_topic)
# db.service_update(self.ctxt, serv['id'], {'disabled': True})
# stats = db.compute_node_statistics(self.ctxt)
# self.assertEqual(stats.pop('count'), 0)
#
# def test_compute_node_not_found(self):
# self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
# self.ctxt, 100500)
#
# def test_compute_node_update_always_updates_updated_at(self):
# item_updated = db.compute_node_update(self.ctxt,
# self.item['id'], {})
# self.assertNotEqual(self.item['updated_at'],
# item_updated['updated_at'])
#
# def test_compute_node_update_override_updated_at(self):
# # Update the record once so updated_at is set.
# first = db.compute_node_update(self.ctxt, self.item['id'],
# {'free_ram_mb': '12'})
# self.assertIsNotNone(first['updated_at'])
#
# # Update a second time. Make sure that the updated_at value we send
# # is overridden.
# second = db.compute_node_update(self.ctxt, self.item['id'],
# {'updated_at': first.updated_at,
# 'free_ram_mb': '13'})
# self.assertNotEqual(first['updated_at'], second['updated_at'])
if __name__ == "__main__":
unittest.main() | mit | 8,735,583,337,094,786,000 | 45.220779 | 81 | 0.550019 | false |
kalmanolah/nite | nite/logging.py | 1 | 1361 | """Logging module."""
import logging
import logging.config
default_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'extended': {
'format': '%(asctime)s %(name)s.%(levelname)s[%(process)s]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple_colored': {
'()': 'colorlog.ColoredFormatter',
'format': '%(asctime)s %(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s',
'datefmt': '%H:%M:%S'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'simple_colored'
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/nite.log',
'maxBytes': 10485760,
'backupCount': 5,
'formatter': 'extended',
'encoding': 'utf8'
}
},
'root': {
'level': 'INFO',
'handlers': ['console', 'file']
}
}
def configure_logging(config=None, debug=False):
"""Configure logging with a provided configuration."""
cfg = default_config.copy()
if config:
cfg.update(config)
logging.config.dictConfig(cfg)
if debug:
logging.root.setLevel(logging.DEBUG)
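# Hedged usage sketch (not part of the original module): the override values and
# logger name are illustrative. Note that the default config expects the
# `colorlog` package and write access to /var/log/nite.log.
if __name__ == '__main__':
    configure_logging(config={'root': {'level': 'WARNING', 'handlers': ['console']}},
                      debug=True)
    logging.getLogger('nite.example').debug('logging configured')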
| mit | 6,925,608,377,581,118,000 | 25.173077 | 94 | 0.50698 | false |
google/nerfactor | third_party/xiuminglib/xiuminglib/io/objmtl.py | 1 | 21250 | # pylint: disable=len-as-condition
from os.path import basename, dirname, join
from shutil import copy
import numpy as np
from .. import os as xm_os
from ..imprt import preset_import
from ..log import get_logger
logger = get_logger()
class Obj:
"""Wavefront .obj Object.
Face, vertex, or other indices here all start from 1.
Attributes:
o (str)
v (numpy.ndarray)
f (list)
vn (numpy.ndarray)
fn (list)
vt (numpy.ndarray)
ft (list)
s (bool)
mtllib (str)
usemtl (str)
diffuse_map_path (str)
diffuse_map_scale (float)
"""
def __init__(
self, o=None, v=None, f=None, vn=None, fn=None, vt=None, ft=None,
s=False, mtllib=None, usemtl=None, diffuse_map_path=None,
diffuse_map_scale=1):
"""
Args:
o (str, optional): Object name.
v (numpy.ndarray, optional): Vertex coordinates.
f (list, optional): Faces' vertex indices (1-indexed), e.g.,
``[[1, 2, 3], [4, 5, 6], [7, 8, 9, 10], ...]``.
vn (numpy.ndarray, optional): Vertex normals of shape N-by-3,
normalized or not.
fn (list, optional): Faces' vertex normal indices, e.g.,
``[[1, 1, 1], [], [2, 2, 2, 2], ...]``. Must be of the same
length as ``f``.
vt (numpy.ndarray, optional): Vertex texture coordinates of shape
N-by-2. Coordinates must be normalized to :math:`[0, 1]`.
ft (list, optional): Faces' texture vertex indices, e.g.,
``[[1, 2, 3], [4, 5, 6], [], ...]``. Must be of the same length
as ``f``.
s (bool, optional): Group smoothing.
mtllib (str, optional): Material file name, e.g., ``'cube.mtl'``.
usemtl (str, optional): Material name (defined in .mtl file).
diffuse_map_path (str, optional): Path to diffuse texture map.
diffuse_map_scale (float, optional): Scale of diffuse texture map.
"""
self.mtllib = mtllib
self.o = o
# Vertices
if v is not None:
assert (len(v.shape) == 2 and v.shape[1] == 3), "'v' must be *-by-3"
if vt is not None:
assert (len(vt.shape) == 2 and vt.shape[1] == 2), \
"'vt' must be *-by-2"
if vn is not None:
assert (len(vn.shape) == 2 and vn.shape[1] == 3), \
"'vn' must be *-by-3"
self.v = v
self.vt = vt
self.vn = vn
# Faces
if f is not None:
if ft is not None:
assert (len(ft) == len(f)), \
"'ft' must be of the same length as 'f' (use '[]' to fill)"
if fn is not None:
assert (len(fn) == len(f)), \
"'fn' must be of the same length as 'f' (use '[]' to fill)"
self.f = f
self.ft = ft
self.fn = fn
self.usemtl = usemtl
self.s = s
self.diffuse_map_path = diffuse_map_path
self.diffuse_map_scale = diffuse_map_scale
def load_file(self, obj_file):
"""Loads a (basic) .obj file as an object.
Populates attributes with contents read from file.
Args:
obj_file (str): Path to .obj file.
"""
fid = open(obj_file, 'r')
lines = [l.strip('\n') for l in fid.readlines()]
lines = [l for l in lines if len(l) > 0] # remove empty lines
# Check if there's only one object
n_o = len([l for l in lines if l[0] == 'o'])
if n_o > 1:
raise ValueError((
".obj file containing multiple objects is not supported "
"-- consider using ``assimp`` instead"))
# Count for array initializations
n_v = len([l for l in lines if l[:2] == 'v '])
n_vt = len([l for l in lines if l[:3] == 'vt '])
n_vn = len([l for l in lines if l[:3] == 'vn '])
lines_f = [l for l in lines if l[:2] == 'f ']
n_f = len(lines_f)
# Initialize arrays
mtllib = None
o = None
v = np.zeros((n_v, 3))
vt = np.zeros((n_vt, 2))
vn = np.zeros((n_vn, 3))
usemtl = None
s = False
f = [None] * n_f
# If there's no 'ft' or 'fn' for a 'f', a '[]' is inserted as a
# placeholder. This guarantees 'f[i]' always corresponds to 'ft[i]'
# and 'fn[i]'
ft = [None] * n_f
fn = [None] * n_f
# Load data line by line
n_ft, n_fn = 0, 0
i_v, i_vt, i_vn, i_f = 0, 0, 0, 0
for l in lines:
if l[0] == '#': # comment
pass
elif l[:7] == 'mtllib ': # mtl file
mtllib = l[7:]
elif l[:2] == 'o ': # object name
o = l[2:]
elif l[:2] == 'v ': # geometric vertex
v[i_v, :] = [float(x) for x in l[2:].split(' ')]
i_v += 1
elif l[:3] == 'vt ': # texture vertex
vt[i_vt, :] = [float(x) for x in l[3:].split(' ')]
i_vt += 1
elif l[:3] == 'vn ': # normal vector
vn[i_vn, :] = [float(x) for x in l[3:].split(' ')]
i_vn += 1
elif l[:7] == 'usemtl ': # material name
usemtl = l[7:]
elif l[:2] == 's ': # group smoothing
if l[2:] == 'on':
s = True
elif l[:2] == 'f ': # face
n_slashes = l[2:].split(' ')[0].count('/')
if n_slashes == 0: # just f (1 2 3)
f[i_f] = [int(x) for x in l[2:].split(' ')]
ft[i_f] = []
fn[i_f] = []
elif n_slashes == 1: # f and ft (1/1 2/2 3/3)
f[i_f] = [int(x.split('/')[0]) for x in l[2:].split(' ')]
ft[i_f] = [int(x.split('/')[1]) for x in l[2:].split(' ')]
fn[i_f] = []
n_ft += 1
elif n_slashes == 2:
if l[2:].split(' ')[0].count('//') == 1:
# f and fn (1//1 2//1 3//1)
f[i_f] = [
int(x.split('//')[0]) for x in l[2:].split(' ')]
ft[i_f] = []
fn[i_f] = [
int(x.split('//')[1]) for x in l[2:].split(' ')]
n_fn += 1
else:
# f, ft and fn (1/1/1 2/2/1 3/3/1)
f[i_f] = [
int(x.split('/')[0]) for x in l[2:].split(' ')]
ft[i_f] = [
int(x.split('/')[1]) for x in l[2:].split(' ')]
fn[i_f] = [
int(x.split('/')[2]) for x in l[2:].split(' ')]
n_ft += 1
n_fn += 1
i_f += 1
else:
raise ValueError("Unidentified line type: %s" % l)
# Update self
self.mtllib = mtllib
self.o = o
self.v = v
self.vt = vt if vt.shape[0] > 0 else None
self.vn = vn if vn.shape[0] > 0 else None
self.f = f
self.ft = ft if any(ft) else None # any member list not empty
self.fn = fn if any(fn) else None
self.usemtl = usemtl
self.s = s
# Print model info
def print_info(self):
# Basic stats
mtllib = self.mtllib
o = self.o
n_v = self.v.shape[0] if self.v is not None else 0
n_vt = self.vt.shape[0] if self.vt is not None else 0
n_vn = self.vn.shape[0] if self.vn is not None else 0
usemtl = self.usemtl
s = self.s
diffuse_map_path = self.diffuse_map_path
diffuse_map_scale = self.diffuse_map_scale
n_f = len(self.f) if self.f is not None else 0
if self.ft is not None:
n_ft = sum(len(x) > 0 for x in self.ft)
else:
n_ft = 0
if self.fn is not None:
n_fn = sum(len(x) > 0 for x in self.fn)
else:
n_fn = 0
logger.info("-------------------------------------------------------")
logger.info("Object name 'o' %s", o)
logger.info("Material file 'mtllib' %s", mtllib)
logger.info("Material 'usemtl' %s", usemtl)
logger.info("Diffuse texture map 'map_Kd' %s", diffuse_map_path)
logger.info("Diffuse map scale %f", diffuse_map_scale)
logger.info("Group smoothing 's' %r", s)
logger.info("# geometric vertices 'v' %d", n_v)
logger.info("# texture vertices 'vt' %d", n_vt)
logger.info("# normal vectors 'vn' %d", n_vn)
logger.info("# geometric faces 'f x/o/o' %d", n_f)
logger.info("# texture faces 'f o/x/o' %d", n_ft)
logger.info("# normal faces 'f o/o/x' %d", n_fn)
# How many triangles, quads, etc.
if n_f > 0:
logger.info("")
logger.info("Among %d faces:", n_f)
vert_counts = [len(x) for x in self.f]
for c in np.unique(vert_counts):
howmany = vert_counts.count(c)
logger.info(" - %d are formed by %d vertices", howmany, c)
logger.info("-------------------------------------------------------")
# Set vn and fn according to v and f
def set_face_normals(self):
"""Sets face normals according to geometric vertices and their orders
in forming faces.
Returns:
tuple:
- **vn** (*numpy.ndarray*) -- Normal vectors.
- **fn** (*list*) -- Normal faces. Each member list consists of
the same integer, e.g., ``[[1, 1, 1], [2, 2, 2, 2], ...]``.
"""
n_f = len(self.f)
vn = np.zeros((n_f, 3))
fn = [None] * n_f
# For each face
for i, verts_id in enumerate(self.f):
# Vertices must be coplanar to be valid, so we can just pick the
# first three
ind = [x - 1 for x in verts_id[:3]] # in .obj, index starts from 1,
# not 0
verts = self.v[ind, :]
p1p2 = verts[1, :] - verts[0, :]
p1p3 = verts[2, :] - verts[0, :]
normal = np.cross(p1p2, p1p3)
if np.linalg.norm(normal) == 0:
raise ValueError((
"Normal vector of zero length probably due to numerical "
"issues?"))
vn[i, :] = normal / np.linalg.norm(normal) # normalize
fn[i] = [i + 1] * len(verts_id)
# Set normals and return
self.vn = vn
self.fn = fn
logger.info((
"Face normals recalculated with 'v' and 'f' -- 'vn' and 'fn' "
"updated"))
return vn, fn
# Output object to file
def write_file(self, objpath):
"""Writes the current model to a .obj file.
Args:
objpath (str): Path to the output .obj.
Writes
- Output .obj file.
"""
mtllib = self.mtllib
o = self.o
v, vt, vn = self.v, self.vt, self.vn
usemtl = self.usemtl
s = self.s
f, ft, fn = self.f, self.ft, self.fn
# mkdir if necessary
outdir = dirname(objpath)
xm_os.makedirs(outdir)
# Write .obj
with open(objpath, 'w') as fid:
# Material file
if mtllib is not None:
fid.write('mtllib %s\n' % mtllib)
# Object name
fid.write('o %s\n' % o)
# Vertices
for i in range(v.shape[0]):
fid.write('v %f %f %f\n' % tuple(v[i, :]))
if vt is not None:
for i in range(vt.shape[0]):
fid.write('vt %f %f\n' % tuple(vt[i, :]))
if vn is not None:
for i in range(vn.shape[0]):
fid.write('vn %f %f %f\n' % tuple(vn[i, :]))
# Material name
if usemtl is not None:
fid.write('usemtl %s\n' % usemtl)
# Group smoothing
if s:
fid.write('s on\n')
else:
fid.write('s off\n')
# Faces
if ft is None and fn is None: # just f (1 2 3)
for v_id in f:
fid.write(('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
elif ft is not None and fn is None:
# f and ft (1/1 2/2 3/3 or 1 2 3)
for i, v_id in enumerate(f):
vt_id = ft[i]
if len(vt_id) == len(v_id):
fid.write((
'f' + ' %d/%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vt_id) for x in pair]))
elif not vt_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"'ft[%d]', not empty, doesn't match length of "
"'f[%d]'") % (i, i))
elif ft is None and fn is not None:
# f and fn (1//1 2//1 3//1 or 1 2 3)
for i, v_id in enumerate(f):
vn_id = fn[i]
if len(vn_id) == len(v_id):
fid.write((
'f' + ' %d//%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vn_id) for x in pair]))
elif not vn_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"'fn[%d]', not empty, doesn't match length of "
"'f[%d]'") % (i, i))
elif ft is not None and fn is not None:
# f, ft and fn (1/1/1 2/2/1 3/3/1 or 1/1 2/2 3/3 or
# 1//1 2//1 3//1 or 1 2 3)
for i, v_id in enumerate(f):
vt_id = ft[i]
vn_id = fn[i]
if len(vt_id) == len(v_id) and len(vn_id) == len(v_id):
fid.write((
'f' + ' %d/%d/%d' * len(v_id) + '\n') % tuple(
[x for triple in zip(v_id, vt_id, vn_id)
for x in triple]))
elif len(vt_id) == len(v_id) and not vn_id:
fid.write((
'f' + ' %d/%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vt_id) for x in pair]))
elif not vt_id and len(vn_id) == len(v_id):
fid.write((
'f' + ' %d//%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vn_id) for x in pair]))
elif not vt_id and not vn_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"If not empty, 'ft[%d]' or 'fn[%d]' doesn't match "
"length of 'f[%d]'") % (i, i, i))
logger.info("Done writing to %s", objpath)
class Mtl:
r"""Wavefront .mtl object.
Attributes:
mtlfile (str): Material file name, set to ``obj.mtllib``.
newmtl (str): Material name, set to ``obj.usemtl``.
map_Kd_path (str): Path to the diffuse map, set to
``obj.diffuse_map_path``.
map_Kd_scale (float): Scale of the diffuse map, set to
``obj.diffuse_map_scale``.
Ns (float)
Ka (tuple)
Kd (tuple)
Ks (tuple)
Ni (float)
d (float)
illum (int)
"""
def __init__(
self, obj, Ns=96.078431, Ka=(1, 1, 1), Kd=(0.64, 0.64, 0.64),
Ks=(0.5, 0.5, 0.5), Ni=1, d=1, illum=2):
r"""
Args:
obj (Obj): ``Obj`` object for which this ``Mtl`` object is created.
Ns (float, optional): Specular exponent, normally
:math:`\in[0, 1000]`.
Ka (tuple, optional): Ambient reflectivity, each float normally
:math:`\in[0, 1]`. Values outside increase or decrease
relectivity accordingly.
Kd (tuple, optional): Diffuse reflectivity. Same range as ``Ka``.
Ks (tuple, optional): Specular reflectivity. Same range as ``Ka``.
Ni (float, optional): Optical density, a.k.a. index of refraction
:math:`\in[0.001, 10]`. 1 means light doesn't bend as it passes
through. Increasing it increases the amount of bending. Glass
has an index of refraction of about 1.5. Values of less than 1.0
produce bizarre results and are not recommended.
d (float, optional): Amount this material dissolves into the
background :math:`\in[0, 1]`. 1.0 is fully opaque (default),
and 0 is fully dissolved (completely transparent). Unlike a real
transparent material, the dissolve does not depend upon material
thickness, nor does it have any spectral character. Dissolve
works on all illumination models.
illum (int, optional): Illumination model
:math:`\in[0, 1, ..., 10]`.
"""
self.mtlfile = obj.mtllib
self.newmtl = obj.usemtl
self.map_Kd_path = obj.diffuse_map_path
self.map_Kd_scale = obj.diffuse_map_scale
self.Ns = Ns
self.Ka = Ka
self.Kd = Kd
self.Ks = Ks
self.Ni = Ni
self.d = d
self.illum = illum
def print_info(self):
logger.info("---------------------------------------------------------")
logger.info("Material file %s", self.mtlfile)
logger.info("Material name 'newmtl' %s", self.newmtl)
logger.info("Diffuse texture map 'map_Kd' %s", self.map_Kd_path)
logger.info("Diffuse map scale %f", self.map_Kd_scale)
logger.info("Specular exponent 'Ns' %f", self.Ns)
logger.info("Ambient reflectivity 'Ka' %s", self.Ka)
logger.info("Diffuse reflectivity 'Kd' %s", self.Kd)
logger.info("Specular reflectivity 'Ks' %s", self.Ks)
logger.info("Refraction index 'Ni' %s", self.Ni)
logger.info("Dissolve 'd' %f", self.d)
logger.info("Illumination model 'illum' %d", self.illum)
logger.info("---------------------------------------------------------")
def write_file(self, outdir):
"""Unit tests that can also serve as example usage.
Args:
outdir (str): Output directory.
Writes
- Output .mtl file.
"""
cv2 = preset_import('cv2', assert_success=True)
# Validate inputs
assert (self.mtlfile is not None and self.newmtl is not None), \
"'mtlfile' and 'newmtl' must not be 'None'"
# mkdir if necessary
xm_os.makedirs(outdir)
# Write .mtl
mtlpath = join(outdir, self.mtlfile)
with open(mtlpath, 'w') as fid:
fid.write('newmtl %s\n' % self.newmtl)
fid.write('Ns %f\n' % self.Ns)
fid.write('Ka %f %f %f\n' % self.Ka)
fid.write('Kd %f %f %f\n' % self.Kd)
fid.write('Ks %f %f %f\n' % self.Ks)
fid.write('Ni %f\n' % self.Ni)
fid.write('d %f\n' % self.d)
fid.write('illum %d\n' % self.illum)
map_Kd_path = self.map_Kd_path
map_Kd_scale = self.map_Kd_scale
if map_Kd_path is not None:
fid.write('map_Kd %s\n' % basename(map_Kd_path))
if map_Kd_scale == 1:
copy(map_Kd_path, outdir)
else:
im = cv2.imread(map_Kd_path, cv2.IMREAD_UNCHANGED) # TODO: switch to xm.io.img
im = cv2.resize(im, None, fx=map_Kd_scale, fy=map_Kd_scale) # TODO: switch to xm.img
cv2.imwrite(join(outdir, basename(map_Kd_path)), im) # TODO: switch to xm.io.img
logger.info("Done writing to %s", mtlpath)
def main():
"""Unit tests that can also serve as example usage."""
objf = '../../../toy-data/obj-mtl_cube/cube.obj'
myobj = Obj()
myobj.print_info()
myobj.load_file(objf)
myobj.print_info()
objf_reproduce = objf.replace('.obj', '_reproduce.obj')
myobj.write_file(objf_reproduce)
myobj.set_face_normals()
myobj.print_info()
if __name__ == '__main__':
main()
| apache-2.0 | -7,813,962,791,409,773,000 | 40.830709 | 104 | 0.438729 | false |
pybursa/homeworks | s_shybkoy/hw5/hw5_task1.py | 1 | 2975 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Task 1: a classy Person.
REQUIREMENTS:
Implement a Person class that represents an entry in a contact book.
The class has 4 attributes:
- surname - string - the contact's last name (required)
- first_name - string - the contact's first name (required)
- nickname - string - a nickname (optional)
- birth_date - a datetime.date object (required)
Each call of the class must create an instance of the class with the given
attributes.
The class also has 2 methods:
- get_age() - computes the contact's age in full years at the date of the call
and returns a string such as "27";
- get_fullname() - returns a string with the contact's full name
(surname + first name);
"""
__author__ = "Sergei Shybkoi"
__copyright__ = "Copyright 2014, The Homework Project"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "2014-11-18"
import datetime
class Person(object):
u"""Класс Person"""
def __init__(self, surname, first_name, birth_date, nickname=None):
u"""Инишн класса"""
try:
var_date = datetime.datetime.strptime(birth_date, "%Y-%m-%d")
res_date = datetime.date(var_date.year,
var_date.month, var_date.day)
except TypeError:
print "Incorrect type of birthday date!"
res_date = None
except ValueError:
print "Wrong value of birthday date!"
res_date = None
self.surname = surname
self.first_name = first_name
self.birth_date = res_date
if nickname is not None:
self.nickname = nickname
def get_age(self):
u"""Метод класса подсчитывает и выводит количество полных лет"""
if self.birth_date is not None:
today_date = datetime.date.today()
delta = today_date.year - self.birth_date.year
            if (today_date.month, today_date.day) < \
                    (self.birth_date.month, self.birth_date.day):
                delta -= 1
print "Age:", delta
return str(delta)
else:
print "No correct data about person's birthday."
return "0"
def get_fullname(self):
u"""Метод выводит и возвращаем полное имя экземпляра класса Person"""
print self.surname, self.first_name
return self.surname + " " + self.first_name
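if __name__ == "__main__":
    # Hedged usage sketch (not part of the original homework file); the name,
    # nickname and birth date below are made up.
    person = Person("Ivanov", "Ivan", "1990-05-17", nickname="vanya")
    person.get_fullname()
    person.get_age()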
| gpl-2.0 | -1,922,213,597,879,849,000 | 32.814286 | 77 | 0.60197 | false |
pmisik/buildbot | master/buildbot/changes/gitpoller.py | 1 | 17640 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import re
import stat
from urllib.parse import quote as urlquote
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import private_tempdir
from buildbot.util import runprocess
from buildbot.util.git import GitMixin
from buildbot.util.git import getSshKnownHostsContents
from buildbot.util.misc import writeLocalFile
from buildbot.util.state import StateMixin
class GitError(Exception):
"""Raised when git exits with code 128."""
class GitPoller(base.PollingChangeSource, StateMixin, GitMixin):
"""This source will poll a remote git repo for changes and submit
them to the change master."""
compare_attrs = ("repourl", "branches", "workdir", "pollInterval", "gitbin", "usetimestamps",
"category", "project", "pollAtLaunch", "buildPushesWithNoCommits",
"sshPrivateKey", "sshHostKey", "sshKnownHosts", "pollRandomDelayMin",
"pollRandomDelayMax")
secrets = ("sshPrivateKey", "sshHostKey", "sshKnownHosts")
def __init__(self, repourl, branches=None, branch=None, workdir=None, pollInterval=10 * 60,
gitbin="git", usetimestamps=True, category=None, project=None, pollinterval=-2,
fetch_refspec=None, encoding="utf-8", name=None, pollAtLaunch=False,
buildPushesWithNoCommits=False, only_tags=False, sshPrivateKey=None,
sshHostKey=None, sshKnownHosts=None, pollRandomDelayMin=0, pollRandomDelayMax=0):
# for backward compatibility; the parameter used to be spelled with 'i'
if pollinterval != -2:
pollInterval = pollinterval
if name is None:
name = repourl
super().__init__(name=name, pollInterval=pollInterval, pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax, sshPrivateKey=sshPrivateKey,
sshHostKey=sshHostKey, sshKnownHosts=sshKnownHosts)
if project is None:
project = ''
if only_tags and (branch or branches):
config.error("GitPoller: can't specify only_tags and branch/branches")
if branch and branches:
config.error("GitPoller: can't specify both branch and branches")
elif branch:
branches = [branch]
elif not branches:
if only_tags:
branches = lambda ref: ref.startswith('refs/tags/') # noqa: E731
else:
branches = ['master']
self.repourl = repourl
self.branches = branches
self.encoding = encoding
self.buildPushesWithNoCommits = buildPushesWithNoCommits
self.gitbin = gitbin
self.workdir = workdir
self.usetimestamps = usetimestamps
self.category = category if callable(
category) else bytes2unicode(category, encoding=self.encoding)
self.project = bytes2unicode(project, encoding=self.encoding)
self.changeCount = 0
self.lastRev = {}
self.sshPrivateKey = sshPrivateKey
self.sshHostKey = sshHostKey
self.sshKnownHosts = sshKnownHosts
self.setupGit(logname='GitPoller')
if fetch_refspec is not None:
config.error("GitPoller: fetch_refspec is no longer supported. "
"Instead, only the given branches are downloaded.")
if self.workdir is None:
self.workdir = 'gitpoller-work'
@defer.inlineCallbacks
def _checkGitFeatures(self):
stdout = yield self._dovccmd('--version', [])
self.parseGitFeatures(stdout)
if not self.gitInstalled:
raise EnvironmentError('Git is not installed')
if (self.sshPrivateKey is not None and
not self.supportsSshPrivateKeyAsEnvOption):
raise EnvironmentError('SSH private keys require Git 2.3.0 or newer')
@defer.inlineCallbacks
def activate(self):
# make our workdir absolute, relative to the master's basedir
if not os.path.isabs(self.workdir):
self.workdir = os.path.join(self.master.basedir, self.workdir)
log.msg("gitpoller: using workdir '{}'".format(self.workdir))
try:
self.lastRev = yield self.getState('lastRev', {})
super().activate()
except Exception as e:
log.err(e, 'while initializing GitPoller repository')
def describe(self):
str = ('GitPoller watching the remote git repository ' +
bytes2unicode(self.repourl, self.encoding))
if self.branches:
if self.branches is True:
str += ', branches: ALL'
elif not callable(self.branches):
str += ', branches: ' + ', '.join(self.branches)
if not self.master:
str += " [STOPPED - check log]"
return str
def _getBranches(self):
d = self._dovccmd('ls-remote', ['--refs', self.repourl])
@d.addCallback
def parseRemote(rows):
branches = []
for row in rows.splitlines():
if '\t' not in row:
# Not a useful line
continue
sha, ref = row.split("\t")
branches.append(ref)
return branches
return d
def _headsFilter(self, branch):
"""Filter out remote references that don't begin with 'refs/heads'."""
return branch.startswith("refs/heads/")
def _removeHeads(self, branch):
"""Remove 'refs/heads/' prefix from remote references."""
if branch.startswith("refs/heads/"):
branch = branch[11:]
return branch
def _trackerBranch(self, branch):
# manually quote tilde for Python 3.7
url = urlquote(self.repourl, '').replace('~', '%7E')
return "refs/buildbot/{}/{}".format(url, self._removeHeads(branch))
def poll_should_exit(self):
# A single gitpoller loop may take a while on a loaded master, which would block
# reconfiguration, so we try to exit early.
return not self.doPoll.running
@defer.inlineCallbacks
def poll(self):
yield self._checkGitFeatures()
try:
yield self._dovccmd('init', ['--bare', self.workdir])
except GitError as e:
log.msg(e.args[0])
return
branches = self.branches if self.branches else []
remote_refs = yield self._getBranches()
if self.poll_should_exit():
return
if branches is True or callable(branches):
if callable(self.branches):
branches = [b for b in remote_refs if self.branches(b)]
else:
branches = [b for b in remote_refs if self._headsFilter(b)]
elif branches and remote_refs:
remote_branches = [self._removeHeads(b) for b in remote_refs]
branches = sorted(list(set(branches) & set(remote_branches)))
refspecs = [
'+{}:{}'.format(self._removeHeads(branch), self._trackerBranch(branch))
for branch in branches
]
try:
yield self._dovccmd('fetch', [self.repourl] + refspecs,
path=self.workdir)
except GitError as e:
log.msg(e.args[0])
return
revs = {}
log.msg('gitpoller: processing changes from "{}"'.format(self.repourl))
for branch in branches:
try:
if self.poll_should_exit(): # pragma: no cover
# Note that we still want to update the last known revisions for the branches
# we did process
break
rev = yield self._dovccmd(
'rev-parse', [self._trackerBranch(branch)], path=self.workdir)
revs[branch] = bytes2unicode(rev, self.encoding)
yield self._process_changes(revs[branch], branch)
except Exception:
log.err(_why="trying to poll branch {} of {}".format(
branch, self.repourl))
self.lastRev.update(revs)
yield self.setState('lastRev', self.lastRev)
def _get_commit_comments(self, rev):
args = ['--no-walk', r'--format=%s%n%b', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
return d
def _get_commit_timestamp(self, rev):
# unix timestamp
args = ['--no-walk', r'--format=%ct', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if self.usetimestamps:
try:
stamp = int(git_output)
except Exception as e:
log.msg(('gitpoller: caught exception converting output \'{}\' to timestamp'
).format(git_output))
raise e
return stamp
return None
return d
def _get_commit_files(self, rev):
args = ['--name-only', '--no-walk', r'--format=%n', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
def decode_file(file):
# git use octal char sequences in quotes when non ASCII
match = re.match('^"(.*)"$', file)
if match:
file = bytes2unicode(match.groups()[0], encoding=self.encoding,
errors='unicode_escape')
return bytes2unicode(file, encoding=self.encoding)
@d.addCallback
def process(git_output):
fileList = [decode_file(file)
for file in
[s for s in git_output.splitlines() if len(s)]]
return fileList
return d
def _get_commit_author(self, rev):
args = ['--no-walk', r'--format=%aN <%aE>', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if not git_output:
raise EnvironmentError('could not get commit author for rev')
return git_output
return d
@defer.inlineCallbacks
def _get_commit_committer(self, rev):
args = ['--no-walk', r'--format=%cN <%cE>', rev, '--']
res = yield self._dovccmd('log', args, path=self.workdir)
if not res:
raise EnvironmentError('could not get commit committer for rev')
return res
@defer.inlineCallbacks
def _process_changes(self, newRev, branch):
"""
Read changes since last change.
- Read list of commit hashes.
- Extract details from each commit.
- Add changes to database.
"""
# initial run, don't parse all history
if not self.lastRev:
return
# get the change list
revListArgs = (['--ignore-missing'] +
['--format=%H', '{}'.format(newRev)] +
['^' + rev
for rev in sorted(self.lastRev.values())] +
['--'])
self.changeCount = 0
results = yield self._dovccmd('log', revListArgs, path=self.workdir)
# process oldest change first
revList = results.split()
revList.reverse()
if self.buildPushesWithNoCommits and not revList:
existingRev = self.lastRev.get(branch)
if existingRev != newRev:
revList = [newRev]
if existingRev is None:
# This branch was completely unknown, rebuild
log.msg('gitpoller: rebuilding {} for new branch "{}"'.format(
newRev, branch))
else:
# This branch is known, but it now points to a different
# commit than last time we saw it, rebuild.
log.msg('gitpoller: rebuilding {} for updated branch "{}"'.format(
newRev, branch))
self.changeCount = len(revList)
self.lastRev[branch] = newRev
if self.changeCount:
log.msg('gitpoller: processing {} changes: {} from "{}" branch "{}"'.format(
self.changeCount, revList, self.repourl, branch))
for rev in revList:
dl = defer.DeferredList([
self._get_commit_timestamp(rev),
self._get_commit_author(rev),
self._get_commit_committer(rev),
self._get_commit_files(rev),
self._get_commit_comments(rev),
], consumeErrors=True)
results = yield dl
# check for failures
failures = [r[1] for r in results if not r[0]]
if failures:
for failure in failures:
log.err(
failure, "while processing changes for {} {}".format(newRev, branch))
# just fail on the first error; they're probably all related!
failures[0].raiseException()
timestamp, author, committer, files, comments = [r[1] for r in results]
yield self.master.data.updates.addChange(
author=author,
committer=committer,
revision=bytes2unicode(rev, encoding=self.encoding),
files=files, comments=comments, when_timestamp=timestamp,
branch=bytes2unicode(self._removeHeads(branch)),
project=self.project,
repository=bytes2unicode(self.repourl, encoding=self.encoding),
category=self.category, src='git')
def _isSshPrivateKeyNeededForCommand(self, command):
commandsThatNeedKey = [
'fetch',
'ls-remote',
]
if self.sshPrivateKey is not None and command in commandsThatNeedKey:
return True
return False
def _downloadSshPrivateKey(self, keyPath):
# We change the permissions of the key file to be user-readable only so
# that ssh does not complain. This is not used for security because the
# parent directory will have proper permissions.
writeLocalFile(keyPath, self.sshPrivateKey, mode=stat.S_IRUSR)
def _downloadSshKnownHosts(self, path):
if self.sshKnownHosts is not None:
contents = self.sshKnownHosts
else:
contents = getSshKnownHostsContents(self.sshHostKey)
writeLocalFile(path, contents)
def _getSshPrivateKeyPath(self, ssh_data_path):
return os.path.join(ssh_data_path, 'ssh-key')
def _getSshKnownHostsPath(self, ssh_data_path):
return os.path.join(ssh_data_path, 'ssh-known-hosts')
@defer.inlineCallbacks
def _dovccmd(self, command, args, path=None):
if self._isSshPrivateKeyNeededForCommand(command):
with private_tempdir.PrivateTemporaryDirectory(
dir=self.workdir, prefix='.buildbot-ssh') as tmp_path:
stdout = yield self._dovccmdImpl(command, args, path, tmp_path)
else:
stdout = yield self._dovccmdImpl(command, args, path, None)
return stdout
@defer.inlineCallbacks
def _dovccmdImpl(self, command, args, path, ssh_workdir):
full_args = []
full_env = os.environ.copy()
if self._isSshPrivateKeyNeededForCommand(command):
key_path = self._getSshPrivateKeyPath(ssh_workdir)
self._downloadSshPrivateKey(key_path)
known_hosts_path = None
if self.sshHostKey is not None or self.sshKnownHosts is not None:
known_hosts_path = self._getSshKnownHostsPath(ssh_workdir)
self._downloadSshKnownHosts(known_hosts_path)
self.adjustCommandParamsForSshPrivateKey(full_args, full_env,
key_path, None,
known_hosts_path)
full_args += [command] + args
res = yield runprocess.run_process(self.master.reactor, [self.gitbin] + full_args, path,
env=full_env)
(code, stdout, stderr) = res
stdout = bytes2unicode(stdout, self.encoding)
stderr = bytes2unicode(stderr, self.encoding)
if code != 0:
if code == 128:
raise GitError('command {} in {} on repourl {} failed with exit code {}: {}'.format(
full_args, path, self.repourl, code, stderr))
raise EnvironmentError(('command {} in {} on repourl {} failed with exit code {}: {}'
).format(full_args, path, self.repourl, code, stderr))
return stdout.strip()
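# Hedged configuration sketch (repository URL, branch names and interval are
# illustrative; `buildbot.plugins.changes` is the usual import path in a
# master.cfg rather than something defined in this module):
#
#     from buildbot.plugins import changes
#     c['change_source'].append(changes.GitPoller(
#         repourl='https://example.com/project.git',
#         branches=['master', 'release'],
#         pollInterval=300,
#         pollAtLaunch=True))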
| gpl-2.0 | -7,909,465,922,960,029,000 | 37.940397 | 100 | 0.579762 | false |
tboyce1/home-assistant | homeassistant/helpers/template.py | 2 | 17501 | """Template helper methods for rendering strings with Home Assistant data."""
from datetime import datetime
import json
import logging
import math
import random
import re
import jinja2
from jinja2 import contextfilter
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import (
ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_UNIT_OF_MEASUREMENT, MATCH_ALL,
STATE_UNKNOWN)
from homeassistant.core import State
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import location as loc_helper
from homeassistant.loader import bind_hass, get_component
from homeassistant.util import convert
from homeassistant.util import dt as dt_util
from homeassistant.util import location as loc_util
from homeassistant.util.async import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
_RE_GET_ENTITIES = re.compile(
r"(?:(?:states\.|(?:is_state|is_state_attr|states)"
r"\((?:[\ \'\"]?))([\w]+\.[\w]+)|([\w]+))", re.I | re.M
)
@bind_hass
def attach(hass, obj):
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, dict):
for child in obj.values():
attach(hass, child)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(value, variables=None):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables)
for item in value]
elif isinstance(value, dict):
return {key: render_complex(item, variables)
for key, item in value.items()}
return value.async_render(variables)
def extract_entities(template, variables=None):
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
extraction_final = []
for result in extraction:
if result[0] == 'trigger.entity_id' and 'trigger' in variables and \
'entity_id' in variables['trigger']:
extraction_final.append(variables['trigger']['entity_id'])
elif result[0]:
extraction_final.append(result[0])
if variables and result[1] in variables and \
isinstance(variables[result[1]], str):
extraction_final.append(variables[result[1]])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
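# Illustrative behaviour of extract_entities() (entity ids are hypothetical):
#   extract_entities("{{ states.sensor.temp.state }}")  -> ['sensor.temp']
#   extract_entities("{{ closest(states).name }}")      -> MATCH_ALL, because
#       distance()/closest() templates cannot be analyzed statically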
class Template(object):
"""Class to hold a template and manage caching and rendering."""
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError('Expected template to be a string')
self.template = template
self._compiled_code = None
self._compiled = None
self.hass = hass
def ensure_valid(self):
"""Return if template is valid."""
if self._compiled_code is not None:
return
try:
self._compiled_code = ENV.compile(self.template)
except jinja2.exceptions.TemplateSyntaxError as err:
raise TemplateError(err)
def extract_entities(self, variables=None):
"""Extract all entities for state_changed listener."""
return extract_entities(self.template, variables)
def render(self, variables=None, **kwargs):
"""Render given template."""
if variables is not None:
kwargs.update(variables)
return run_callback_threadsafe(
self.hass.loop, self.async_render, kwargs).result()
def async_render(self, variables=None, **kwargs):
"""Render given template.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
if variables is not None:
kwargs.update(variables)
try:
return self._compiled.render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_render_with_possible_json_value, value,
error_value).result()
# pylint: disable=invalid-name
def async_render_with_possible_json_value(self, value,
error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
variables = {
'value': value
}
try:
variables['value_json'] = json.loads(value)
except ValueError:
pass
try:
return self._compiled.render(variables).strip()
except jinja2.TemplateError as ex:
_LOGGER.error("Error parsing value: %s (value: %s, template: %s)",
ex, value, self.template)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(self):
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, 'hass variable not set on template'
template_methods = TemplateMethods(self.hass)
global_vars = ENV.make_globals({
'closest': template_methods.closest,
'distance': template_methods.distance,
'is_state': self.hass.states.is_state,
'is_state_attr': template_methods.is_state_attr,
'states': AllStates(self.hass),
})
self._compiled = jinja2.Template.from_code(
ENV, self._compiled_code, global_vars, None)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (self.__class__ == other.__class__ and
self.template == other.template and
self.hass == other.hass)
class AllStates(object):
"""Class to expose all HA states as attributes."""
def __init__(self, hass):
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
return DomainStates(self._hass, name)
def __iter__(self):
"""Return all states."""
return iter(
_wrap_state(state) for state in
sorted(self._hass.states.async_all(),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids())
def __call__(self, entity_id):
"""Return the states."""
state = self._hass.states.get(entity_id)
return STATE_UNKNOWN if state is None else state.state
class DomainStates(object):
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass, domain):
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _wrap_state(
self._hass.states.get('{}.{}'.format(self._domain, name)))
def __iter__(self):
"""Return the iteration over all the states."""
return iter(sorted(
(_wrap_state(state) for state in self._hass.states.async_all()
if state.domain == self._domain),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids(self._domain))
class TemplateState(State):
"""Class to represent a state object in a template."""
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, state):
"""Initialize template state."""
self._state = state
@property
def state_with_unit(self):
"""Return the state concatenated with the unit if available."""
state = object.__getattribute__(self, '_state')
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit is None:
return state.state
return "{} {}".format(state.state, unit)
def __getattribute__(self, name):
"""Return an attribute of the state."""
if name in TemplateState.__dict__:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_state'), name)
def __repr__(self):
"""Representation of Template State."""
rep = object.__getattribute__(self, '_state').__repr__()
return '<template ' + rep[1:]
def _wrap_state(state):
"""Wrap a state."""
return None if state is None else TemplateState(state)
class TemplateMethods(object):
"""Class to expose helpers to templates."""
def __init__(self, hass):
"""Initialize the helpers."""
self._hass = hass
def closest(self, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
"""
if len(args) == 1:
latitude = self._hass.config.latitude
longitude = self._hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = self._resolve_state(args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
elif not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s",
point_state)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s",
args[0], args[1])
return None
entities = args[2]
if isinstance(entities, (AllStates, DomainStates)):
states = list(entities)
else:
if isinstance(entities, State):
gr_entity_id = entities.entity_id
else:
gr_entity_id = str(entities)
group = get_component('group')
states = [self._hass.states.get(entity_id) for entity_id
in group.expand_entity_ids(self._hass, [gr_entity_id])]
return _wrap_state(loc_helper.closest(latitude, longitude, states))
def distance(self, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, State):
latitude = value.attributes.get(ATTR_LATITUDE)
longitude = value.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:State does not contains a location: %s",
value)
return None
else:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s",
value)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning("Distance:Unable to process latitude and "
"longitude: %s, %s", value, value_2)
return None
locations.append((latitude, longitude))
if len(locations) == 1:
return self._hass.config.distance(*locations[0])
return self._hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), 'm')
def is_state_attr(self, entity_id, name, value):
"""Test if a state is a specific attribute."""
state_obj = self._hass.states.get(entity_id)
return state_obj is not None and \
state_obj.attributes.get(name) == value
def _resolve_state(self, entity_id_or_state):
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
elif isinstance(entity_id_or_state, str):
return self._hass.states.get(entity_id_or_state)
return None
def forgiving_round(value, precision=0):
"""Round accepted strings."""
try:
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(
dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
ENV.filters['log'] = logarithm
ENV.filters['timestamp_custom'] = timestamp_custom
ENV.filters['timestamp_local'] = timestamp_local
ENV.filters['timestamp_utc'] = timestamp_utc
ENV.filters['is_defined'] = fail_when_undefined
ENV.filters['max'] = max
ENV.filters['min'] = min
ENV.filters['random'] = random_every_time
ENV.globals['log'] = logarithm
ENV.globals['float'] = forgiving_float
ENV.globals['now'] = dt_util.now
ENV.globals['utcnow'] = dt_util.utcnow
ENV.globals['as_timestamp'] = forgiving_as_timestamp
ENV.globals['relative_time'] = dt_util.get_age
ENV.globals['strptime'] = strptime
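# Hedged usage sketch (assumes a running Home Assistant `hass` instance and the
# event loop; the entity id is hypothetical):
#
#     tpl = Template("{{ states('sensor.outside_temp') | float | round(1) }}", hass)
#     value = tpl.async_render()  # must be called from the event loop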
| apache-2.0 | 3,488,710,536,227,762,000 | 30.994516 | 79 | 0.602366 | false |
garrettkatz/directional-fibers | dfibers/experiments/levy_opt/levy_opt.py | 1 | 6952 | """
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
with open("%s_s%d.npz"%(basename,sample), 'w') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
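# Hedged usage sketch (file prefix and timeout are illustrative): a single trial
# can be run directly, without the multiprocessing pool, as
#   run_trial(("levy_opt_demo", 0, 60))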
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
| mit | 6,812,665,237,747,678,000 | 32.423077 | 98 | 0.597957 | false |
pedro-aaron/stego-chi-2 | embeddingRgb.py | 1 | 2081 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def marcarPixel(color, bitporinsertar):
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
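# Quick sanity check of marcarPixel (illustrative values): embedding a 1 into an
# even intensity bumps it by one, and embedding a 0 into an odd intensity drops
# it by one, so the pixel's LSB always ends up equal to the embedded bit.
#   marcarPixel(200, 1) -> 201
#   marcarPixel(201, 0) -> 200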
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
    ax1.set_title('RGB image')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark parameters
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# watermarking process
imgMarcada = imgOriginal.copy()
cont = 1  # counter of the number of embedded bits
# embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Percentage of the image watermarked: ' + str(porcentajeDeimagenPorMarcar)+'%')
print('Embedded bits: ' + str(sizeMarca*3))
print('Embedded bytes: ' + str(sizeMarca*3/8))
print('Embedded kilobytes: ' + str(sizeMarca*3/8/1024))
print('Embedded megabytes: ' + str(sizeMarca*3/8/1024/1024))
| mit | 1,626,415,533,977,255,400 | 28.309859 | 80 | 0.658818 | false |
RodericDay/MiniPNM | test_minipnm.py | 1 | 2708 | #!/usr/bin/env python
from __future__ import division
import os
import itertools as it
import pytest
import numpy as np
import minipnm as mini
def test_print():
network = mini.Delaunay.random(100)
print( network )
def test_prune():
delaunay = mini.Delaunay(np.random.rand(100,3))
original_size = delaunay.size
changed = delaunay - ~delaunay.boundary()
new_size = changed.size
assert type(delaunay) is type(changed)
assert np.greater(original_size, new_size).all()
def test_subtract_all():
network = mini.Cubic([3,3,3])
reduced = network.copy()
reduced.prune(network.indexes!=-1)
assert set(network.keys()) == set(reduced.keys())
assert reduced.size == 0
assert all([value.size==0 for value in reduced.values()])
rereduced = reduced.copy()
rereduced.prune(network.indexes!=-1)
assert set(network.keys()) == set(rereduced.keys())
assert rereduced.size == 0
assert all(value.size==0 for value in rereduced.values())
def test_render():
try:
import vtk
except ImportError:
return
network = mini.Delaunay.random(100)
scene = mini.Scene()
network.render(scene=scene)
def test_handling_of_pseudo_array_input():
network = mini.Network()
with pytest.raises(TypeError):
network.points = None, None, None
network.points = [(1,1,1), [2,2,2], np.array([3,3,3])]
network.pairs = (0,1)
network.pairs = [(1,2), [2,0]]
def test_merge():
network = mini.Delaunay.random(100)
inside, outside = network.split(network.boundary())
(inside | outside)
def test_qhull_coplanar():
points = np.random.rand(100,3)
points.T[2] = 0
network = mini.Delaunay(points)
network.boundary()
def test_lengths():
# create a voxelized sphere. black (ones, vs. zeros) is void.
N = 13
im = np.ones([N,N,N])
for i in [i for i, c in np.ndenumerate(im) if np.linalg.norm(np.subtract(i, N/2-0.5))>N/2.5]:
im[i] = 0
def disable_test_save_and_load():
try:
original = mini.Cubic([20,20,20])
mini.save(original)
copy = mini.load('Cubic.npz')
assert type(original) is type(copy)
for key, value in original.items():
np.testing.assert_allclose(copy[key], value)
finally:
os.system("rm Cubic.npz")
def test_clone():
original = mini.Cubic([5,5,5])
copy = original.copy()
assert type(original) is type(copy)
unmatched = set(original.keys()) ^ set(copy.keys())
assert not unmatched
for key, value in original.items():
np.testing.assert_allclose(value, copy[key])
if __name__ == '__main__':
errors = pytest.main()
os.system("find . -name '*.pyc' -delete")
| mit | -6,733,002,228,057,733,000 | 28.11828 | 97 | 0.632939 | false |
akshaykr/oracle_cb | RegretExp.py | 1 | 6615 | import numpy as np
import sklearn.linear_model
import sklearn.tree
import Simulators, Logger, Evaluators, Semibandits, Metrics
import warnings
import argparse
import pickle
import sys
class RegretExp(object):
def __init__(self, weight=None, link="linear", K=10, L=5, T=1000, dataset="synth", feat_noise=0.25, reward_noise=1.0, policies="finite", structure='none'):
self.T = T
self.K = K
self.L = L
        if weight is None:
weight = np.arange(1,self.L+1)
self.weight = weight
self.link = link
self.feat_noise = feat_noise
self.reward_noise = reward_noise
self.dataset = dataset
self.policies = policies
self.structure = structure
if self.dataset == "synth":
print("----Generating Semibandit Simulator----")
self.Sim = Simulators.OrderedSBSim(100,100,self.K,
self.L,self.feat_noise,
w_vec=self.weight,
link=self.link,
one_pass=False)
print("----Done----")
elif self.dataset == "mq2007":
print("----Generating MQ2007 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2007',
metric=Metrics.NavigationalTTS,
## metric=Metrics.NDCG,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2007_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == "mq2008":
print("----Generating MQ2008 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2008',
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2008_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == 'yahoo':
print("----Generating Yahoo Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='yahoo',
## metric=Metrics.NDCG,
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./yahoo_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
else:
print("Error invalid dataset")
sys.exit(1)
def run_alg(self, Alg, params={}):
A = Alg(self.Sim)
(reward, regret) = A.play(self.T,params=params,verbose=False)
return (reward, regret)
if __name__=='__main__':
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('--T', action='store',
default=1000,
help='number of rounds')
parser.add_argument('--link', action='store', choices=['linear', 'logistic'], default='linear')
parser.add_argument('--dataset', action='store', choices=['synth','mq2007','mq2008', 'yahoo'])
parser.add_argument('--policies', action='store', choices=['finite', 'tree', 'linear'], default='linear')
parser.add_argument('--K', action='store', default=10)
parser.add_argument('--L', action='store', default=5)
parser.add_argument('--structure', action='store', default='none', choices=['none','cluster'])
Args = parser.parse_args(sys.argv[1:])
print(Args)
Args.T = int(Args.T)
Args.K = int(Args.K)
Args.L = int(Args.L)
weight = np.arange(1,Args.L+1)[::-1] ## np.arange(1,Args.L+1,1)[::-1] ## /np.sum(np.arange(1,Args.L+1))
Algs = {
## 'EELS': Semibandits.EELS,
'EELS2': Semibandits.EELS2,
## 'Eps': Semibandits.EpsGreedy,
'EpsOracle': Semibandits.EpsGreedy,
## 'Random': Semibandits.Semibandit
}
Params = {
'EELS': {
'link': Args.link,
},
'EELS2': {
'link': Args.link,
},
'Eps': {
'reward': True,
},
'EpsOracle': {
'reward': False,
'weight': weight,
'link': Args.link
},
'Random': {}
}
if Args.dataset != "synth" and Args.policies == 'tree':
Params['EELS']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EELS2']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['Eps']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EpsOracle']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
if Args.dataset != "synth" and Args.policies == 'linear':
Params['EELS']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EELS2']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['Eps']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EpsOracle']['learning_alg'] = sklearn.linear_model.LinearRegression
Out = {
'EELS': [],
'EELS_regret': [],
'EELS2': [],
'EELS2_regret': [],
'Eps': [],
'Eps_regret': [],
'EpsOracle': [],
'EpsOracle_regret': [],
'Random': [],
'Random_regret': []
}
Exp = RegretExp(weight = weight, link=Args.link, K=Args.K, L=Args.L, T=Args.T, dataset=Args.dataset, policies=Args.policies,structure=Args.structure)
for i in range(10):
print('----Iter %d----' % (i))
for (k,v) in Algs.items():
print('----Running %s with params %s----' % (k, Params[k]))
(reward, regret) = Exp.run_alg(v, params=Params[k])
Out[k].append(reward)
Out[k+"_regret"].append(regret)
print('%s final: %0.3f' % (k, reward[-1]))
pickle.dump(Out, open("./data/%s_%s_%s_link=%s_T=%d_K=%d_L=%d.pkl" %(Args.dataset, Args.policies, Args.structure, Args.link, Args.T, Args.K, Args.L), "wb"))
| mit | 1,477,917,713,475,098,000 | 41.133758 | 160 | 0.503099 | false |
eallik/spinoff | spinoff/util/logging/logging.py | 1 | 7740 | # coding: utf8
from __future__ import print_function, absolute_import
import datetime
import inspect
import re
import sys
import time
import traceback
import types
import os
import multiprocessing
from collections import defaultdict
from spinoff.util.python import dump_method_call
try:
import colorama
except ImportError:
colorama = None
WIN32 = sys.platform == 'win32'
_lock = multiprocessing.Lock()
if WIN32:
from .win32fix import fix_unicode_on_win32
fix_unicode_on_win32()
if colorama:
import colorama.initialise
# colorama remembers those at import time, so we need to set them again after our unicode fix
colorama.initialise.orig_stdout = sys.stdout
colorama.initialise.orig_stderr = sys.stderr
# colorama doesn't touch stuff that is not .isatty()==True for some reason
try:
sys.stdout.isatty = sys.stderr.isatty = lambda: True
except AttributeError:
pass
# see also: http://code.google.com/p/colorama/issues/detail?id=41
colorama.init()
else:
print("Colored log output disabled on WIN32; easy_install colorama to enable")
if not WIN32 or colorama:
BLUE = '\x1b[1;34m'
CYAN = '\x1b[1;36m'
GREEN = '\x1b[1;32m'
RED = '\x1b[1;31m'
DARK_RED = '\x1b[0;31m'
RESET_COLOR = '\x1b[0m'
YELLOW = '\x1b[1;33m'
BLINK = '\x1b[5;31m'
else:
BLUE = ''
CYAN = ''
GREEN = ''
RED = ''
DARK_RED = ''
RESET_COLOR = ''
YELLOW = ''
BLINK = ''
OUTFILE = sys.stderr
LEVEL = 0
ENABLE_ONLY = False
LEVELS = [
('dbg', GREEN),
('log', GREEN),
('log', GREEN),
('log', GREEN),
('log', GREEN),
('fail', YELLOW),
('flaw', YELLOW),
('err', RED),
('err', RED),
('panic', BLINK + RED),
('fatal', BLINK + RED),
]
LEVELS = [(name.ljust(5), style) for name, style in LEVELS]
def dbg(*args, **kwargs):
_write(0, *args, **kwargs)
def dbg_call(fn, *args, **kwargs):
t0 = time.time()
ret = fn(*args, **kwargs)
t1 = time.time()
_write(0, "%sms for %s => %r" % (round((t1 - t0) * 1000), dump_method_call(fn.__name__, args, kwargs), ret))
return ret
def dbg1(*args, **kwargs):
_write(0, end='', *args, **kwargs)
# def dbg2(*args, **kwargs):
# _write(0, end='.', *args, **kwargs)
def dbg3(*args, **kwargs):
_write(0, end='\n', *args, **kwargs)
def log(*args, **kwargs):
_write(1, *args, **kwargs)
def fail(*args, **kwargs):
_write(5, *args, **kwargs)
def flaw(*args, **kwargs):
"""Logs a failure that is more important to the developer than a regular failure because there might be a static
programming flaw in the code as opposed to a state/conflict/interaction induced one.
"""
_write(6, *args, **kwargs)
def err(*args, **kwargs):
_write(7, *((RED,) + args + (RESET_COLOR,)), **kwargs)
def panic(*args, **kwargs):
_write(9, *((RED,) + args + (RESET_COLOR,)), **kwargs)
def fatal(*args, **kwargs):
_write(10, *((RED,) + args + (RESET_COLOR,)), **kwargs)
_pending_end = defaultdict(bool)
_logstrings = {}
def get_calling_context(frame):
caller = frame.f_locals.get('self', frame.f_locals.get('cls', None))
f_code = frame.f_code
file, lineno, caller_name = f_code.co_filename, frame.f_lineno, f_code.co_name
file = file.rsplit('/', 1)[-1]
return file, lineno, caller_name, caller
def _write(level, *args, **kwargs):
_lock.acquire()
try:
if level >= LEVEL:
frame = sys._getframe(2)
file, lineno, caller_name, caller = get_calling_context(frame)
if caller:
caller_module = caller.__module__
cls_name = caller.__name__ if isinstance(caller, type) else type(caller).__name__
caller_full_path = '%s.%s' % (caller_module, cls_name)
else:
# TODO: find a faster way to get the module than inspect.getmodule
caller = inspect.getmodule(frame)
if caller:
caller_full_path = caller_module = caller.__name__
else:
caller_full_path = caller_module = '' # .pyc
if ENABLE_ONLY and not any(re.match(x, caller_full_path) for x in ENABLE_ONLY):
return
caller_fn = getattr(caller, caller_name, None)
logstring = getattr(caller_fn, '_r_logstring', None) if caller_fn else None
if not logstring:
# TODO: add logstring "inheritance"
logstring = getattr(caller_fn, '_logstring', None)
if logstring:
if isinstance(logstring, unicode):
logstring = logstring.encode('utf8')
else:
logstring = caller_name + (':' if args else '')
logstring = YELLOW + logstring + RESET_COLOR
# cache it
if isinstance(caller_fn, types.MethodType):
caller_fn.im_func._r_logstring = logstring
elif caller_fn:
caller_fn._r_logstring = logstring
logname = getattr(caller, '_r_logname', None) if caller else ''
if logname is None:
logname = CYAN + get_logname(caller) + RESET_COLOR
if not hasattr(caller, '__slots__'):
caller._r_logname = logname
statestr = GREEN + ' '.join(k for k, v in get_logstate(caller).items() if v) + RESET_COLOR
comment = get_logcomment(caller)
file = os.path.split(file)[-1]
loc = "%s:%s" % (file, lineno)
if level >= 9: # blink for panics
loc = BLINK + loc + RESET_COLOR
levelname = LEVELS[level][1] + LEVELS[level][0] + RESET_COLOR
dump_parent_caller = kwargs.pop('caller', False)
# args = tuple(x.encode('utf-8') for x in args if isinstance(x, unicode))
print(("%s %s %s %s %s %s in %s" %
(datetime.datetime.strftime(datetime.datetime.utcfromtimestamp(time.time() - time.timezone), "%X.%f"), os.getpid(), levelname, loc, logname, statestr, logstring)),
file=OUTFILE, *(args + (comment,)))
if dump_parent_caller:
parent_frame = frame
for i in range(dump_parent_caller):
parent_frame = parent_frame.f_back
if not parent_frame:
break
file_, lineno, caller_name, caller = get_calling_context(parent_frame)
loc = "%s:%s" % (file_, lineno)
print(" " * (i + 1) + "(invoked by) %s %s %s" % (get_logname(caller), caller_name, loc), file=OUTFILE)
except Exception:
# from nose.tools import set_trace; set_trace()
print(RED, "!!%d: (logger failure)" % (level,), file=sys.stderr, *args, **kwargs)
print(RED, "...while trying to log", repr(args), repr(comment) if 'comment' in locals() else '')
print(traceback.format_exc(), RESET_COLOR, file=sys.stderr)
finally:
_lock.release()
def get_logname(obj):
return (obj.__name__
if isinstance(obj, type) else
repr(obj).strip('<>')
if not isinstance(obj, types.ModuleType) else
obj.__name__)
def get_logstate(obj):
try:
return obj.logstate()
except AttributeError:
return {}
def get_logcomment(obj):
try:
x = obj.logcomment
except AttributeError:
return ''
else:
return ' ' + x()
def logstring(logstr):
def dec(fn):
fn._logstring = logstr
return fn
return dec
| bsd-2-clause | -4,722,331,085,763,889,000 | 27.043478 | 181 | 0.555297 | false |
hsghost/ntm | ntm/sentiment/sa.py | 1 | 1863 | #!/usr/bin/python
import nltk
import nltk.data
from nltk.util import ngrams
from nltk.tokenize import word_tokenize
import MySQLdb
mHost = "10.240.119.20"
mUser = "root"
mPasswd = "cis700fall2014"
mDb = "cis700"
mCharset = "utf8"
conn = MySQLdb.connect(host=mHost,user=mUser,passwd=mPasswd,db=mDb,charset=mCharset)
cur = conn.cursor()
classifier = nltk.data.load("classifiers/movie_reviews_NaiveBayes.pickle")
def sa_text (raw_text):
dtext = raw_text.decode('utf-8')
text = word_tokenize(dtext)
feats = dict([(word, True) for word in text + list(ngrams(text, 2))])
return classifier.classify(feats)
# @param tweet_id
# @return sentiment towards it
def sa_by_tweet_id (tweet_id):
cur.execute("select content from tweets where id=%s", tweet_id)
res = cur.fetchall()
if len(res) == 0:
return "nul"
tweet_text = res[0]
return sa_text(tweet_text[0])
def get_uid (tweet_id):
cur.execute("select user from tweets where id=%s", tweet_id)
res = cur.fetchall()
if len(res) == 0:
return "nul"
return res[0]
def sa_on_word (word):
cur.execute("select id from tweets_newterm where word=%s", word)
res = cur.fetchall()
pos = []
neg = []
for tid in res:
sent = sa_by_tweet_id(tid)
uid = get_uid(tid)
if sent == "pos":
pos += uid
elif sent == "neg":
neg += uid
ret = [word, pos, neg]
return ret
# main entry
# get top 'num' of new term and do SA
# @param num
# @return list[word, pos, neg]
def sa_main(num = 20):
cur.execute("select word,freq from newterm where count>10 and analyzed_time=0 order by freq DESC limit %s", num)
res = cur.fetchall()
sa = []
for r in res:
sow=sa_on_word(r[0])
sow.append(r[1])
sa.append(sow)
print sa
return sa
# print sa_main(10)
| mit | -4,181,793,751,906,841,000 | 21.719512 | 116 | 0.618894 | false |
mat128/netman | tests/adapters/configured_test_case.py | 1 | 4519 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import wraps
from unittest import SkipTest
from hamcrest import assert_that, is_
from netman.adapters.switches.cached import CachedSwitch
from netman.adapters.switches.remote import RemoteSwitch
from netman.core.objects.exceptions import NetmanException
from netman.core.objects.switch_descriptor import SwitchDescriptor
from netman.main import app
from tests.adapters.flask_helper import FlaskRequest
from tests.adapters.model_list import available_models
def sub_dict(d, *keys):
return dict((k, d[k]) for k in keys)
class ValidatingCachedSwitch(CachedSwitch):
def get_bond(self, number):
bond = super(ValidatingCachedSwitch, self).get_bond(number)
assert_that(bond, is_(self.real_switch.get_bond(number)))
return bond
def get_bonds(self):
bonds = super(ValidatingCachedSwitch, self).get_bonds()
assert_that(bonds, is_(self.real_switch.get_bonds()))
return bonds
def get_interfaces(self):
interfaces = super(ValidatingCachedSwitch, self).get_interfaces()
assert_that(interfaces, is_(self.real_switch.get_interfaces()))
return interfaces
def get_vlan(self, number):
vlan = super(ValidatingCachedSwitch, self).get_vlan(number)
assert_that(vlan, is_(self.real_switch.get_vlan(number)))
return vlan
def get_vlans(self):
vlans = super(ValidatingCachedSwitch, self).get_vlans()
assert_that(vlans, is_(self.real_switch.get_vlans()))
return vlans
class ConfiguredTestCase(unittest.TestCase):
_dev_sample = None
switch_specs = None
def setUp(self):
if self.switch_specs is not None:
specs = type(self).switch_specs
else:
specs = next(s for s in available_models if s["model"] == self._dev_sample)
self.switch_hostname = specs["hostname"]
self.switch_port = specs["port"]
self.switch_type = specs["model"]
self.switch_username = specs["username"]
self.switch_password = specs["password"]
self.test_port = specs["test_port_name"]
self.test_ports = specs["ports"]
self.test_vrrp_track_id = specs.get("test_vrrp_track_id")
self.remote_switch = RemoteSwitch(SwitchDescriptor(
netman_server='', **sub_dict(
specs, 'hostname', 'port', 'model', 'username', 'password')))
self.remote_switch.requests = FlaskRequest(app.test_client())
self.client = ValidatingCachedSwitch(self.remote_switch)
self.try_to = ExceptionIgnoringProxy(self.client, [NotImplementedError])
self.janitor = ExceptionIgnoringProxy(self.client, [NotImplementedError, NetmanException])
self.client.connect()
self.client.start_transaction()
def tearDown(self):
self.client.end_transaction()
self.client.disconnect()
def get_vlan_from_list(self, number):
try:
return next((vlan for vlan in self.client.get_vlans()
if vlan.number == number))
except StopIteration:
raise AssertionError("Vlan #{} not found".format(number))
def skip_on_switches(*to_skip):
def resource_decorator(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.switch_type in to_skip:
return fn(self, *args, **kwargs)
else:
raise SkipTest('Test not executed on Switch model %s' % self.switch_type)
return wrapper
return resource_decorator
class ExceptionIgnoringProxy(object):
def __init__(self, target, exceptions):
self.target = target
self.exceptions = tuple(exceptions)
def __getattr__(self, item):
def wrapper(*args, **kwargs):
try:
return getattr(self.target, item)(*args, **kwargs)
except self.exceptions:
return None
return wrapper
| apache-2.0 | -730,102,936,339,714,300 | 33.496183 | 98 | 0.66143 | false |
mementum/backtrader | samples/observer-benchmark/observer-benchmark.py | 1 | 7280 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import random
import backtrader as bt
class St(bt.Strategy):
params = (
('period', 10),
('printout', False),
('stake', 1000),
)
def __init__(self):
sma = bt.indicators.SMA(self.data, period=self.p.period)
self.crossover = bt.indicators.CrossOver(self.data, sma)
def start(self):
if self.p.printout:
txtfields = list()
txtfields.append('Len')
txtfields.append('Datetime')
txtfields.append('Open')
txtfields.append('High')
txtfields.append('Low')
txtfields.append('Close')
txtfields.append('Volume')
txtfields.append('OpenInterest')
print(','.join(txtfields))
def next(self):
if self.p.printout:
# Print only 1st data ... is just a check that things are running
txtfields = list()
txtfields.append('%04d' % len(self))
txtfields.append(self.data.datetime.datetime(0).isoformat())
txtfields.append('%.2f' % self.data0.open[0])
txtfields.append('%.2f' % self.data0.high[0])
txtfields.append('%.2f' % self.data0.low[0])
txtfields.append('%.2f' % self.data0.close[0])
txtfields.append('%.2f' % self.data0.volume[0])
txtfields.append('%.2f' % self.data0.openinterest[0])
print(','.join(txtfields))
if self.position:
if self.crossover < 0.0:
if self.p.printout:
                    print('CLOSE {} @%{}'.format(self.position.size,
                                                 self.data.close[0]))
self.close()
else:
if self.crossover > 0.0:
self.buy(size=self.p.stake)
if self.p.printout:
print('BUY {} @%{}'.format(self.p.stake,
self.data.close[0]))
TIMEFRAMES = {
None: None,
'days': bt.TimeFrame.Days,
'weeks': bt.TimeFrame.Weeks,
'months': bt.TimeFrame.Months,
'years': bt.TimeFrame.Years,
'notimeframe': bt.TimeFrame.NoTimeFrame,
}
def runstrat(args=None):
args = parse_args(args)
cerebro = bt.Cerebro()
cerebro.broker.set_cash(args.cash)
dkwargs = dict()
if args.fromdate:
fromdate = datetime.datetime.strptime(args.fromdate, '%Y-%m-%d')
dkwargs['fromdate'] = fromdate
if args.todate:
todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d')
dkwargs['todate'] = todate
data0 = bt.feeds.YahooFinanceCSVData(dataname=args.data0, **dkwargs)
cerebro.adddata(data0, name='Data0')
cerebro.addstrategy(St,
period=args.period,
stake=args.stake,
printout=args.printout)
if args.timereturn:
cerebro.addobserver(bt.observers.TimeReturn,
timeframe=TIMEFRAMES[args.timeframe])
else:
benchdata = data0
if args.benchdata1:
data1 = bt.feeds.YahooFinanceCSVData(dataname=args.data1, **dkwargs)
cerebro.adddata(data1, name='Data1')
benchdata = data1
cerebro.addobserver(bt.observers.Benchmark,
data=benchdata,
timeframe=TIMEFRAMES[args.timeframe])
cerebro.run()
if args.plot:
pkwargs = dict()
if args.plot is not True: # evals to True but is not True
pkwargs = eval('dict(' + args.plot + ')') # args were passed
cerebro.plot(**pkwargs)
def parse_args(pargs=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Benchmark/TimeReturn Observers Sample')
parser.add_argument('--data0', required=False,
default='../../datas/yhoo-1996-2015.txt',
help='Data0 to be read in')
parser.add_argument('--data1', required=False,
default='../../datas/orcl-1995-2014.txt',
help='Data1 to be read in')
parser.add_argument('--benchdata1', required=False, action='store_true',
help=('Benchmark against data1'))
parser.add_argument('--fromdate', required=False,
default='2005-01-01',
help='Starting date in YYYY-MM-DD format')
parser.add_argument('--todate', required=False,
default='2006-12-31',
help='Ending date in YYYY-MM-DD format')
parser.add_argument('--printout', required=False, action='store_true',
help=('Print data lines'))
parser.add_argument('--cash', required=False, action='store',
type=float, default=50000,
help=('Cash to start with'))
parser.add_argument('--period', required=False, action='store',
type=int, default=30,
help=('Period for the crossover moving average'))
parser.add_argument('--stake', required=False, action='store',
type=int, default=1000,
help=('Stake to apply for the buy operations'))
parser.add_argument('--timereturn', required=False, action='store_true',
default=None,
help=('Use TimeReturn observer instead of Benchmark'))
parser.add_argument('--timeframe', required=False, action='store',
default=None, choices=TIMEFRAMES.keys(),
help=('TimeFrame to apply to the Observer'))
# Plot options
parser.add_argument('--plot', '-p', nargs='?', required=False,
metavar='kwargs', const=True,
help=('Plot the read data applying any kwargs passed\n'
'\n'
'For example:\n'
'\n'
' --plot style="candle" (to plot candles)\n'))
if pargs:
return parser.parse_args(pargs)
return parser.parse_args()
if __name__ == '__main__':
runstrat()
| gpl-3.0 | 5,036,608,712,308,105,000 | 34.339806 | 80 | 0.542308 | false |
ecreall/nova-ideo | novaideo/content/processes/novaideo_abstract_process/behaviors.py | 1 | 8566 | # -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import pytz
import datetime
from pyramid.httpexceptions import HTTPFound
from persistent.list import PersistentList
from dace.objectofcollaboration.principal import User
from dace.objectofcollaboration.principal.util import (
has_role,
get_current)
from dace.processinstance.activity import InfiniteCardinality
from ..user_management.behaviors import global_user_processsecurity
from novaideo.content.interface import (
INovaIdeoApplication, ISearchableEntity,
IEmojiable)
from novaideo import _, nothing
from novaideo.utilities.util import update_ajax_action
def select_roles_validation(process, context):
return has_role(role=('Member',))
def select_processsecurity_validation(process, context):
user = get_current()
return user is not context and \
context not in getattr(user, 'selections', []) and \
global_user_processsecurity()
def select_state_validation(process, context):
return context.is_published
class SelectEntity(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'glyphicon glyphicon-star-empty'
style_order = 100
isSequential = False
context = ISearchableEntity
roles_validation = select_roles_validation
processsecurity_validation = select_processsecurity_validation
state_validation = select_state_validation
def get_title(self, context, request, nb_only=False):
len_selections = getattr(context, 'len_selections', 0)
if nb_only:
return str(len_selections)
return _("${title} (${number})",
mapping={'number': len_selections,
'title': request.localizer.translate(self.title)})
def start(self, context, request, appstruct, **kw):
user = get_current()
user.addtoproperty('selections', context)
if not isinstance(context, User):
channel = getattr(context, 'channel', None)
if channel and user not in channel.members:
channel.addtoproperty('members', user)
user.reindex()
context.reindex()
return {}
def redirect(self, context, request, **kw):
return nothing
def selecta_roles_validation(process, context):
return has_role(role=('Anonymous',), ignore_superiors=True)
def selecta_processsecurity_validation(process, context):
return True
class SelectEntityAnonymous(SelectEntity):
roles_validation = selecta_roles_validation
processsecurity_validation = selecta_processsecurity_validation
style_interaction = 'ajax-action'
style_interaction_type = 'popover'
behavior_id = 'select_anonymous'
def start(self, context, request, appstruct, **kw):
return {}
def deselect_roles_validation(process, context):
return has_role(role=('Member',))
def deselect_processsecurity_validation(process, context):
user = get_current()
return (context in getattr(user, 'selections', [])) and \
global_user_processsecurity()
class DeselectEntity(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'glyphicon glyphicon-star'
style_order = 101
isSequential = False
context = ISearchableEntity
roles_validation = deselect_roles_validation
processsecurity_validation = deselect_processsecurity_validation
state_validation = select_state_validation
def get_title(self, context, request, nb_only=False):
len_selections = getattr(context, 'len_selections', 0)
if nb_only:
return str(len_selections)
return _("${title} (${number})",
mapping={'number': len_selections,
'title': request.localizer.translate(self.title)})
def start(self, context, request, appstruct, **kw):
user = get_current()
user.delfromproperty('selections', context)
if not isinstance(context, User):
channel = getattr(context, 'channel', None)
if channel:
channel.delfromproperty('members', user)
user.reindex()
context.reindex()
return {}
def redirect(self, context, request, **kw):
return nothing
def addr_roles_validation(process, context):
return has_role(role=('Member',))
def addr_state_validation(process, context):
return 'published' in context.state
def addr_processsecurity_validation(process, context):
security = global_user_processsecurity()
if security:
can_add_reaction = False
if hasattr(context, 'can_add_reaction'):
can_add_reaction = context.can_add_reaction(get_current(), process)
return can_add_reaction
return False
class AddReaction(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-body-action'
style_interaction = 'ajax-action'
style_interaction_type = 'popover'
style_picto = 'novaideo-icon icon-add-emoji'
template = 'novaideo:views/templates/actions/add_reaction_idea.pt'
context = IEmojiable
roles_validation = addr_roles_validation
state_validation = addr_state_validation
processsecurity_validation = addr_processsecurity_validation
def get_update_action(self, context, request):
actions_data = update_ajax_action(
context, request, self.process_id, 'updatereaction')
if actions_data and actions_data[0]:
return actions_data[0][0]
return None
def start(self, context, request, appstruct, **kw):
reaction = appstruct.get('reaction', None)
context.add_emoji(reaction, get_current(request))
return {}
def redirect(self, context, request, **kw):
return nothing
class UpdateReaction(AddReaction):
style = 'button' #TODO add style abstract class
style_descriminator = 'controled-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'none'
template = None
def get_title(self, selected):
return selected and _('Remove my reaction') or _('Add a reaction')
def deadline_roles_validation(process, context):
return has_role(role=('Examiner', ))
def adddeadline_processsecurity_validation(process, context):
return getattr(context, 'content_to_examine', []) and\
datetime.datetime.now(tz=pytz.UTC) >= \
context.deadlines[-1].replace(tzinfo=pytz.UTC) and \
global_user_processsecurity()
class AddDeadLine(InfiniteCardinality):
style_descriminator = 'admin-action'
style_picto = 'glyphicon glyphicon-time'
style_order = 9
submission_title = _('Save')
isSequential = False
context = INovaIdeoApplication
roles_validation = deadline_roles_validation
processsecurity_validation = adddeadline_processsecurity_validation
def start(self, context, request, appstruct, **kw):
if hasattr(context, 'deadlines'):
context.deadlines.append(appstruct['deadline'])
else:
context.deadlines = PersistentList([appstruct['deadline']])
return {}
def redirect(self, context, request, **kw):
return HTTPFound(request.resource_url(context))
def editdeadline_processsecurity_validation(process, context):
return getattr(context, 'content_to_examine', []) and\
global_user_processsecurity() and \
getattr(context, 'deadlines', [])
class EditDeadLine(InfiniteCardinality):
style_descriminator = 'admin-action'
style_picto = 'glyphicon glyphicon-time'
style_order = 9
submission_title = _('Save')
isSequential = False
context = INovaIdeoApplication
roles_validation = deadline_roles_validation
processsecurity_validation = editdeadline_processsecurity_validation
def start(self, context, request, appstruct, **kw):
current = context.deadlines[-1]
context.deadlines.remove(current)
context.deadlines.append(appstruct['deadline'])
return {}
def redirect(self, context, request, **kw):
return HTTPFound(request.resource_url(context))
#TODO behaviors
| agpl-3.0 | -7,422,076,832,301,350,000 | 31.082397 | 79 | 0.681882 | false |
edwardekstrom/BZRflag | bzagents/pf_agent.py | 1 | 10127 | #!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
from bzrc import BZRC, Command
from pFields import PField
class PFAgent(object):
"""Class handles all command and control logic for a teams tanks."""
def __init__(self, bzrc):
self.bzrc = bzrc
self.constants = self.bzrc.get_constants()
self.commands = []
self.potentialFields = []
self.flag_sphere = 400
self.obstacle_sphere = 1000
self.enemy_sphere = 100
self.obstacles = bzrc.get_obstacles()
self.obstacle_centers = []
for ob in self.obstacles:
totalX = 0
totalY = 0
for corner in ob:
totalX += corner[0]
totalY += corner[1]
averageX = totalX / len(ob)
averageY = totalY / len(ob)
for corner in ob:
if self.dist(averageX,averageY,corner[0],corner[1]) > self.obstacle_sphere:
self.obstacle_sphere = self.dist(averageX,averageY,corner[0],corner[1])
# print self.obstacle_sphere
tup = (averageX,averageY)
self.obstacle_centers.append(tup)
# print ""
# for o in self.bzrc.get_obstacles():
# print o
# print ""
def tick(self, time_diff):
"""Some time has passed; decide what to do next."""
# print
mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
self.mytanks = mytanks
self.othertanks = othertanks
self.flags = flags
self.shots = shots
self.enemies = [tank for tank in othertanks if tank.color !=
self.constants['team']]
self.commands = []
for tank in mytanks:
if tank.status != 'dead':
pfo = None
obstacle_x, obstacle_y, d = self.closest_obstacle(tank)
if d < self.obstacle_sphere:
# print str(d)
pfo = PField(obstacle_x, obstacle_y, 0, self.obstacle_sphere, 'tangent')
pfe = None
enemy_x, enemy_y, enemy_dist = self.closest_enemy(tank, self.enemies)
if enemy_dist < self.enemy_sphere:
#print enemy_dist
pfe = PField(enemy_x, enemy_y, 0, self.enemy_sphere, 'repel')
# if flag possession, then put a pf on the home_base
pf = None
if(tank.flag == '-'):
best_flag = self.choose_best_flag(tank)
pf = PField(best_flag.x, best_flag.y, 0, self.flag_sphere, 'attract')
# if not possessed, then put a pf on a flag
else:
home_base_x, home_base_y = self.find_home_base(tank)
pf = PField(home_base_x, home_base_y, 0, self.flag_sphere, 'attract')
self.pf_move(tank, pf, pfo, pfe)
#for tank in mytanks:
#self.attack_enemies(tank)
#for tank in mytanks:
#self.run_to_flag(tank)
results = self.bzrc.do_commands(self.commands)
def pf_move(self, tank, pf, pfo, pfe):
final_angle = 0
if pfo != None:
# print 'pfo != None'
#print self.constants['team'] + " tank: %d = pfo" % tank.index
speedmod, angle = pfo.calc_vector(tank.x, tank.y)
elif pfe != None:
# print 'pfe ! = None'
#print self.constants['team'] + " tank: %d = pfe" % tank.index
speedmod, angle = pfe.calc_vector(tank.x, tank.y)
else:
# print 'else'
#print self.constants['team'] + " tank: %d = pf" % tank.index
speedmod, angle = pf.calc_vector(tank.x, tank.y)
angle = self.normalize_angle(angle - tank.angle)
if final_angle == 0:
final_angle = angle
else:
final_angle = (float(final_angle) + float(angle)) / 2.0
# current_tank_speed = math.sqrt(float(tank.vx**2) + float(tank.vy**2))
# print current_tank_speed
#command = Command(tank.index, speedmod * current_tank_speed, 2 * final_angle, True)
command = Command(tank.index, speedmod, 2 * final_angle, True)
self.commands.append(command)
def closest_obstacle(self, tank):
closest_x = (2 * float(self.constants['worldsize']))**2
closest_y = (2 * float(self.constants['worldsize']))**2
best_d = (2 * float(self.constants['worldsize']))**2
# obstacles = self.bzrc.get_obstacles()
for o in self.obstacle_centers:
x,y = o
d = self.dist(x, y, tank.x, tank.y)
if d < best_d:
best_d = d
closest_x = x
closest_y = y
return (closest_x, closest_y, best_d)
def closest_enemy(self, tank, enemies):
closest_x = (2 * float(self.constants['worldsize']))**2
closest_y = (2 * float(self.constants['worldsize']))**2
best_d = (2 * float(self.constants['worldsize']))**2
for e in enemies:
d = self.dist(e.x, e.y, tank.x, tank.y)
if d < best_d:
best_d = d
closest_x = e.x
closest_y = e.y
return (closest_x, closest_y, best_d)
    def dist(self, x1, y1, x2, y2):
        # Note: returns the squared distance (no sqrt); callers compare it
        # directly against the *_sphere thresholds, which are also squared.
        return (x1 - x2)**2 + (y1 - y2)**2
def find_home_base(self, tank):
bases = self.bzrc.get_bases()
for base in bases:
if base.color == self.constants['team']:
xdist = abs(base.corner1_x - base.corner3_x) / 2.0
ydist = abs(base.corner1_y - base.corner3_y) / 2.0
base_x = max(base.corner1_x, base.corner3_x) - (xdist/2.0)
base_y = max(base.corner1_y, base.corner3_y) - (ydist/2.0)
return (base_x, base_y)
def choose_best_flag(self, tank):
best_flag = None
best_flag_dist = 2 * float(self.constants['worldsize'])
for f in self.flags:
# print str(len(self.flags))
if f.color != self.constants['team'] and f.poss_color != self.constants['team']:
dist = math.sqrt((f.x - tank.x)**2 + (f.y - tank.y)**2)
if dist < best_flag_dist:
best_flag_dist = dist
best_flag = f
if best_flag is None:
return self.flags[0]
else:
return best_flag
# return self.flags[2]
def run_to_flag(self, tank):
best_flag = None
best_flag_dist = 2 * float(self.constants['worldsize'])
for f in self.flags:
if f.color != self.constants['team']:
dist = math.sqrt((f.x - tank.x)**2 + (f.y - tank.y)**2)
if dist < best_flag_dist:
best_flag_dist = dist
best_flag = f
if best_flag is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_flag.x, best_flag.y)
def attack_enemies(self, tank):
"""Find the closest enemy and chase it, shooting as you go."""
best_enemy = None
best_dist = 2 * float(self.constants['worldsize'])
for enemy in self.enemies:
if enemy.status != 'alive':
continue
dist = math.sqrt((enemy.x - tank.x)**2 + (enemy.y - tank.y)**2)
if dist < best_dist:
best_dist = dist
best_enemy = enemy
if best_enemy is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_enemy.x, best_enemy.y)
def move_to_position(self, tank, target_x, target_y):
"""Set command to move to given coordinates."""
target_angle = math.atan2(target_y - tank.y,
target_x - tank.x)
relative_angle = self.normalize_angle(target_angle - tank.angle)
# index, speed, angvel, shoot
command = Command(tank.index, 1, 2 * relative_angle, False)
self.commands.append(command)
def normalize_angle(self, angle):
"""Make any angle be between +/- pi."""
angle -= 2 * math.pi * int (angle / (2 * math.pi))
if angle <= -math.pi:
angle += 2 * math.pi
elif angle > math.pi:
angle -= 2 * math.pi
return angle
def main():
# Process CLI arguments.
try:
execname, host, port = sys.argv
except ValueError:
execname = sys.argv[0]
print >>sys.stderr, '%s: incorrect number of arguments' % execname
print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
sys.exit(-1)
# Connect.
#bzrc = BZRC(host, int(port), debug=True)
bzrc = BZRC(host, int(port))
agent = PFAgent(bzrc)
prev_time = time.time()
# Run the agent
try:
while True:
time_diff = time.time() - prev_time
agent.tick(time_diff)
except KeyboardInterrupt:
print "Exiting due to keyboard interrupt."
bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
| gpl-3.0 | -4,527,369,260,058,859,000 | 34.533333 | 92 | 0.530661 | false |
planetarymike/IDL-Colorbars | IDL_py_test/018_Pastels.py | 1 | 5628 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 0., 0.282353],
[1., 0., 0.282353],
[1., 0., 0.290196],
[1., 0., 0.298039],
[1., 0., 0.305882],
[1., 0., 0.313725],
[1., 0., 0.321569],
[1., 0., 0.329412],
[1., 0., 0.337255],
[1., 0., 0.345098],
[1., 0., 0.352941],
[1., 0., 0.356863],
[1., 0., 0.364706],
[1., 0., 0.372549],
[1., 0., 0.380392],
[1., 0., 0.388235],
[1., 0., 0.396078],
[1., 0., 0.403922],
[1., 0., 0.411765],
[1., 0., 0.419608],
[1., 0., 0.427451],
[1., 0., 0.435294],
[1., 0., 0.443137],
[1., 0., 0.45098],
[1., 0., 0.458824],
[1., 0., 0.466667],
[1., 0., 0.47451],
[1., 0., 0.482353],
[1., 0., 0.490196],
[1., 0., 0.498039],
[1., 0., 0.505882],
[1., 0., 0.513725],
[1., 0., 0.521569],
[1., 0., 0.529412],
[1., 0., 0.537255],
[1., 0., 0.545098],
[1., 0., 0.552941],
[1., 0., 0.556863],
[1., 0., 0.564706],
[1., 0., 0.572549],
[1., 0., 0.580392],
[1., 0., 0.588235],
[1., 0., 0.596078],
[1., 0., 0.603922],
[1., 0., 0.611765],
[1., 0., 0.619608],
[1., 0., 0.627451],
[1., 0., 0.635294],
[1., 0., 0.643137],
[1., 0., 0.65098],
[1., 0., 0.658824],
[1., 0., 0.666667],
[1., 0., 0.67451],
[1., 0., 0.682353],
[1., 0., 0.690196],
[1., 0., 0.698039],
[1., 0., 0.705882],
[1., 0., 0.713725],
[1., 0., 0.721569],
[1., 0., 0.729412],
[1., 0., 0.737255],
[1., 0., 0.745098],
[1., 0., 0.74902],
[1., 0., 0.756863],
[1., 0., 0.764706],
[1., 0., 0.772549],
[1., 0., 0.780392],
[1., 0., 0.788235],
[1., 0., 0.796078],
[1., 0., 0.803922],
[1., 0., 0.811765],
[1., 0., 0.819608],
[1., 0., 0.827451],
[1., 0., 0.835294],
[1., 0., 0.843137],
[1., 0., 0.85098],
[1., 0., 0.858824],
[1., 0., 0.866667],
[1., 0., 0.87451],
[1., 0., 0.882353],
[1., 0., 0.890196],
[1., 0., 0.898039],
[1., 0., 0.905882],
[1., 0., 0.913725],
[1., 0., 0.921569],
[1., 0., 0.929412],
[1., 0., 0.937255],
[1., 0., 0.945098],
[1., 0., 0.94902],
[1., 0., 0.956863],
[1., 0., 0.964706],
[1., 0., 0.972549],
[1., 0., 0.980392],
[1., 0., 0.988235],
[1., 0., 0.996078],
[0.992157, 0., 1.],
[0.984314, 0., 1.],
[0.976471, 0., 1.],
[0.968627, 0., 1.],
[0.960784, 0., 1.],
[0.952941, 0., 1.],
[0.945098, 0., 1.],
[0.937255, 0., 1.],
[0.929412, 0., 1.],
[0.921569, 0., 1.],
[0.913725, 0., 1.],
[0.905882, 0., 1.],
[0.898039, 0., 1.],
[0.890196, 0., 1.],
[0.882353, 0., 1.],
[0.87451, 0., 1.],
[0.866667, 0., 1.],
[0.858824, 0., 1.],
[0.85098, 0., 1.],
[0.847059, 0., 1.],
[0.839216, 0., 1.],
[0.831373, 0., 1.],
[0.823529, 0., 1.],
[0.815686, 0., 1.],
[0.807843, 0., 1.],
[0.8, 0., 1.],
[0.792157, 0., 1.],
[0.784314, 0., 1.],
[0.776471, 0., 1.],
[0.768627, 0., 1.],
[0.760784, 0., 1.],
[0.752941, 0., 1.],
[0.745098, 0., 1.],
[0.737255, 0., 1.],
[0.729412, 0., 1.],
[0., 0.54902, 1.],
[0., 0.572549, 1.],
[0., 0.596078, 1.],
[0., 0.615686, 1.],
[0., 0.639216, 1.],
[0., 0.662745, 1.],
[0., 0.682353, 1.],
[0., 0.705882, 1.],
[0., 0.729412, 1.],
[0., 0.752941, 1.],
[0., 0.772549, 1.],
[0., 0.796078, 1.],
[0., 0.819608, 1.],
[0., 0.839216, 1.],
[0., 0.862745, 1.],
[0., 0.886275, 1.],
[0., 0.909804, 1.],
[0., 0.929412, 1.],
[0., 0.952941, 1.],
[0., 0.976471, 1.],
[0., 1., 1.],
[0., 1., 0.976471],
[0., 1., 0.952941],
[0., 1., 0.929412],
[0., 1., 0.909804],
[0., 1., 0.886275],
[0., 1., 0.862745],
[0., 1., 0.839216],
[0., 1., 0.819608],
[0., 1., 0.796078],
[0., 1., 0.772549],
[0., 1., 0.752941],
[0., 1., 0.729412],
[0., 1., 0.705882],
[0., 1., 0.682353],
[0., 1., 0.662745],
[0., 1., 0.639216],
[0., 1., 0.615686],
[0., 1., 0.596078],
[0., 1., 0.572549],
[0., 1., 0.54902],
[0., 1., 0.52549],
[0., 1., 0.505882],
[0., 1., 0.482353],
[0., 1., 0.458824],
[0., 1., 0.439216],
[0., 1., 0.415686],
[0., 1., 0.392157],
[0., 1., 0.368627],
[0., 1., 0.34902],
[0., 1., 0.32549],
[0., 1., 0.301961],
[0., 1., 0.278431],
[0., 1., 0.258824],
[0., 1., 0.235294],
[0., 1., 0.211765],
[0., 1., 0.192157],
[0., 1., 0.168627],
[0., 1., 0.145098],
[0., 1., 0.121569],
[0., 1., 0.101961],
[0., 1., 0.0784314],
[0., 1., 0.054902],
[0., 1., 0.0352941],
[0., 1., 0.0117647],
[0.00784314, 1., 0.],
[0.0313725, 1., 0.],
[0.0509804, 1., 0.],
[0.0745098, 1., 0.],
[0.0980392, 1., 0.],
[0.117647, 1., 0.],
[0.141176, 1., 0.],
[0.164706, 1., 0.],
[0.188235, 1., 0.],
[0.207843, 1., 0.],
[0.231373, 1., 0.],
[0.254902, 1., 0.],
[0.278431, 1., 0.],
[0.298039, 1., 0.],
[0.321569, 1., 0.],
[0.345098, 1., 0.],
[0.364706, 1., 0.],
[0.388235, 1., 0.],
[0.411765, 1., 0.],
[0.435294, 1., 0.],
[0.454902, 1., 0.],
[0.478431, 1., 0.],
[0.501961, 1., 0.],
[0.521569, 1., 0.],
[0.545098, 1., 0.],
[0.568627, 1., 0.],
[0.592157, 1., 0.],
[0.611765, 1., 0.],
[0.635294, 1., 0.],
[0.658824, 1., 0.],
[0.678431, 1., 0.],
[0.701961, 1., 0.],
[0.72549, 1., 0.],
[0.74902, 1., 0.],
[0.768627, 1., 0.],
[0.792157, 1., 0.],
[0.815686, 1., 0.],
[0.839216, 1., 0.],
[0.858824, 1., 0.],
[0.882353, 1., 0.],
[0.905882, 1., 0.],
[0.92549, 1., 0.],
[0.94902, 1., 0.],
[0.972549, 1., 0.],
[0.996078, 1., 0.],
[1., 0.980392, 0.],
[1., 0.956863, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.890196, 0.],
[1., 0.866667, 0.],
[1., 0.843137, 0.],
[1., 0.823529, 0.],
[1., 0.8, 0.],
[1., 0.776471, 0.],
[1., 0.756863, 0.],
[1., 0.733333, 0.],
[1., 0.709804, 0.],
[1., 0.686275, 0.],
[1., 0.666667, 0.],
[1., 0.666667, 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 | 6,271,818,433,585,241,000 | 19.540146 | 69 | 0.465885 | false |
jlublin/landpatterngen | target_svg.py | 1 | 4266 | #!/usr/bin/env python3
import xml.etree.ElementTree as ET
def get_target():
return SVG()
class SVG:
def __init__(self):
self.svg = ET.parse('skeleton.svg')
self.mmpx = 3.543307
def output(self, path):
self.svg.write(path)
def add_package(self, package):
'''
Target SVG only handles one drawing at a time, only last added drawing will be part of output
'''
self.svg = ET.parse('skeleton.svg')
self.package = \
{
'name': package['name'],
'pads': [],
'mnt_pads': [],
'holes': [],
'lines': [],
'circles': [],
'rectangles': [] ,
'texts': []
}
    def output(self, fout):
        # If no package has been added yet, fall back to writing the current
        # skeleton tree (keeps the __main__ demo at the bottom working).
        if not hasattr(self, 'package'):
            self.svg.write(fout)
            return
        package = self.package
for pad in package['pads']:
self.gen_pac_pad(pad)
for mnt_pad in package['mnt_pads']: # TODO, adding mnt_pads not done
self.gen_pac_mnt_pad(mnt_pad)
for hole in package['holes']:
self.gen_pac_hole(hole)
for line in package['lines']:
self.gen_pac_line(line)
if(0):
for circle in package['circles']:
self.gen_pac_circle(circle)
for rect in package['rectangles']:
self.gen_pac_rectangle(rect)
for text in package['texts']:
self.gen_pac_text(text)
self.svg.write(fout)
def add_pac_pad(self, type, angle, size, pos, number):
self.package['pads'].append(
{
'type': type,
'angle': angle,
'size': size,
'pos': pos,
'number': number
})
def add_pac_hole(self, diameter, pos):
self.package['holes'].append(
{
'd': diameter,
'pos': pos
})
def add_pac_line(self, layer, width, vertices):
self.package['lines'].append(
{
'layer': layer,
'width': width,
'vertices': vertices
})
def gen_pac_pad(self, pad): # type, angle, size, pos, number
top_layer = self.svg.find('.//g[@id="Top"]')
# TODO: Types and angle
el = ET.SubElement(top_layer, 'rect')
el.set('style', 'fill:#ff0000;fill-opacity:1;stroke:none;stroke-width:10;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1')
el.set('id', 'pin_{}'.format(pad['number']))
el.set('width', '{}'.format(pad['size'][0]*self.mmpx))
el.set('height', '{}'.format(pad['size'][1]*self.mmpx))
el.set('x', '{}'.format((pad['pos'][0] - pad['size'][0]/2)*self.mmpx))
el.set('y', '{}'.format((pad['pos'][1] - pad['size'][1]/2)*self.mmpx))
def gen_pac_hole(self, hole):
top_layer = self.svg.find('.//g[@id="Holes"]')
circle = ET.SubElement(top_layer, 'circle')
        circle.set('style', 'fill:#eeee00;fill-opacity:1;stroke:none;stroke-width:0.0;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1')
circle.set('cx', '{}'.format(hole['pos'][0]*self.mmpx))
circle.set('cy', '{}'.format(hole['pos'][1]*self.mmpx))
circle.set('r', '{}'.format(hole['d']/2*self.mmpx))
def gen_pac_line(self, line):
layer = self.svg.find('.//g[@id="{}"]'.format(line['layer']))
if(line['layer'] == 'Courtyard'):
color = '#e63a81'
elif(line['layer'] == 'Silk'):
color = '#111111'
else:
color = '#000000'
el = ET.SubElement(layer, 'path')
el.set('style', 'fill:none;fill-rule:evenodd;stroke:{color};stroke-width:{}mm;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none'.format(line['width'], color=color))
pathdata = ''
first = True
for (x,y) in line['vertices']:
if(first):
pathdata += 'M ' + '{},{}'.format(x*self.mmpx,y*self.mmpx)
first = False
elif(x == 'end'):
pathdata += ' z'
else:
pathdata += ' L ' + '{},{}'.format(x*self.mmpx,y*self.mmpx)
el.set('d', pathdata)
def gen_circle(self, layer_name, diameter, pos):
layer = self.svg.find('.//g[@id="{}"]'.format(layer_name))
if(layer_name == 'Courtyard'):
color = '#e63a81'
elif(layer_name == 'Silk'):
color = '#111111'
else:
color = '#000000'
circle = ET.SubElement(layer, 'circle')
        circle.set('style', 'fill:{color};fill-opacity:1;stroke:none;stroke-width:0.0;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1'.format(color=color))
circle.set('cx', '{}'.format(pos[0]*self.mmpx))
circle.set('cy', '{}'.format(pos[1]*self.mmpx))
circle.set('r', '{}'.format(diameter/2*self.mmpx))
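# Illustrative usage sketch (not part of the original module; the geometry
# values are arbitrary and skeleton.svg is assumed to define the referenced
# layers such as Top and Silk):
#
#   svg = get_target()
#   svg.add_package({'name': 'DEMO'})
#   svg.add_pac_pad('smd', 0, (1.0, 0.6), (0.0, 0.0), 1)
#   svg.add_pac_line('Silk', 0.15, [(-1, -1), (1, -1), (1, 1), ('end', None)])
#   svg.output('demo.svg')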
if(__name__ == '__main__'):
target = get_target()
target.output('test.svg')
| gpl-3.0 | -2,957,371,261,297,704,000 | 23.517241 | 217 | 0.61158 | false |
GaretJax/ipd | ipd/metadata/resource.py | 1 | 7569 | import json
from uuid import UUID
from twisted.internet import defer
from twisted.web import resource, server
class RecursiveResource(resource.Resource, object):
isLeaf = False
def getChild(self, name, request):
if name == '':
child = self
else:
try:
child = self.children[name]
except KeyError:
child = super(RecursiveResource, self).getChild(name, request)
return child
class MetadataMixin(object):
def __init__(self, server):
super(MetadataMixin, self).__init__()
self.meta_server = server
def get_metadata_from_request(self, request):
h = request.requestHeaders
hypervisor = h.getRawHeaders('X-Tenant-ID')[0]
domain_uuid = UUID(hex=h.getRawHeaders('X-Instance-ID')[0])
#domain_ip = h.getRawHeaders('X-Forwarded-For')[0]
return self.meta_server.get_metadata_for_uuid(hypervisor, domain_uuid)
class DelayedRendererMixin(object):
def _delayed_renderer(self, request):
raise NotImplementedError
def finish_write(self, res, request):
request.write(res)
request.finish()
def finish_err(self, failure, request):
request.setResponseCode(500)
request.write('500: Internal server error')
request.finish()
return failure
def render_GET(self, request):
d = self._delayed_renderer(request)
d.addCallback(self.finish_write, request)
d.addErrback(self.finish_err, request)
return server.NOT_DONE_YET
class UserdataResource(DelayedRendererMixin, resource.Resource, object):
isLeaf = True
def __init__(self, server):
super(UserdataResource, self).__init__()
self.meta_server = server
def get_userdata_from_request(self, request):
h = request.requestHeaders
hypervisor = h.getRawHeaders('X-Tenant-ID')[0]
domain_uuid = UUID(hex=h.getRawHeaders('X-Instance-ID')[0])
#domain_ip = h.getRawHeaders('X-Forwarded-For')[0]
return self.meta_server.get_userdata_for_uuid(hypervisor, domain_uuid)
def _delayed_renderer(self, request):
return self.get_userdata_from_request(request)
class AtomResource(DelayedRendererMixin, MetadataMixin, resource.Resource,
object):
def _delayed_renderer(self, request):
d = self.get_metadata_from_request(request)
d.addCallback(self.get_value)
return d
def get_value(self, metadata):
raise NotImplementedError()
class KeyedAtomResource(AtomResource):
isLeaf = True
def __init__(self, server, key):
super(KeyedAtomResource, self).__init__(server)
self._key = key
def get_value(self, metadata):
val = metadata
for k in self._key:
val = val[k]
return str(val)
class KeysResource(AtomResource):
isLeaf = False
formats = {
'openssh-key': 'OPENSSH'
}
def get_value(self, metadata):
keys = ('{}={}'.format(i, k[0])
for i, k in enumerate(metadata['public_keys']))
return '\n'.join(keys)
def getChild(self, name, request):
if not name:
return self
key = int(name)
fmt = self.formats[request.postpath[0]]
return KeyRenderer(self.meta_server, key, fmt)
class KeyRenderer(KeyedAtomResource):
def __init__(self, server, key, fmt):
super(KeyedAtomResource, self).__init__(server)
self._key = key
self._format = fmt
def get_value(self, metadata):
key = metadata['public_keys'][self._key][1]
return key.toString(self._format)
class IndexResource(RecursiveResource):
    isLeaf = False
def render_GET(self, request):
for k, v in sorted(self.children.items()):
request.write(k)
if not v.isLeaf:
request.write('/\n')
else:
request.write('\n')
request.finish()
return server.NOT_DONE_YET
class EC2MetadataAPI(IndexResource):
isLeaf = False
version = '2009-04-04'
def __init__(self, server):
super(EC2MetadataAPI, self).__init__()
meta = IndexResource()
meta.putChild('hostname', KeyedAtomResource(server, ['hostname']))
meta.putChild('instance-id', KeyedAtomResource(server, ['uuid']))
meta.putChild('public-keys', KeysResource(server))
self.putChild('meta-data', meta)
self.putChild('user-data', UserdataResource(server))
class OpenstackMetadataAPI(IndexResource):
version = '2012-08-10'
def __init__(self, server):
super(OpenstackMetadataAPI, self).__init__()
self.putChild('meta_data.json', OpenstackMetadata(server))
self.putChild('user_data', UserdataResource(server))
class OpenstackMetadata(DelayedRendererMixin, MetadataMixin, resource.Resource,
object):
isLeaf = True
@defer.inlineCallbacks
def _delayed_renderer(self, request):
metadata = yield self.get_metadata_from_request(request)
metadata['uuid'] = str(metadata['uuid'])
metadata['public_keys'] = {
k: v.toString('OPENSSH')
for k, v in metadata['public_keys']
}
defer.returnValue(json.dumps(metadata))
class APIVersionsIndex(RecursiveResource):
def register_api(self, res):
self.putChild(res.version, res)
latest = self.children.get('latest', None)
if not latest or res.version > latest.version:
self.putChild('latest', res)
def render_GET(self, request):
versions = sorted(self.children)
if versions:
return '\n'.join(versions) + '\n'
else:
return ''
class InstanceCallback(DelayedRendererMixin, resource.Resource):
isLeaf = True
def __init__(self, server):
self._server = server
@defer.inlineCallbacks
def _delayed_renderer(self, request):
instance_uuid = request.postpath[0]
data = yield self._server.get_instancedata_for_uuid(instance_uuid)
defer.returnValue(json.dumps(data))
def render_POST(self, request):
setip = 'nosetip' not in request.args
instance_id = request.args['instance_id'][0]
hostname = request.args['hostname'][0]
data = {
'hostname': hostname,
'status': 'running',
}
if setip:
ip = request.requestHeaders.getRawHeaders('X-Forwarded-For')[0]
data['ip_address'] = ip
for k, v in request.args.iteritems():
if k.startswith('pub_key_'):
try:
data[k] = v[0].strip()
except:
pass
self._server.add_instancedata_for_uuid(instance_id, data)
return ''
class MetadataRootResource(RecursiveResource):
isLeaf = False
def __init__(self, server):
super(MetadataRootResource, self).__init__()
self._server = server
self.ec2 = APIVersionsIndex()
self.ec2.register_api(EC2MetadataAPI(server))
self.openstack = APIVersionsIndex()
self.openstack.register_api(OpenstackMetadataAPI(server))
self.instancedata = InstanceCallback(self._server)
def getChild(self, name, request):
if name == 'openstack':
child = self.openstack
elif name == 'instancedata':
child = self.instancedata
else:
child = self.ec2.getChild(name, request)
return child
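# Illustrative request paths served by this resource tree (not exhaustive):
#
#   /latest/meta-data/hostname
#   /2009-04-04/meta-data/public-keys/0/openssh-key
#   /openstack/2012-08-10/meta_data.json
#   /instancedata/<instance-uuid>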
| mit | 9,081,998,082,272,325,000 | 27.454887 | 79 | 0.608271 | false |
yen223/mclass-sky | mclearn/photometry.py | 1 | 10079 | """ Procedures specific to photometric data. """
import os
import numpy as np
from urllib.request import urlopen
from urllib.parse import urlencode
from .tools import load_results
def reddening_correction_sfd98(extinction_r):
""" Compute the reddening values using the SFD98 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
A_u = E_BV * 5.155
A_g = E_BV * 3.793
A_r = E_BV * 2.751
A_i = E_BV * 2.086
A_z = E_BV * 1.479
return (A_u, A_g, A_r, A_i, A_z)
def reddening_correction_sf11(extinction_r):
""" Compute the reddening values using the SF11 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
A_u = E_BV * 4.239
A_g = E_BV * 3.303
A_r = E_BV * 2.285
A_i = E_BV * 1.698
A_z = E_BV * 1.263
return (A_u, A_g, A_r, A_i, A_z)
def reddening_correction_w14(extinction_r):
""" Compute the reddening values using the W14 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
region_2 = np.logical_and(E_BV >= 0.04, E_BV < 0.08)
region_3 = E_BV >= 0.08
E_BV[region_2] = E_BV[region_2] + 0.5 * (E_BV[region_2] - 0.04)
E_BV[region_3] = E_BV[region_3] + 0.02
A_u = E_BV * 4.305
A_g = E_BV * 3.288
A_r = E_BV * 2.261
A_i = E_BV * 1.714
A_z = E_BV * 1.263
return (A_u, A_g, A_r, A_i, A_z)
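# Minimal usage sketch (the input values are made up for illustration): each
# correction set maps the SDSS r-band extinction column to per-band extinction
# values.
#
#   import numpy as np
#   extinction_r = np.array([0.10, 0.25, 0.40])
#   A_u, A_g, A_r, A_i, A_z = reddening_correction_w14(extinction_r)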
def correct_magnitudes(data, magnitudes, corrections, suffix):
""" Correct the values of magntidues given a correction set.
Parameters
----------
data : DataFrame
The DataFrame containing the magnitudes.
magnitudes : array
The column names of the magnitudes.
    corrections : array
        The set of correction values in the same order as `magnitudes`.
    suffix : str
        The suffix appended to the corrected magnitude column names.
    """
for mag, cor in zip(magnitudes, corrections):
data[mag + suffix] = data[mag] - cor
def compute_colours(data, colours, suffix):
""" Compute specified combinations of colours.
Parameters
----------
data : DataFrame
The DataFrame containing the magnitudes.
colours : array
The list of colour combinations to be computed.
    suffix : str
        The suffix added to each colour name to distinguish between correction sets.
"""
for colour in colours:
prefix = 'psf' if colour[0].startswith('psf') else 'petro'
colour_name = prefix + colour[0][-2:] + colour[1][-2:]
data[colour_name + suffix] = data[colour[0] + suffix] - data[colour[1] + suffix]
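# Illustrative chain of the two helpers (assumes `data` is a pandas DataFrame
# holding the SDSS psfMag_* and extinction_r columns used elsewhere in this
# module):
#
#   corrections = reddening_correction_w14(data['extinction_r'])
#   mags = ['psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z']
#   correct_magnitudes(data, mags, corrections, '_w14')
#   compute_colours(data, [('psfMag_u', 'psfMag_g')], '_w14')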
def fetch_sloan_data(sql, output, url=None, fmt='csv', verbose=True):
""" Run an SQL query on the Sloan Sky Server.
Parameters
----------
sql : str
The sql query.
output : str
The path where the queried data will be stored.
url : str
The url that will be used for fetching.
fmt : str
The format of the output, one of 'csv', 'xml', 'html'.
"""
assert fmt in ['csv','xml','html'], "Wrong format!"
if not url:
url = 'http://skyserver.sdss.org/dr10/en/tools/search/x_sql.aspx'
# filter out the comments in the sql query
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep
# make the sql query
if verbose:
print('Connecting to the server...')
params = urlencode({'cmd': fsql, 'format': fmt})
query = urlopen(url + '?{}'.format(params))
# ignore the first line (the name of table)
query.readline()
if verbose:
print('Writing to file...')
with open(output, 'wb') as f:
f.write(query.read())
if verbose:
print('Success!')
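# Example call (the query is a small sketch against the SDSS SkyServer schema
# and the output filename is arbitrary):
#
#   sql = "SELECT TOP 10 objID, psfMag_r FROM PhotoObj"
#   fetch_sloan_data(sql, 'sloan_sample.csv')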
def fetch_filter(filter, download_url, filter_dir=''):
""" Get a filter from the internet.
Parameters
----------
filter : char
Name of the filters. Must be one of u, g, r, i, and z.
download_url : str
The URL where the filter can be downloaded.
Returns
-------
data : array
The downloaded filter data.
"""
assert filter in 'ugriz'
url = download_url % filter
if not os.path.exists(filter_dir):
os.makedirs(filter_dir)
loc = os.path.join(filter_dir, '%s.dat' % filter)
if not os.path.exists(loc):
filter_file = urlopen(url)
with open(loc, 'wb') as f:
f.write(filter_file.read())
with open(loc, 'rb') as f:
data = np.loadtxt(f)
return data
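# Sketch of downloading one filter curve (the URL is a placeholder; it must
# contain a '%s' that receives the band letter):
#
#   url = 'http://example.org/sdss-filters/%s.dat'
#   r_band = fetch_filter('r', url, filter_dir='filters')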
def fetch_spectrum(spectrum_url, spectra_dir=''):
""" Get a spectrum from the internet.
Parameters
----------
spectrum_url : str
The URL where the spectrum can be downloaded.
Returns
-------
data : array
The downloaded spectrum data.
"""
if not os.path.exists(spectra_dir):
os.makedirs(spectra_dir)
refspec_file = os.path.join(spectra_dir, spectrum_url.split('/')[-1])
if not os.path.exists(refspec_file):
spectrum_file = urlopen(spectrum_url)
with open(refspec_file, 'wb') as f:
f.write(spectrum_file.read())
with open(refspec_file, 'rb') as f:
data = np.loadtxt(f)
return data
def clean_up_subclasses(classes, subclasses):
""" Clean up the names of the subclasses in the SDSS dataset.
Parameters
----------
classes : array
        The array containing the classes. This will be prepended to the subclasses.
subclasses : array
The array containing the subclasses.
"""
# remove null references
subclasses.replace('null', '', inplace=True)
# remove HD catalog number (stored in brackets)
subclasses.replace(r'\s*\(\d+\)\s*', '', regex=True, inplace=True)
    # capitalise only the first letter of some subclasses
subclasses.replace('BROADLINE', 'Broadline', inplace=True)
subclasses.replace('STARFORMING', 'Starforming', inplace=True)
subclasses.replace('STARBURST', 'Starburst', inplace=True)
subclasses.replace('STARBURST BROADLINE', 'Starburst Broadline', inplace=True)
subclasses.replace('AGN BROADLINE', 'AGN Broadline', inplace=True)
subclasses.replace('STARFORMING BROADLINE', 'Starforming Broadline', inplace=True)
# remove other brackets
subclasses.replace('F8V (G_243-63)', 'F8V', inplace=True)
subclasses.replace('K5 (G_19-24)', 'K5', inplace=True)
subclasses.replace('sd:F0 (G_84-29)', 'sd:F0', inplace=True)
subclasses.replace('G0 (G_101-29)', 'G0', inplace=True)
subclasses.replace('A4 (G_165-39)', 'A4', inplace=True)
subclasses.replace('A4p (G_37-26)', 'A4p', inplace=True)
not_empty = subclasses != ''
subclasses.loc[not_empty] = classes[not_empty] + ' ' + subclasses[not_empty]
def optimise_sdss_features(sdss, scaler_path):
""" Apply the W14 reddening correction and compute key colours in the SDSS dataset.
Parameters
----------
sdss : DataFrame
The DataFrame containing photometric features.
"""
    # compute the W14 set of reddening corrections
A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14 = reddening_correction_w14(sdss['extinction_r'])
# useful variables
psf_magnitudes = ['psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z']
petro_magnitudes = ['petroMag_u', 'petroMag_g', 'petroMag_r', 'petroMag_i', 'petroMag_z']
w14_corrections = [A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14]
colours = [('psfMag_u', 'psfMag_g'), ('psfMag_g', 'psfMag_r'), ('psfMag_r', 'psfMag_i'), ('psfMag_i', 'psfMag_z'),
('petroMag_u', 'petroMag_g'), ('petroMag_g', 'petroMag_r'), ('petroMag_r', 'petroMag_i'), ('petroMag_i', 'petroMag_z')]
# calculate the corrected magnitudes
correct_magnitudes(sdss, psf_magnitudes, w14_corrections, '_w14')
correct_magnitudes(sdss, petro_magnitudes, w14_corrections, '_w14')
    # compute the colour combinations
compute_colours(sdss, colours, '_w14')
# scale features
w14_feature_cols = ['psfMag_r_w14', 'psf_u_g_w14', 'psf_g_r_w14', 'psf_r_i_w14',
'psf_i_z_w14', 'petroMag_r_w14', 'petro_u_g_w14', 'petro_g_r_w14',
'petro_r_i_w14', 'petro_i_z_w14', 'petroRad_r']
scaler = load_results(scaler_path)
sdss[w14_feature_cols] = scaler.transform(sdss[w14_feature_cols]) | bsd-3-clause | -5,552,300,325,919,544,000 | 27.882521 | 134 | 0.580613 | false |
weka511/bioinformatics | nwck.py | 1 | 1881 | # Copyright (C) 2017 Greenweaves Software Pty Ltd
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>
import newick
tokenizer = newick.Tokenizer()
parser = newick.Parser(tokenizer)
def get_path_to_root(clade,path=[]):
if (len(path)==0):
path=[(clade.id,clade.length)]
ancestor = clade.parent
if ancestor == None:
return path
pp=path[:]
pp.append((ancestor.id,ancestor.length))
return get_path_to_root(ancestor,pp)
def get_path(clade):
return [clade.id]+get_path_to_root(clade)
def diff(path1,path2):
def length(path):
return sum(l for (_,l) in path)
if len(path1)>len(path2):
return diff(path2,path1)
i=0
while i<len(path1) and path1[i][0]==path2[i][0]:
i+=1
return length(path1[i:]) +length(path2[i:])
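# Small illustration of the helpers above (hypothetical tree string; assumes
# parse() returns the same (tree, lookup) pair used in the loop below):
#
#   _, lk = parser.parse('(dog:20,(elephant:30,horse:60):20):50;')
#   paths = [get_path_to_root(lk[c])[::-1] for c in ('dog', 'elephant')]
#   print(diff(paths[0], paths[1]))   # path distance between the two leaves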
with open (r'C:\Users\Weka\Downloads\rosalind_nkew.txt') as f:
diffs=[]
i = 0
tree = None
lookup = None
for line in f:
if i%3==0:
print (line.strip())
tree,lookup=parser.parse(line.strip())
elif i%3==1:
clades = line.strip().split()
print (clades)
ancestors=[get_path_to_root(lookup[clade])[::-1] for clade in clades]
diffs.append(diff(ancestors[0],ancestors[1]))
i+=1
print (diffs) | gpl-3.0 | 8,854,605,071,501,078,000 | 29.852459 | 81 | 0.643275 | false |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/module_sentiment/mensure.py | 1 | 2797 | from sent_classification_module import *
from class_roc import Roc
if __name__ == '__main__':
sent = SentClassifiers('dataset-portuguese')
nv_roc = Roc()
svm_roc = Roc()
dt_roc = Roc()
rf_roc = Roc()
gd_roc = Roc()
rl_roc = Roc()
cm_roc = Roc()
fpr = []
tpr = []
auc = []
acuracias = []
nv_ac,_,nv_p,nv_r,nv_f1,nv_e,nv_cm,nv_roc = sent.CMultinomialNV()
print("Naive")
print('ac = %f'%nv_ac)
print('p = %f'%nv_p)
print('r = %f'%nv_r)
print('f1 = %f'%nv_f1)
print('e = %f'%nv_e)
print('---------------')
acuracias.append(nv_ac)
fpr.append(nv_roc.get_fpr())
tpr.append(nv_roc.get_tpr())
auc.append(nv_roc.get_auc())
#sent.plot_confuse_matrix(nv_cm)
svm_ac,_,svm_p,svm_r,svm_f1,svm_e,svm_cm,svm_roc = sent.CSuportVectorMachine()
print("SVM")
print('ac = %f'%svm_ac)
print('p = %f'%svm_p)
print('r = %f'%svm_r)
print('f1 = %f'%svm_f1)
print('e = %f'%svm_e)
print('---------------')
acuracias.append(svm_ac)
fpr.append(svm_roc.get_fpr())
tpr.append(svm_roc.get_tpr())
auc.append(svm_roc.get_auc())
dt_ac,_,dt_p,dt_r,dt_f1,dt_e,dt_cm,dt_roc = sent.CDecisionTree()
print("Arvore de Decisao")
print('ac = %f'%dt_ac)
print('p = %f'%dt_p)
print('r = %f'%dt_r)
print('f1 = %f'%dt_f1)
print('e = %f'%dt_e)
print('---------------')
acuracias.append(dt_ac)
fpr.append(dt_roc.get_fpr())
tpr.append(dt_roc.get_tpr())
auc.append(dt_roc.get_auc())
rf_ac,_,rf_p,rf_r,rf_f1,rf_e,rf_cm,rf_roc = sent.CRandomForest()
print("Radom Forest")
print('ac = %f'%rf_ac)
print('p = %f'%rf_p)
print('r = %f'%rf_r)
print('f1 = %f'%rf_f1)
print('e = %f'%rf_e)
print('---------------')
acuracias.append(rf_ac)
fpr.append(rf_roc.get_fpr())
tpr.append(rf_roc.get_tpr())
auc.append(rf_roc.get_auc())
rl_ac,_,rl_p,rl_r,rl_f1,rl_e,rl_cm,rl_roc = sent.CLogistRegression()
    print('Logistic Regression')
print('ac = %f'%rl_ac)
print('p = %f'%rl_p)
print('r = %f'%rl_r)
print('f1 = %f'%rl_f1)
print('e = %f'%rl_e)
print('---------------')
acuracias.append(rl_ac)
fpr.append(rl_roc.get_fpr())
tpr.append(rl_roc.get_tpr())
auc.append(rl_roc.get_auc())
pesos = sent.calc_weigth(acuracias)
k = 10
#pred,original = sent.committee(k,pesos)
pesos = sent.calc_weigth(acuracias)
cm_ac,_,cm_p,cm_r,cm_f1,cm_e,cm_median,cm_roc = sent.committee(k,pesos)
    print('Committee')
print('ac = %f'%cm_ac)
print('p = %f'%cm_p)
print('r = %f'%cm_r)
print('f1 = %f'%cm_f1)
print('e = %f'%cm_e)
print('---------------')
#cm_roc = sent.roc(cm_mean)
fpr.append(cm_roc.get_fpr())
tpr.append(cm_roc.get_tpr())
auc.append(cm_roc.get_auc())
label = ['naive','svm','tree','forest','logistic','committee']
sent.plot_roc_all(fpr,tpr,auc,label)
#sent.plot_roc(roc.get_fpr(),roc.get_tpr(),roc.get_auc(),'red','nv')
#sent.plot_confuse_matrix(nv_cm)
| gpl-3.0 | -1,176,213,288,249,679,600 | 19.873134 | 79 | 0.584406 | false |
anderson1008/NOCulator | hring/src/Script/sim_batch.py | 1 | 1964 | #!/usr/bin/python
import sys
import os
import re
import fnmatch
import string
workload_dir = "/Users/xiyuexiang/GoogleDrive/NOCulator/hring/src/bin/"
workload = "mix_app"
insns_count = 1000000
ipc_alone = [2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91]
ipc_share = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
out_dir = "/Users/xiyuexiang/GoogleDrive/NOCulator/hring/src/bin/sweep_batch/"
filename = '../bin/sweep_batch_period.txt'
filename_out = str(filename)
if os.path.exists(filename_out) == True:
os.remove(filename_out)
fo_out = open(filename_out, "a")
fo_out.write('\n\n' + 'sweep packet batching period (epoch = 100000)' + '\n\n')
fo_out.write('period'.ljust(15) + 'w_speedup'.ljust(15) + 'h_speedup'.ljust(15))
fo_out.write('\n')
for sim_index in range(100, 5100, 100):
out_file = "sim_" + str(sim_index) + ".out"
command_line = "mono ../bin/sim.exe -config " + workload_dir + "config_0.txt -output " + out_dir + out_file + " -workload " + workload_dir + workload + ' 3 ' + "-STC_batchPeriod " + str(sim_index)
os.system (command_line)
# collect result
result_file = open (out_dir + out_file, 'r')
result = result_file.read()
result_file.close()
searchObj = re.search(r'(?:"active_cycles":\[(.*?)])',result)
splitObj = re.split('\W+',searchObj.group(1))
active_cycles = splitObj
weighted_speedup = 0
temp0 = 0
for i in range (0, 16, 1):
ipc_share [i] = float(insns_count) / int(active_cycles[i])
weighted_speedup = ipc_share[i] / ipc_alone[i] + weighted_speedup
temp0 = ipc_alone[i] / ipc_share[i] + temp0
harmonic_speedup = 16 / temp0
print str(sim_index) + " " + str("%.2f" % weighted_speedup) + " " + str("%.2f" % harmonic_speedup)
fo_out.write('\n')
fo_out.write(str(sim_index).ljust(15) + str(weighted_speedup).ljust(15) + str(harmonic_speedup).ljust(15))
fo_out.write('\n')
fo_out.close()
| mit | 6,003,752,314,014,478,000 | 32.45614 | 197 | 0.623727 | false |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/examples/analysis_retinotopicmapping/batch_MarkPatches.py | 1 | 1417 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 30 14:46:38 2014
@author: junz
"""
import os
import matplotlib.pyplot as plt
import corticalmapping.core.FileTools as ft
import corticalmapping.RetinotopicMapping as rm
trialName = '160208_M193206_Trial1.pkl'
names = [
['patch01', 'V1'],
['patch02', 'RL'],
['patch03', 'LM'],
['patch04', 'AL'],
['patch05', 'AM'],
['patch06', 'PM'],
['patch07', 'MMA'],
['patch08', 'MMP'],
['patch09', 'LLA'],
# ['patch10', 'AM'],
# ['patch11', 'LLA'],
# ['patch12', 'MMP'],
# ['patch13', 'MMP']
# ['patch14', 'MMP']
]
currFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(currFolder)
trialPath = os.path.join(currFolder,trialName)
trialDict = ft.loadFile(trialPath)
finalPatches = dict(trialDict['finalPatches'])
for i, namePair in enumerate(names):
currPatch = finalPatches.pop(namePair[0])
newPatchDict = {namePair[1]:currPatch}
finalPatches.update(newPatchDict)
trialDict.update({'finalPatchesMarked':finalPatches})
ft.saveFile(trialPath,trialDict)
trial, _ = rm.loadTrial(trialPath)
f = plt.figure(figsize=(10,10))
ax = f.add_subplot(111)
trial.plotFinalPatchBorders2(plotAxis = ax,borderWidth=2)
plt.show()
f.savefig(trialName[0:-4]+'_borders.pdf',dpi=600)
f.savefig(trialName[0:-4]+'_borders.png',dpi=300) | gpl-3.0 | 4,602,845,023,401,263,600 | 24.321429 | 57 | 0.617502 | false |
zmarvel/slowboy | test_roms/scripts/gentilemap2.py | 1 | 2654 |
import string
import sys
TWIDTH = 8
THEIGHT = 8
TSWIDTH = 128
TSHEIGHT = 128
TSWIDTH_TILES = TSWIDTH // TWIDTH
TSHEIGHT_TILES = TSHEIGHT // THEIGHT
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 144
SWIDTH_TILES = SCREEN_WIDTH // TWIDTH
SHEIGHT_TILES = SCREEN_HEIGHT // THEIGHT
BACKGROUND_WIDTH = 256
BACKGROUND_HEIGHT = 256
BGWIDTH_TILES = BACKGROUND_WIDTH // TWIDTH
BGHEIGHT_TILES = BACKGROUND_HEIGHT // THEIGHT
def s8(u):
return ((u ^ 0xff) + 1) & 0xff
def sub(a, b):
return (a + s8(b)) & 0xff
def strtotilemap(s, offset, width, left, right, pad):
# width in tiles
# left and right are tileid for left and right border
# only support one case for now
s = s.lower()
out = [left]
col = 1
for i in range(len(s)):
if col == width-1:
out.append(right)
out.extend([pad for _ in range(BGWIDTH_TILES-width)])
out.append(left)
col = 1
if s[i] == ' ':
out.append(pad)
elif s[i] not in string.ascii_lowercase:
            raise ValueError('only ascii letters are supported: {!r}'.format(s[i]))
else:
out.append(offset + (ord(s[i]) - 97))
col += 1
print(len(out))
if col <= width:
# pad
out.extend([pad for _ in range(width-col-1)])
out.append(right)
out.extend([pad for _ in range(BGWIDTH_TILES-width)])
print(len(out))
print(out)
return out
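# Quick sanity sketch (uses the border/pad tile ids defined just below; the
# text is arbitrary): every emitted row is padded out to the full 32-tile
# background width.
#
#   row = strtotilemap('hello', 64 + 17, 20, LEFT_EDGE, RIGHT_EDGE, SPACE)
#   assert len(row) == BGWIDTH_TILES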
TOPLEFT_CORNER = 64+43
TOPRIGHT_CORNER = 64+44
BOTTOMLEFT_CORNER = 64+50
BOTTOMRIGHT_CORNER = 64+49
TOP_EDGE = 64+46
LEFT_EDGE = 64+45
RIGHT_EDGE = 64+47
BOTTOM_EDGE = 64+48
SPACE = 64+51
HEART = 64+6
fname = sys.argv[1]
with open(fname, 'wb+') as f:
# bg tilemap: 0x9800-0x9bff = 0x400
f.write(bytes(x % 64 for x in range(0, 0x400)))
# fg tilemap: 0x0xc00-0x9fff = 0x400
top_row = bytes([TOPLEFT_CORNER] + [TOP_EDGE for _ in range(18)] \
+ [TOPRIGHT_CORNER] + [SPACE for _ in range(BGWIDTH_TILES-20)])
f.write(top_row)
encoded = strtotilemap("hello world", 64+17, 20, LEFT_EDGE, RIGHT_EDGE, HEART)
blank_rows = []
for i in range(3):
blank_rows.extend([LEFT_EDGE] + [SPACE for _ in range(18)] + [RIGHT_EDGE])
blank_rows.extend(HEART for _ in range(BGWIDTH_TILES-SWIDTH_TILES))
bottom_row = [BOTTOMLEFT_CORNER] + [BOTTOM_EDGE for _ in range(18)] \
+ [BOTTOMRIGHT_CORNER]
bottom_row.extend(HEART for _ in range(BGWIDTH_TILES-SWIDTH_TILES))
l = 0x400 - len(top_row) - len(encoded) - len(blank_rows) - len(bottom_row)
f.write(bytes(encoded))
f.write(bytes(blank_rows))
f.write(bytes(bottom_row))
f.write(bytes(0 for _ in range(l)))
| mit | -4,803,830,658,140,106,000 | 28.164835 | 83 | 0.609646 | false |
dmlloyd/openjdk-modules | hotspot/.mx.jvmci/mx_jvmci.py | 1 | 31696 | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, shutil, zipfile, re, time, sys, datetime, platform
from os.path import join, exists, dirname, isdir
from argparse import ArgumentParser, REMAINDER
import StringIO
import xml.dom.minidom
import subprocess
import mx
import mx_gate
import mx_unittest
from mx_gate import Task
from mx_unittest import unittest
_suite = mx.suite('jvmci')
JVMCI_VERSION = 9
"""
Top level directory of the JDK source workspace.
"""
_jdkSourceRoot = dirname(_suite.dir)
_JVMCI_JDK_TAG = 'jvmci'
_minVersion = mx.VersionSpec('1.9')
# max version (first _unsupported_ version)
_untilVersion = None
_jvmciModes = {
'hosted' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI'],
'jit' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler'],
'disabled' : []
}
# TODO: can optimized be built without overriding release build?
_jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']
# TODO: add client once/if it can be built on 64-bit platforms
_jdkJvmVariants = ['server', 'client']
"""
Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
"""
_legacyVmbuilds = {
'product' : 'release',
'debug' : 'slowdebug'
}
"""
Translates a mx_jvmci:8 --vmbuild value to a mx_jvmci:9 --jdk-debug-level value.
"""
def _translateLegacyDebugLevel(debugLevel):
return _legacyVmbuilds.get(debugLevel, debugLevel)
"""
Translation table from mx_jvmci:8 --vm values to mx_jvmci:9 (--jdk-jvm-variant, --jvmci-mode) tuples.
"""
_legacyVms = {
'jvmci' : ('server', 'jit')
}
"""
A VM configuration composed of a JDK debug level, JVM variant and a JVMCI mode.
This is also a context manager that can be used with the 'with' statement to set/change
a VM configuration within a dynamic scope. For example:
    with VM(debugLevel='fastdebug'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
self.update(jvmVariant, debugLevel, jvmciMode)
def update(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
if jvmVariant in _legacyVms:
# Backwards compatibility for mx_jvmci:8 API
jvmVariant, newJvmciMode = _legacyVms[jvmVariant]
if jvmciMode is not None and jvmciMode != newJvmciMode:
mx.abort('JVM variant "' + jvmVariant + '" implies JVMCI mode "' + newJvmciMode +
'" which conflicts with explicitly specified JVMCI mode of "' + jvmciMode + '"')
jvmciMode = newJvmciMode
debugLevel = _translateLegacyDebugLevel(debugLevel)
assert jvmVariant is None or jvmVariant in _jdkJvmVariants, jvmVariant
assert debugLevel is None or debugLevel in _jdkDebugLevels, debugLevel
assert jvmciMode is None or jvmciMode in _jvmciModes, jvmciMode
self.jvmVariant = jvmVariant or _vm.jvmVariant
self.debugLevel = debugLevel or _vm.debugLevel
self.jvmciMode = jvmciMode or _vm.jvmciMode
def __enter__(self):
global _vm
self.previousVm = _vm
_vm = self
def __exit__(self, exc_type, exc_value, traceback):
global _vm
_vm = self.previousVm
_vm = VM(jvmVariant=_jdkJvmVariants[0], debugLevel=_jdkDebugLevels[0], jvmciMode='hosted')
def get_vm():
"""
Gets the configured VM.
"""
return _vm
def relativeVmLibDirInJdk():
mxos = mx.get_os()
if mxos == 'darwin':
return join('lib')
if mxos == 'windows' or mxos == 'cygwin':
return join('bin')
return join('lib', mx.get_arch())
def isJVMCIEnabled(vm):
assert vm in _jdkJvmVariants
return True
def _makehelp():
return subprocess.check_output([mx.gmake_cmd(), 'help'], cwd=_jdkSourceRoot)
def _runmake(args):
"""run the JDK make process
To build hotspot and import it into the JDK: "mx make hotspot import-hotspot"
{0}"""
jdkBuildDir = _get_jdk_build_dir()
if not exists(jdkBuildDir):
# JDK9 must be bootstrapped with a JDK8
compliance = mx.JavaCompliance('8')
jdk8 = mx.get_jdk(compliance.exactMatch, versionDescription=compliance.value)
cmd = ['sh', 'configure', '--with-debug-level=' + _vm.debugLevel, '--with-native-debug-symbols=external', '--disable-precompiled-headers', '--with-jvm-features=graal',
'--with-jvm-variants=' + _vm.jvmVariant, '--disable-warnings-as-errors', '--with-boot-jdk=' + jdk8.home, '--with-jvm-features=graal']
mx.run(cmd, cwd=_jdkSourceRoot)
cmd = [mx.gmake_cmd(), 'CONF=' + _vm.debugLevel]
if mx.get_opts().verbose:
cmd.append('LOG=debug')
cmd.extend(args)
if mx.get_opts().use_jdk_image and 'images' not in args:
cmd.append('images')
if not mx.get_opts().verbose:
mx.log('--------------- make execution ----------------------')
mx.log('Working directory: ' + _jdkSourceRoot)
mx.log('Command line: ' + ' '.join(cmd))
mx.log('-----------------------------------------------------')
mx.run(cmd, cwd=_jdkSourceRoot)
def _runmultimake(args):
"""run the JDK make process for one or more configurations"""
jvmVariantsDefault = ','.join(_jdkJvmVariants)
debugLevelsDefault = ','.join(_jdkDebugLevels)
parser = ArgumentParser(prog='mx multimake')
parser.add_argument('--jdk-jvm-variants', '--vms', help='a comma separated list of VMs to build (default: ' + jvmVariantsDefault + ')', metavar='<args>', default=jvmVariantsDefault)
parser.add_argument('--jdk-debug-levels', '--builds', help='a comma separated list of JDK debug levels (default: ' + debugLevelsDefault + ')', metavar='<args>', default=debugLevelsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
select = parser.add_mutually_exclusive_group()
select.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log files')
select.add_argument('-d', '--output-dir', help='directory for log files instead of current working directory', default=os.getcwd(), metavar='<dir>')
args = parser.parse_args(args)
jvmVariants = args.jdk_jvm_variants.split(',')
debugLevels = [_translateLegacyDebugLevel(dl) for dl in args.jdk_debug_levels.split(',')]
allStart = time.time()
for jvmVariant in jvmVariants:
for debugLevel in debugLevels:
if not args.console:
logFile = join(mx.ensure_dir_exists(args.output_dir), jvmVariant + '-' + debugLevel + '.log')
log = open(logFile, 'wb')
start = time.time()
mx.log('BEGIN: ' + jvmVariant + '-' + debugLevel + '\t(see: ' + logFile + ')')
verbose = ['-v'] if mx.get_opts().verbose else []
# Run as subprocess so that output can be directed to a file
cmd = [sys.executable, '-u', mx.__file__] + verbose + ['--jdk-jvm-variant=' + jvmVariant, '--jdk-debug-level=' + debugLevel, 'make']
mx.logv("executing command: " + str(cmd))
subprocess.check_call(cmd, cwd=_suite.dir, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + jvmVariant + '-' + debugLevel + '\t[' + str(duration) + ']')
else:
with VM(jvmVariant=jvmVariant, debugLevel=debugLevel):
_runmake([])
if not args.no_check:
with VM(jvmciMode='jit'):
run_vm(['-XX:-BootstrapJVMCI', '-version'])
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
class HotSpotProject(mx.NativeProject):
"""
Defines a NativeProject representing the HotSpot binaries built via make.
"""
def __init__(self, suite, name, deps, workingSets, **args):
assert name == 'hotspot'
mx.NativeProject.__init__(self, suite, name, "", [], deps, workingSets, None, None, join(suite.mxDir, name))
def eclipse_config_up_to_date(self, configZip):
# Assume that any change to this module might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
for _, source in self._get_eclipse_settings_sources().iteritems():
if configZip.isOlderThan(source):
return False
return True
def _get_eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the file providing its generated content.
"""
if not hasattr(self, '_eclipse_settings'):
esdict = {}
templateSettingsDir = join(self.dir, 'templates', 'eclipse', 'settings')
if exists(templateSettingsDir):
for name in os.listdir(templateSettingsDir):
source = join(templateSettingsDir, name)
esdict[name] = source
self._eclipse_settings = esdict
return self._eclipse_settings
def _eclipseinit(self, files=None, libFiles=None):
"""
Generates an Eclipse project for each HotSpot build configuration.
"""
roots = [
'ASSEMBLY_EXCEPTION',
'LICENSE',
'README',
'THIRD_PARTY_README',
'agent',
'make',
'src',
'test'
]
for jvmVariant in _jdkJvmVariants:
for debugLevel in _jdkDebugLevels:
name = jvmVariant + '-' + debugLevel
eclProjectDir = join(self.dir, 'eclipse', name)
mx.ensure_dir_exists(eclProjectDir)
out = mx.XMLDoc()
out.open('projectDescription')
out.element('name', data='hotspot:' + name)
out.element('comment', data='')
out.element('projects', data='')
out.open('buildSpec')
out.open('buildCommand')
out.element('name', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder')
out.element('triggers', data='full,incremental')
out.element('arguments', data='')
out.close('buildCommand')
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.cdt.core.cnature')
out.element('nature', data='org.eclipse.cdt.core.ccnature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.managedBuildNature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigNature')
out.close('natures')
if roots:
out.open('linkedResources')
for r in roots:
f = join(_suite.dir, r)
out.open('link')
out.element('name', data=r)
out.element('type', data='2' if isdir(f) else '1')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(f, eclProjectDir))
out.close('link')
out.open('link')
out.element('name', data='generated')
out.element('type', data='2')
generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'generated')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(generated, eclProjectDir))
out.close('link')
out.close('linkedResources')
out.close('projectDescription')
projectFile = join(eclProjectDir, '.project')
mx.update_file(projectFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(projectFile)
cprojectTemplate = join(self.dir, 'templates', 'eclipse', 'cproject')
cprojectFile = join(eclProjectDir, '.cproject')
with open(cprojectTemplate) as f:
content = f.read()
mx.update_file(cprojectFile, content)
if files:
files.append(cprojectFile)
settingsDir = join(eclProjectDir, ".settings")
mx.ensure_dir_exists(settingsDir)
for name, source in self._get_eclipse_settings_sources().iteritems():
out = StringIO.StringIO()
print >> out, '# GENERATED -- DO NOT EDIT'
print >> out, '# Source:', source
with open(source) as f:
print >> out, f.read()
content = out.getvalue()
mx.update_file(join(settingsDir, name), content)
if files:
files.append(join(settingsDir, name))
def getBuildTask(self, args):
return JDKBuildTask(self, args, _vm.debugLevel, _vm.jvmVariant)
class JDKBuildTask(mx.NativeBuildTask):
def __init__(self, project, args, debugLevel, jvmVariant):
mx.NativeBuildTask.__init__(self, args, project)
self.jvmVariant = jvmVariant
self.debugLevel = debugLevel
def __str__(self):
return 'Building JDK[{}, {}]'.format(self.debugLevel, self.jvmVariant)
def build(self):
if mx.get_opts().use_jdk_image:
_runmake(['images'])
else:
_runmake([])
self._newestOutput = None
def clean(self, forBuild=False):
if forBuild: # Let make handle incremental builds
return
if exists(_get_jdk_build_dir(self.debugLevel)):
_runmake(['clean'])
self._newestOutput = None
# Backwards compatibility for mx_jvmci:8 API
def buildvms(args):
_runmultimake(args)
def run_vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, debugLevel=None, vmbuild=None):
"""run a Java program by executing the java executable in a JVMCI JDK"""
jdkTag = mx.get_jdk_option().tag
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.abort('The "--jdk" option must have the tag "' + _JVMCI_JDK_TAG + '" when running a command requiring a JVMCI VM')
jdk = get_jvmci_jdk(debugLevel=debugLevel or _translateLegacyDebugLevel(vmbuild))
return jdk.run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
run_vm(vmArgs + [mainClass] + mainClassArgs)
mx_unittest.set_vm_launcher('JVMCI VM launcher', _unittest_vm_launcher)
def _jvmci_gate_runner(args, tasks):
# Build release server VM now so we can run the unit tests
with Task('BuildHotSpotJVMCIHosted: release', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'release'])
# Run unit tests in hosted mode
with VM(jvmVariant='server', debugLevel='release', jvmciMode='hosted'):
with Task('JVMCI UnitTests: hosted-release', tasks) as t:
if t: unittest(['--suite', 'jvmci', '--enable-timing', '--verbose', '--fail-fast'])
# Build the other VM flavors
with Task('BuildHotSpotJVMCIOthers: fastdebug', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'fastdebug'])
with Task('CleanAndBuildIdealGraphVisualizer', tasks, disableJacoco=True) as t:
if t and platform.processor() != 'sparc':
buildxml = mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'))
mx.run(['ant', '-f', buildxml, '-q', 'clean', 'build'], env=_igvBuildEnv())
mx_gate.add_gate_runner(_suite, _jvmci_gate_runner)
mx_gate.add_gate_argument('-g', '--only-build-jvmci', action='store_false', dest='buildNonJVMCI', help='only build the JVMCI VM')
def _igvJdk():
v8u20 = mx.VersionSpec("1.8.0_20")
v8u40 = mx.VersionSpec("1.8.0_40")
v8 = mx.VersionSpec("1.8")
def _igvJdkVersionCheck(version):
return version >= v8 and (version < v8u20 or version >= v8u40)
return mx.get_jdk(_igvJdkVersionCheck, versionDescription='>= 1.8 and < 1.8.0u20 or >= 1.8.0u40', purpose="building & running IGV").home
def _igvBuildEnv():
# When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
env = dict(os.environ)
proxy = os.environ.get('http_proxy')
if not (proxy is None) and len(proxy) > 0:
if '://' in proxy:
# Remove the http:// prefix (or any other protocol prefix)
proxy = proxy.split('://', 1)[1]
# Separate proxy server name and port number
proxyName, proxyPort = proxy.split(':', 1)
proxyEnv = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
env['ANT_OPTS'] = proxyEnv
env['JAVA_HOME'] = _igvJdk()
return env
def igv(args):
"""run the Ideal Graph Visualizer"""
logFile = '.ideal_graph_visualizer.log'
with open(join(_suite.dir, logFile), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
nbplatform = join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
# Remove NetBeans platform if it is earlier than the current supported version
if exists(nbplatform):
updateTrackingFile = join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml')
if not exists(updateTrackingFile):
mx.log('Could not find \'' + updateTrackingFile + '\', removing NetBeans platform')
shutil.rmtree(nbplatform)
else:
dom = xml.dom.minidom.parse(updateTrackingFile)
currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
supportedVersion = mx.VersionSpec('3.43.1')
if currentVersion < supportedVersion:
mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
shutil.rmtree(nbplatform)
elif supportedVersion < currentVersion:
mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
if not exists(nbplatform):
mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
env = _igvBuildEnv()
# make the jar for Batik 1.7 available.
env['IGV_BATIK_JAR'] = mx.library('BATIK').get_path(True)
if mx.run(['ant', '-f', mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml')), '-l', mx._cygpathU2W(fp.name), 'run'], env=env, nonZeroIsFatal=False):
mx.abort("IGV ant build & launch failed. Check '" + logFile + "'. You can also try to delete 'src/share/tools/IdealGraphVisualizer/nbplatform'.")
def c1visualizer(args):
"""run the Cl Compiler Visualizer"""
libpath = join(_suite.dir, 'lib')
if mx.get_os() == 'windows':
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
else:
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')
    # Check whether the current C1Visualizer installation is up-to-date
if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
mx.log('Updating C1Visualizer')
shutil.rmtree(join(libpath, 'c1visualizer'))
archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)
if not exists(executable):
zf = zipfile.ZipFile(archive, 'r')
zf.extractall(libpath)
if not exists(executable):
mx.abort('C1Visualizer binary does not exist: ' + executable)
if mx.get_os() != 'windows':
        # Make sure that execution is allowed. The zip file does not always specify that correctly
os.chmod(executable, 0777)
mx.run([executable])
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
if mx.get_arch() == "sparcv9":
flavor = "sparcv9"
lib = mx.add_lib_suffix('hsdis-' + mx.get_arch())
path = join(_suite.dir, 'lib', lib)
sha1s = {
'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
'sparcv9/hsdis-sparcv9.so': '970640a9af0bd63641f9063c11275b371a59ee60',
}
flavoredLib = flavor + "/" + lib
if flavoredLib not in sha1s:
mx.logv("hsdis not supported on this plattform or architecture")
return
if not exists(path):
sha1 = sha1s[flavoredLib]
sha1path = path + '.sha1'
mx.download_file_with_sha1('hsdis', path, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + flavoredLib], sha1, sha1path, True, True, sources=False)
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = mx.library('HCFDIS').get_path(resolve=True)
mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
def jol(args):
"""Java Object Layout"""
joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)
candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
if len(candidates) > 0:
candidates = mx.select_items(sorted(candidates))
else:
# mx.findclass can be mistaken, don't give up yet
candidates = args
run_vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + candidates)
def _get_openjdk_os():
# See: common/autoconf/platform.m4
os = mx.get_os()
if 'darwin' in os:
os = 'macosx'
elif 'linux' in os:
os = 'linux'
elif 'solaris' in os:
os = 'solaris'
elif 'cygwin' in os or 'mingw' in os:
os = 'windows'
return os
def _get_openjdk_cpu():
cpu = mx.get_arch()
if cpu == 'amd64':
cpu = 'x86_64'
elif cpu == 'sparcv9':
cpu = 'sparcv9'
return cpu
def _get_openjdk_os_cpu():
return _get_openjdk_os() + '-' + _get_openjdk_cpu()
def _get_jdk_build_dir(debugLevel=None):
"""
Gets the directory into which the JDK is built. This directory contains
the exploded JDK under jdk/ and the JDK image under images/jdk/.
"""
if debugLevel is None:
debugLevel = _vm.debugLevel
name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
return join(dirname(_suite.dir), 'build', name)
_jvmci_bootclasspath_prepends = []
def _get_hotspot_build_dir(jvmVariant=None, debugLevel=None):
"""
Gets the directory in which a particular HotSpot configuration is built
(e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/bsd_amd64_compiler2)
"""
if jvmVariant is None:
jvmVariant = _vm.jvmVariant
os = mx.get_os()
if os == 'darwin':
os = 'bsd'
arch = mx.get_arch()
buildname = {'client': 'compiler1', 'server': 'compiler2'}.get(jvmVariant, jvmVariant)
name = '{}_{}_{}'.format(os, arch, buildname)
return join(_get_jdk_build_dir(debugLevel=debugLevel), 'hotspot', name)
class JVMCI9JDKConfig(mx.JDKConfig):
def __init__(self, debugLevel):
self.debugLevel = debugLevel
jdkBuildDir = _get_jdk_build_dir(debugLevel)
jdkDir = join(jdkBuildDir, 'images', 'jdk') if mx.get_opts().use_jdk_image else join(jdkBuildDir, 'jdk')
mx.JDKConfig.__init__(self, jdkDir, tag=_JVMCI_JDK_TAG)
def parseVmArgs(self, args, addDefaultArgs=True):
args = mx.expand_project_in_args(args, insitu=False)
jacocoArgs = mx_gate.get_jacoco_agent_args()
if jacocoArgs:
args = jacocoArgs + args
args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args
# Remove JVMCI jars from class path. They are only necessary when
# compiling with a javac from JDK8 or earlier.
cpIndex, cp = mx.find_classpath_arg(args)
if cp:
excluded = frozenset([dist.path for dist in _suite.dists])
cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in excluded])
args[cpIndex] = cp
if '-version' in args:
ignoredArgs = args[args.index('-version') + 1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return self.processArgs(args, addDefaultArgs=addDefaultArgs)
# Overrides JDKConfig
def run_java(self, args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
if vm is None:
vm = 'server'
args = self.parseVmArgs(args, addDefaultArgs=addDefaultArgs)
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
cmd = [self.java] + ['-' + vm] + jvmciModeArgs + args
return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
"""
The dict of JVMCI JDKs indexed by debug-level names.
"""
_jvmci_jdks = {}
def get_jvmci_jdk(debugLevel=None):
"""
Gets the JVMCI JDK corresponding to 'debugLevel'.
"""
if not debugLevel:
debugLevel = _vm.debugLevel
jdk = _jvmci_jdks.get(debugLevel)
if jdk is None:
try:
jdk = JVMCI9JDKConfig(debugLevel)
except mx.JDKConfigException as e:
jdkBuildDir = _get_jdk_build_dir(debugLevel)
msg = 'Error with the JDK built into {}:\n{}\nTry (re)building it with: mx --jdk-debug-level={} make'
if mx.get_opts().use_jdk_image:
msg += ' images'
mx.abort(msg.format(jdkBuildDir, e.message, debugLevel))
_jvmci_jdks[debugLevel] = jdk
return jdk
class JVMCI9JDKFactory(mx.JDKFactory):
def getJDKConfig(self):
jdk = get_jvmci_jdk(_vm.debugLevel)
return jdk
def description(self):
return "JVMCI JDK"
mx.update_commands(_suite, {
'make': [_runmake, '[args...]', _makehelp],
'multimake': [_runmultimake, '[options]'],
'c1visualizer' : [c1visualizer, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jol' : [jol, ''],
'vm': [run_vm, '[-options] class [args...]'],
})
mx.add_argument('-M', '--jvmci-mode', action='store', choices=sorted(_jvmciModes.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmciMode + ')')
mx.add_argument('--jdk-jvm-variant', '--vm', action='store', choices=_jdkJvmVariants + sorted(_legacyVms.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmVariant + ')')
mx.add_argument('--jdk-debug-level', '--vmbuild', action='store', choices=_jdkDebugLevels + sorted(_legacyVmbuilds.viewkeys()), help='the JDK debug level to build/run (default: ' + _vm.debugLevel + ')')
mx.add_argument('-I', '--use-jdk-image', action='store_true', help='build/run JDK image instead of exploded JDK')
mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('9'), JVMCI9JDKFactory())
def mx_post_parse_cmd_line(opts):
mx.set_java_command_default_jdk_tag(_JVMCI_JDK_TAG)
jdkTag = mx.get_jdk_option().tag
jvmVariant = None
debugLevel = None
jvmciMode = None
if opts.jdk_jvm_variant is not None:
jvmVariant = opts.jdk_jvm_variant
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-jvm-variant" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jdk_debug_level is not None:
debugLevel = _translateLegacyDebugLevel(opts.jdk_debug_level)
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-debug-level" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jvmci_mode is not None:
jvmciMode = opts.jvmci_mode
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jvmci-mode" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
_vm.update(jvmVariant, debugLevel, jvmciMode)
| gpl-2.0 | 4,158,647,016,580,874,000 | 41.20506 | 202 | 0.608941 | false |
guykisel/inline-plz | inlineplz/linters/shellcheck.py | 1 | 1627 | # -*- coding: utf-8 -*-
import dirtyjson as json
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
name="shellcheck",
install=[
["cabal", "update"],
["cabal", "install", "shellcheck"],
["apt-get", "install", "shellcheck"],
["dnf", "install", "shellcheck"],
["brew", "install", "shellcheck"],
["port", "install", "shellcheck"],
["zypper", "in", "ShellCheck"],
],
help_cmd=["shellcheck", "-V"],
run=["shellcheck", "-x", "-f", "json", "-e", "SC2086"],
rundefault=["shellcheck", "-x", "-f", "json", "-e", "SC2086"],
dotfiles=[],
language="shell",
autorun=True,
run_per_file=True,
)
class ShellcheckParser(ParserBase):
"""Parse json shellcheck output."""
def parse(self, lint_data):
messages = set()
for file_path, output in lint_data:
if file_path.strip() and output.strip():
filedata = json.loads(output)
if filedata:
for msgdata in filedata:
try:
path = file_path
line = msgdata["line"]
msgbody = msgdata["message"]
messages.add((path, line, msgbody))
except (ValueError, KeyError, TypeError):
print(
"({0}) Invalid message: {1}".format(
type(self).__name__, msgdata
)
)
return messages
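    # Minimal illustration of the expected input shape (hypothetical data;
    # assumes the @linter decorator returns the class unchanged): each item
    # pairs a file path with shellcheck's JSON output for that file.
    #
    #   sample = [('deploy.sh',
    #              '[{"line": 3, "message": "Double quote to prevent globbing."}]')]
    #   ShellcheckParser().parse(sample)
    #   # -> {('deploy.sh', 3, 'Double quote to prevent globbing.')}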
| isc | -6,519,408,518,040,228,000 | 32.204082 | 68 | 0.454825 | false |
sternshus/arelle2.7 | svr-2.7/arelle/CntlrGenVersReports.py | 1 | 23340 | u'''
Created on Dec 14, 2010
Use this module to start Arelle in command line non-interactive mode
(This module can be a pattern for custom use of Arelle in an application.)
In this example a versioning report production file is read and used to generate
versioning reports, per Roland Hommes 2010-12-10
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from __future__ import with_statement
from arelle import PythonUtil # define 2.x or 3.x string types
import time, datetime, os, gettext, io, sys, traceback
from lxml import etree
from optparse import OptionParser
from arelle import (Cntlr, ModelXbrl, ModelDocument, ModelVersReport, FileSource,
XmlUtil, XbrlConst, Version)
from arelle import xlrd
import logging
from io import open
conformanceNS = u"http://xbrl.org/2008/conformance"
def main():
gettext.install(u"arelle") # needed for options messages
usage = u"usage: %prog [options]"
parser = OptionParser(usage, version=u"Arelle(r) {0}".format(Version.version))
parser.add_option(u"--excelfile", dest=u"excelfilename",
help=_(u"FILENAME is an excel 95-2003 index file containing columns: \n"
u"Dir is a test directory, \n"
u"fromURI is the fromDTS URI relative to test director, \n"
u"toURI is the toDTS URI relative to test director, \n"
u"Description is the goal of the test for testcase description, \n"
u"Assignment is the business, technical, or errata classification, \n"
u"Expected event is an event localName that is expected \n\n"
u"Output files and testcases are located in filename's directory, \n"
u"report files are generated in '/report' under fromURI's directory."))
parser.add_option(u"--testfiledate", dest=u"testfiledate",
help=_(u"Date if desired to use (instead of today) in generated testcase elements."))
(options, args) = parser.parse_args()
try:
CntlrGenVersReports().runFromExcel(options)
except Exception, ex:
print ex, traceback.format_tb(sys.exc_info()[2])
class CntlrGenVersReports(Cntlr.Cntlr):
def __init__(self):
super(CntlrGenVersReports, self).__init__()
def runFromExcel(self, options):
#testGenFileName = options.excelfilename
testGenFileName = ur"C:\Users\Herm Fischer\Documents\mvsl\projects\XBRL.org\conformance-versioning\trunk\versioningReport\conf\creation-index.xls"
testGenDir = os.path.dirname(testGenFileName)
schemaDir = os.path.dirname(testGenDir) + os.sep + u"schema"
timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
if options.testfiledate:
today = options.testfiledate
else:
today = XmlUtil.dateunionValue(datetime.date.today())
startedAt = time.time()
LogHandler(self) # start logger
self.logMessages = []
logMessagesFile = testGenDir + os.sep + u'log-generation-messages.txt'
modelTestcases = ModelXbrl.create(self.modelManager, url=testGenFileName, isEntry=True)
testcaseIndexBook = xlrd.open_workbook(testGenFileName)
testcaseIndexSheet = testcaseIndexBook.sheet_by_index(0)
self.addToLog(_(u"[info] xls loaded in {0:.2} secs at {1}").format(time.time() - startedAt, timeNow))
# start index file
indexFiles = [testGenDir + os.sep + u'creation-testcases-index.xml',
testGenDir + os.sep + u'consumption-testcases-index.xml']
indexDocs = []
testcasesElements = []
for purpose in (u"Creation",u"Consumption"):
file = io.StringIO(
#'<?xml version="1.0" encoding="UTF-8"?>'
u'<!-- XBRL Versioning 1.0 {0} Tests -->'
u'<!-- Copyright 2011 XBRL International. All Rights Reserved. -->'
u'<?xml-stylesheet type="text/xsl" href="infrastructure/testcases-index.xsl"?>'
u'<testcases name="XBRL Versioning 1.0 {0} Tests" '
u' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
u' xsi:noNamespaceSchemaLocation="infrastructure/testcases-index.xsd">'
u'</testcases>'.format(purpose, today)
)
doc = etree.parse(file)
file.close()
indexDocs.append(doc)
testcasesElements.append(doc.getroot())
priorTestcasesDir = None
testcaseFiles = None
testcaseDocs = None
for iRow in xrange(1, testcaseIndexSheet.nrows):
try:
row = testcaseIndexSheet.row(iRow)
if (row[0].ctype == xlrd.XL_CELL_EMPTY or # must have directory
row[1].ctype == xlrd.XL_CELL_EMPTY or # from
row[2].ctype == xlrd.XL_CELL_EMPTY): # to
continue
testDir = row[0].value
uriFrom = row[1].value
uriTo = row[2].value
overrideReport = row[3].value
description = row[4].value
if description is None or len(description) == 0:
continue # test not ready to run
assignment = row[5].value
expectedEvents = row[6].value # comma space separated if multiple
note = row[7].value
useCase = row[8].value
base = os.path.join(os.path.dirname(testGenFileName),testDir) + os.sep
self.addToLog(_(u"[info] testcase uriFrom {0}").format(uriFrom))
if uriFrom and uriTo and assignment.lower() not in (u"n.a.", u"error") and expectedEvents != u"N.A.":
modelDTSfrom = modelDTSto = None
for URIs, msg, isFrom in ((uriFrom, _(u"loading from DTS"), True), (uriTo, _(u"loading to DTS"), False)):
if u',' not in URIs:
modelDTS = ModelXbrl.load(self.modelManager, URIs, msg, base=base)
else:
modelDTS = ModelXbrl.create(self.modelManager,
ModelDocument.Type.DTSENTRIES,
self.webCache.normalizeUrl(URIs.replace(u", ",u"_") + u".dts",
base),
isEntry=True)
DTSdoc = modelDTS.modelDocument
DTSdoc.inDTS = True
for uri in URIs.split(u','):
doc = ModelDocument.load(modelDTS, uri.strip(), base=base)
if doc is not None:
DTSdoc.referencesDocument[doc] = u"import" #fake import
doc.inDTS = True
if isFrom: modelDTSfrom = modelDTS
else: modelDTSto = modelDTS
if modelDTSfrom is not None and modelDTSto is not None:
# generate differences report
reportUri = uriFrom.partition(u',')[0] # first file
reportDir = os.path.dirname(reportUri)
if reportDir: reportDir += os.sep
reportName = os.path.basename(reportUri).replace(u"from.xsd",u"report.xml")
reportFile = reportDir + u"out" + os.sep + reportName
#reportFile = reportDir + "report" + os.sep + reportName
reportFullPath = self.webCache.normalizeUrl(
reportFile,
base)
testcasesDir = os.path.dirname(os.path.dirname(reportFullPath))
if testcasesDir != priorTestcasesDir:
# close prior report
if priorTestcasesDir:
for i,testcaseFile in enumerate(testcaseFiles):
with open(testcaseFile, u"w", encoding=u"utf-8") as fh:
XmlUtil.writexml(fh, testcaseDocs[i], encoding=u"utf-8")
testcaseName = os.path.basename(testcasesDir)
testcaseFiles = [testcasesDir + os.sep + testcaseName + u"-creation-testcase.xml",
testcasesDir + os.sep + testcaseName + u"-consumption-testcase.xml"]
for i,testcaseFile in enumerate(testcaseFiles):
etree.SubElement(testcasesElements[i], u"testcase",
attrib={u"uri":
testcaseFile[len(testGenDir)+1:].replace(u"\\",u"/")} )
# start testcase file
testcaseDocs = []
testcaseElements = []
testcaseNumber = testcaseName[0:4]
if testcaseNumber.isnumeric():
testcaseNumberElement = u"<number>{0}</number>".format(testcaseNumber)
testcaseName = testcaseName[5:]
else:
testcaseNumberElement = u""
testDirSegments = testDir.split(u'/')
if len(testDirSegments) >= 2 and u'-' in testDirSegments[1]:
testedModule = testDirSegments[1][testDirSegments[1].index(u'-') + 1:]
else:
testedModule = u''
for purpose in (u"Creation",u"Consumption"):
file = io.StringIO(
#'<?xml version="1.0" encoding="UTF-8"?>'
u'<!-- Copyright 2011 XBRL International. All Rights Reserved. -->'
u'<?xml-stylesheet type="text/xsl" href="../../../infrastructure/test.xsl"?>'
u'<testcase '
u' xmlns="http://xbrl.org/2008/conformance"'
u' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
u' xsi:schemaLocation="http://xbrl.org/2008/conformance ../../../infrastructure/test.xsd">'
u'<creator>'
u'<name>Roland Hommes</name>'
u'<email>[email protected]</email>'
u'</creator>'
u'{0}'
u'<name>{1}</name>'
# '<description>{0}</description>'
u'<reference>'
u'{2}'
u'{3}'
u'</reference>'
u'</testcase>'.format(testcaseNumberElement,
testcaseName,
u'<name>{0}</name>'.format(testedModule) if testedModule else u'',
u'<id>{0}</id>'.format(useCase) if useCase else u'')
)
doc = etree.parse(file)
file.close()
testcaseDocs.append(doc)
testcaseElements.append(doc.getroot())
priorTestcasesDir = testcasesDir
variationSeq = 1
try:
os.makedirs(os.path.dirname(reportFullPath))
except WindowsError:
pass # dir already exists
modelVersReport = ModelVersReport.ModelVersReport(modelTestcases)
modelVersReport.diffDTSes(reportFullPath,modelDTSfrom, modelDTSto,
assignment=assignment,
schemaDir=schemaDir)
# check for expected elements
if expectedEvents:
for expectedEvent in expectedEvents.split(u","):
if expectedEvent not in (u"No change", u"N.A."):
prefix, sep, localName = expectedEvent.partition(u':')
if sep and len(modelVersReport.xmlDocument.findall(
u'//{{{0}}}{1}'.format(
XbrlConst.verPrefixNS.get(prefix),
localName))) == 0:
modelTestcases.warning(u"warning",
u"Generated test case %(reportName)s missing expected event %(event)s",
reportName=reportName,
event=expectedEvent)
modelVersReport.close()
uriFromParts = uriFrom.split(u'_')
if len(uriFromParts) >= 2:
variationId = uriFromParts[1]
else:
variationId = u"_{0:02n}".format(variationSeq)
for i,testcaseElt in enumerate(testcaseElements):
variationElement = etree.SubElement(testcaseElt, u"{http://xbrl.org/2008/conformance}variation",
attrib={u"id": variationId})
nameElement = etree.SubElement(variationElement, u"{http://xbrl.org/2008/conformance}description")
nameElement.text = description
u''' (removed per RH 2011/10/04
if note:
paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
paramElement.text = "Note: " + note
if useCase:
paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}reference")
paramElement.set("specification", "versioning-requirements")
paramElement.set("useCase", useCase)
'''
dataElement = etree.SubElement(variationElement, u"{http://xbrl.org/2008/conformance}data")
if i == 0: # result is report
if expectedEvents:
paramElement = etree.SubElement(dataElement, u"{http://xbrl.org/2008/conformance}parameter",
attrib={u"name":u"expectedEvent",
u"value":expectedEvents.replace(u',',u' ')},
nsmap={u"conf":u"http://xbrl.org/2008/conformance",
None:u""})
if assignment:
paramElement = etree.SubElement(dataElement, u"{http://xbrl.org/2008/conformance}parameter",
attrib={u"name":u"assignment",
u"value":assignment},
nsmap={u"conf":u"http://xbrl.org/2008/conformance",
None:u""})
for schemaURIs, dtsAttr in ((uriFrom,u"from"), (uriTo,u"to")):
for schemaURI in schemaURIs.split(u","):
schemaElement = etree.SubElement(dataElement, u"{http://xbrl.org/2008/conformance}schema")
schemaElement.set(u"dts",dtsAttr)
if i == 0:
schemaElement.set(u"readMeFirst",u"true")
schemaElement.text=os.path.basename(schemaURI.strip())
resultElement = etree.SubElement(variationElement, u"{http://xbrl.org/2008/conformance}result")
reportElement = etree.SubElement(resultElement if i == 0 else dataElement,
u"{http://xbrl.org/2008/conformance}versioningReport")
if i == 1:
reportElement.set(u"readMeFirst",u"true")
reportElement.text = u"report/" + reportName
variationSeq += 1
except Exception, err:
modelTestcases.error(u"exception",
_(u"Exception: %(error)s, Excel row: %(excelRow)s"),
error=err,
excelRow=iRow,
exc_info=True)
# add tests-error-code index files to consumption
for testcaseFile in self.testcaseFiles(testGenDir + os.sep + u"tests-error-code"):
etree.SubElement(testcasesElements[1], u"testcase",
attrib={u"uri":
testcaseFile[len(testGenDir)+1:].replace(u"\\",u"/")} )
with open(logMessagesFile, u"w") as fh:
fh.writelines(self.logMessages)
if priorTestcasesDir:
for i,testcaseFile in enumerate(testcaseFiles):
with open(testcaseFile, u"w", encoding=u"utf-8") as fh:
XmlUtil.writexml(fh, testcaseDocs[i], encoding=u"utf-8")
for i,indexFile in enumerate(indexFiles):
with open(indexFile, u"w", encoding=u"utf-8") as fh:
XmlUtil.writexml(fh, indexDocs[i], encoding=u"utf-8")
def testcaseFiles(self, dir, files=None):
if files is None: files = []
for file in os.listdir(dir):
path = dir + os.sep + file
if path.endswith(u".svn"):
continue
if path.endswith(u"-testcase.xml"):
files.append(path)
elif os.path.isdir(path):
self.testcaseFiles(path, files)
return files
def runFromXml(self):
testGenFileName = ur"C:\Users\Herm Fischer\Documents\mvsl\projects\Arelle\roland test cases\1000-Concepts\index.xml"
filesource = FileSource.FileSource(testGenFileName)
startedAt = time.time()
LogHandler(self) # start logger
modelTestcases = self.modelManager.load(filesource, _(u"views loading"))
self.addToLog(_(u"[info] loaded in {0:.2} secs").format(time.time() - startedAt))
if modelTestcases.modelDocument.type == ModelDocument.Type.TESTCASESINDEX:
for testcasesElement in modelTestcases.modelDocument.iter(tag=u"testcases"):
rootAttr = testcasesElement.get(u"root")
title = testcasesElement.get(u"title")
self.addToLog(_(u"[info] testcases {0}").format(title))
if rootAttr is not None:
base = os.path.join(os.path.dirname(modelTestcases.modelDocument.filepath),rootAttr) + os.sep
else:
base = self.filepath
for testcaseElement in testcasesElement.iterchildren(tag=u"testcase"):
uriFrom = testcaseElement.get(u"uriFrom")
uriTo = testcaseElement.get(u"uriTo")
modelDTSfrom = modelDTSto = None
self.addToLog(_(u"[info] testcase uriFrom {0}").format(uriFrom))
if uriFrom is not None and uriTo is not None:
modelDTSfrom = ModelXbrl.load(modelTestcases.modelManager,
uriFrom,
_(u"loading from DTS"),
base=base)
modelDTSto = ModelXbrl.load(modelTestcases.modelManager,
uriTo,
_(u"loading to DTS"),
base=base)
if modelDTSfrom is not None and modelDTSto is not None:
# generate differences report
reportName = os.path.basename(uriFrom).replace(u"from.xsd",u"report.xml")
reportFile = os.path.dirname(uriFrom) + u"\\report\\" + reportName
reportFullPath = self.webCache.normalizeUrl(
reportFile,
base)
try:
os.makedirs(os.path.dirname(reportFullPath))
except WindowsError:
pass # dir already exists
ModelVersReport.ModelVersReport(modelTestcases).diffDTSes(
reportFullPath,
modelDTSfrom, modelDTSto)
def addToLog(self, message):
self.logMessages.append(message + u'\n')
print message
def showStatus(self, message, clearAfter=None):
pass
class LogHandler(logging.Handler):
def __init__(self, cntlr):
super(LogHandler, self).__init__()
self.cntlr = cntlr
self.level = logging.DEBUG
formatter = logging.Formatter(u"[%(messageCode)s] %(message)s - %(file)s %(sourceLine)s")
self.setFormatter(formatter)
logging.getLogger(u"arelle").addHandler(self)
def flush(self):
u''' Nothing to flush '''
def emit(self, logRecord):
self.cntlr.addToLog(self.format(logRecord))
if __name__ == u"__main__":
main()
| apache-2.0 | -3,449,562,717,907,046,000 | 58.623377 | 154 | 0.466024 | false |
AffilaeTech/niav | niav/helpers/mongo/mongo.py | 1 | 6645 | import logging
import pendulum
from pymongo import MongoClient
from bson.objectid import ObjectId
from niav.ssh_tunnel import SshTunnel
class Mongo(object):
"""
MongoDB helper
- MongoDB wrapper
- load configurations
- open SSH tunnel if needed
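    A minimal usage sketch (illustrative only): it assumes an ``env`` object
    whose get()/get_int() methods read an env.ini with a [mongo] section
    (host/port) and, optionally, a [tunnel_ssh] section; the database,
    collection and document names below are made up.
        mongo = Mongo(env)
        client = mongo.connect()   # opens the SSH tunnel first if configured
        db = client["niav_db"]
        doc_id = Mongo.insert_one(db, "runs", {"status": "ok"})
        mongo.close()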
"""
def __init__(self, env, section_mongo=None, section_tunnel_ssh=None):
"""
:param env: Env instance
:param section_mongo: Section name for MongoDB configuration in env.ini file
:param section_tunnel_ssh: Section name for SSH tunnel configuration in env.ini file.
:type env: object instance
:type section_mongo: string
:type section_tunnel_ssh: string
"""
self.env = env
self.section_mongo = section_mongo
self.section_tunnel_ssh = section_tunnel_ssh
self.host = None
self.port = None
self.client = None
self.tunnel = None
self.tunnel_host = None
self.tunnel_port = None
self.tunnel_user = None
self.tunnel_password = None
self.tunnel_private_key = None
self.tunnel_private_key_password = None
self.tunnel_local_port = None
self.tunnel_remote_port = None
self.log = logging.getLogger("niav")
self.configure_mongo()
self.configure_tunnel_ssh()
def connect(self):
"""
Connect to MongoDB and start SSH tunnel if required.
:return: MongoClient instance
:rtype: object instance
"""
if self.is_tunnel_ssh_required():
self.tunnel = SshTunnel(self.tunnel_host, self.tunnel_local_port, self.tunnel_remote_port,
port_ssh=self.tunnel_port, user=self.tunnel_user, password=self.tunnel_password,
private_key=self.tunnel_private_key, private_key_password=self.tunnel_private_key_password)
self.tunnel.connect()
self.tunnel.start()
if self.client is None:
self.client = MongoClient(self.host, self.port)
self.log.info("Mongo connected to '%s'" % self.host)
return self.client
def close(self):
"""
Close connection to MongoDB.
"""
self.client.close()
if self.tunnel is not None:
self.tunnel.stop()
def get_client(self):
"""
Get MongoClient instance.
:return: MongoClient instance
:rtype: object instance
"""
return self.client
def configure_mongo(self):
"""
Load MongoDB configuration from env.ini
"""
if self.section_mongo is None:
self.section_mongo = "mongo"
self.host = self.env.get("%s.host" % self.section_mongo)
self.port = self.env.get_int("%s.port" % self.section_mongo)
def configure_tunnel_ssh(self):
"""
Load configuration from env.ini
"""
if self.section_tunnel_ssh is None:
self.section_tunnel_ssh = "tunnel_ssh"
self.tunnel_host = self.env.get_unsafe("%s.host" % self.section_tunnel_ssh)
self.tunnel_port = self.env.get_int_unsafe("%s.port" % self.section_tunnel_ssh)
self.tunnel_user = self.env.get_unsafe("%s.user" % self.section_tunnel_ssh)
self.tunnel_password = self.env.get_unsafe("%s.password" % self.section_tunnel_ssh)
self.tunnel_private_key = self.env.get_unsafe("%s.private_key" % self.section_tunnel_ssh)
self.tunnel_private_key_password = self.env.get_unsafe("%s.private_key_password" % self.section_tunnel_ssh)
self.tunnel_local_port = self.env.get_int_unsafe("%s.local_port" % self.section_tunnel_ssh)
self.tunnel_remote_port = self.env.get_int_unsafe("%s.remote_port" % self.section_tunnel_ssh)
def is_tunnel_ssh_required(self):
"""
Check if the configuration is sufficient to start the SSH tunnel.
:return: boolean
:rtype: bool
"""
if self.tunnel_host not in [None, ""] and self.tunnel_local_port not in [None, ""] and self.tunnel_remote_port not in [None, ""]:
return True
return False
@classmethod
def object_id(cls, id_to_object_id):
"""
Get a string Id and return a MongoDB ObjectID.
:param id_to_object_id: id
:rtype id_to_object_id: string
:return: MongoDB ObjectID
:rtype: ObjectID
"""
return ObjectId(id_to_object_id)
@classmethod
def str_id(cls, object_id_to_id):
"""
Get a MongoDB ObjectID and return a string id.
:param object_id_to_id: id
:type object_id_to_id: ObjectId
:return: id
:rtype: string
"""
return str(object_id_to_id)
@classmethod
def from_datetime(cls, generation_time=None, timezone="UTC"):
if generation_time is None:
generation_time = pendulum.now(timezone).start_of("day")
# print(generation_time.strftime("%Y-%m-%d %H:%M:%S"))
object_id = ObjectId.from_datetime(generation_time)
# print(cls.str_id(object_id))
return object_id
@classmethod
def insert_one(cls, db, collection, doc):
"""
Insert a document.
:param db: Database name
:param collection: Collection name
:param doc: Document to insert
        :type db: string
:type collection: string
:type doc: dict
:return: id
:rtype: ObjectId
"""
returned_id = db[collection].insert_one(doc).inserted_id
return returned_id
def update_one(self, db, collection, q_filter, update):
"""
Update a document.
:param db: Database name
:param collection: Collection name
:param q_filter: Query filter
:param update: Data to update
        :type db: string
:type collection: string
:type q_filter: dict
:type update: dict
:return: True if update is ok
:rtype: bool
"""
result = db[collection].update_one(q_filter, update)
if result.matched_count != 1 or result.modified_count != 1:
self.log.warning("Mongo update_one: filter match: %d, modified: %d" % (result.matched_count, result.modified_count))
return False
return True
| mit | 4,294,171,752,109,502,000 | 33.252577 | 137 | 0.567344 | false |
Vijaysai005/KProject | vijay/DBSCAN/main_3.py | 1 | 2729 | # usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
import Cluster as cl
from pymongo import MongoClient
import numpy as np
from datetime import datetime
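# NOTE: centroid() below returns the simple arithmetic mean of the latitude and
# longitude lists (not a geodesic centroid).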
def centroid(lati=[],longi=[]):
x = sum(lati) / len(lati)
y = sum(longi) / len(longi)
return x,y
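# mongoCluster walks the requested date range in one-minute windows, keeps the
# most recent fix per unit_id, groups the points via the Cluster helper
# (cl.cluster with the 0.045 / 2 arguments used below) and writes each point's
# cluster number (or "outlier") and timestamp to the target collection.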
def mongoCluster(get_col, set_col, year, month, startday, endday):
for day in range(startday,endday+1):
for hr in range(24):
for mins in range(59):
items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr,mins+1,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)])
data = [] ; item_id = []
for item in items:
if item["unit_id"] not in item_id:
item_id.append(item["unit_id"])
data.append(item)
try:
listOflist = cl.DictToList(data)
data = cl.loadData(listOflist, "unit_id", "latitude", "longitude", start_column=1)
main_dict,n_cluster = cl.cluster(data[0], data[1], 0.045, 2)
for i in range(len(main_dict)):
try:
for j in range(len(main_dict[i])):
set_col.insert([{"cluster_number": i, "unit_id": int(main_dict[i][j][0]), "latitude": main_dict[i][j][1],"longitude": main_dict[i][j][2], "timestamp":datetime(year,month,day,hr,mins)}])
except Exception:
for k in range(len(main_dict["outlier"])):
set_col.insert([{"cluster_number": "outlier", "unit_id": int(main_dict["outlier"][k][0]), "latitude": main_dict["outlier"][k][1],"longitude": main_dict["outlier"][k][2], "timestamp":datetime(year,month,day,hr,mins)}])
print (day,hr,mins)
if n_cluster == 0:
lat_cen = [] ; long_cen = []
for i in range(len(main_dict["outlier"])):
lat_cen.append(main_dict["outlier"][i][1])
long_cen.append(main_dict["outlier"][i][2])
cent_x,cent_y = centroid(lat_cen,long_cen)
else:
cent_x = [] ; cent_y = []
for i in range(n_cluster):
lat_cen = [] ; long_cen = []
							for j in range(len(main_dict[i])):
lat_cen.append(main_dict[i][j][1])
long_cen.append(main_dict[i][j][2])
_x,_y = centroid(lat_cen,long_cen)
cent_x.append(_x)
cent_y.append(_y)
#print (cent_x,cent_y)
except KeyError:
pass
return main_dict, n_cluster, cent_x, cent_y
if __name__ == "__main__":
client = MongoClient('localhost', 27017)
db = client.maximus_db
get_coll = db.device_data
set_coll = db.clus
startday = 25 ; endday = 26
year = 2017 ; month = 3
main_dict, n_cluster, cent_x, cent_y = mongoCluster(get_coll, set_coll, year, month, startday, endday)
| gpl-3.0 | 5,728,580,436,739,454,000 | 31.488095 | 248 | 0.599487 | false |
vitan/django-guardian | guardian/shortcuts.py | 1 | 27452 | """
Convenient shortcuts to manage or check object permissions.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Count, Q
from django.shortcuts import _get_queryset
from itertools import groupby
from guardian.compat import get_user_model
from guardian.compat import basestring
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import WrongAppError
from guardian.utils import get_anonymous_user
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
import warnings
def assign_perm(perm, user_or_group, obj=None):
"""
Assigns permission to user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instance for specific user:
>>> from django.contrib.sites.models import Site
>>> from guardian.models import User
>>> from guardian.shortcuts import assign_perm
>>> site = Site.objects.get_current()
>>> user = User.objects.create(username='joe')
>>> assign_perm("change_site", user, site)
<UserObjectPermission: example.com | joe | change_site>
>>> user.has_perm("change_site", site)
True
... or we can assign permission for group:
>>> group = Group.objects.create(name='joe-group')
>>> user.groups.add(group)
>>> assign_perm("delete_site", group, site)
<GroupObjectPermission: example.com | joe-group | delete_site>
>>> user.has_perm("delete_site", site)
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``obj`` parameter is omitted. Added Permission would be returned in that
case:
>>> assign_perm("sites.change_site", user)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(user_or_group)
# If obj is None we try to operate on global permissions
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.add(perm)
return perm
if group:
group.permissions.add(perm)
return perm
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(obj)
return model.objects.assign_perm(perm, user, obj)
if group:
model = get_group_obj_perms_model(obj)
return model.objects.assign_perm(perm, group, obj)
def assign(perm, user_or_group, obj=None):
""" Depreciated function name left in for compatibility"""
warnings.warn("Shortcut function 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 2.0 version.", DeprecationWarning)
return assign_perm(perm, user_or_group, obj)
def bulk_assign_perm(perm, users_or_groups, objs=None):
"""
Bulk assign permission to users/groups and objects pair.
:param perm: proper permission for given ``objs``, as string (in format:
``app_label.codename`` or ``codename``). If ``objs`` is not given, must
be in format ``app_label.codename``.
:param users_or_groups: instances of ``User``, ``AnonymousUser`` or ``Group``;
passing any other objects would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param objs: persisted Django's ``Model`` instances or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instances for specific users:
>>> from django.contrib.sites.models import Site
>>> from guardian.compat import get_user_model
>>> from guardian.shortcuts import bulk_assign_perm
>>> User = get_user_model
>>> Site.objects.bulk_create([
... Site(domain='d_vitan', name='vitan.com'),
... Site(domain='d_elain', name='elain.com')])
>>> User.objects.bulk_create([
... User(username='vitan'),
... User(username='elain')])
>>> site_qs = Site.objects.all()
>>> user_qs = User.objects.all()
>>> bulk_assign_perm("change_site", user_qs, site_qs)
[<UserObjectPermission: vitan.com | vitan | change_site>, ...]
>>> for user in user_qs:
... for site in site_qs:
... user.has_perm("change_site", site)
True
True
True
True
... or we can assign permission for groups:
>>> group = Group.objects.create(name='joe-group')
>>> for user in user_qs:
... user.groups.add(group)
>>> bulk_assign_perm("delete_site", [group], site_qs)
[<GroupObjectPermission: vitan.com | joe-group | delete_site>, ...]
>>> for user in user_qs:
... for site in site_qs:
... user.has_perm("delete_site", site)
True
True
True
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``objs`` parameter is omitted. Added Permission would be returned in that
case:
>>> bulk_assign_perm("sites.change_site", user_qs)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(users_or_groups[0])
# If objs is None we try to operate on global permissions
if objs is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
perm.user_set.add(*users_or_groups)
return perm
if group:
perm.group_set.add(*users_or_groups)
return perm
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(objs[0])
if group:
model = get_group_obj_perms_model(objs[0])
return model.objects.bulk_assign_perm(perm, users_or_groups, objs)
def remove_perm(perm, user_or_group=None, obj=None):
"""
Removes permission from user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
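    A sketch continuing the ``assign_perm`` doctest above (``user`` and ``site``
    are the objects created there; shown for illustration only):
    >>> remove_perm("change_site", user, site)
    >>> user.has_perm("change_site", site)
    False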
"""
user, group = get_identity(user_or_group)
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.remove(perm)
return
elif group:
group.permissions.remove(perm)
return
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(obj)
model.objects.remove_perm(perm, user, obj)
if group:
model = get_group_obj_perms_model(obj)
model.objects.remove_perm(perm, group, obj)
def bulk_remove_perm(perm, users_or_groups=None, objs=None):
"""
Removes permission from users/groups and objects pair.
:param perm: proper permission for given ``objs``, as string (in format:
``app_label.codename`` or ``codename``). If ``objs`` is not given, must
be in format ``app_label.codename``.
:param users_or_groups: instances of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param objs: persisted Django's ``Model`` instances or ``None`` if assigning
global permission. Default is ``None``.
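    A sketch mirroring the ``bulk_assign_perm`` doctest above (``user_qs`` and
    ``site_qs`` are the querysets created there; illustration only):
    >>> bulk_remove_perm("change_site", user_qs, site_qs)
    >>> for user in user_qs:
    ...     user.has_perm("change_site", site_qs[0])
    False
    False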
"""
user, group = get_identity(users_or_groups[0])
if objs is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
perm.user_set.remove(*users_or_groups)
return
elif group:
perm.group_set.remove(*users_or_groups)
return
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(objs[0])
if group:
model = get_group_obj_perms_model(objs[0])
model.objects.bulk_remove_perm(perm, users_or_groups, objs)
def get_perms(user_or_group, obj):
"""
Returns permissions for given user/group and object pair, as list of
strings.
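    For example (a sketch assuming ``user`` was granted ``change_site`` on
    ``site`` as in the ``assign_perm`` doctest above):
    >>> 'change_site' in get_perms(user, site)
    True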
"""
check = ObjectPermissionChecker(user_or_group)
return check.get_perms(obj)
def get_perms_for_model(cls):
"""
Returns queryset of all Permission objects for the given class. It is
possible to pass Model as class or instance.
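    For example (a sketch; the exact permissions depend on the Django version
    and any custom permissions registered for the model):
    >>> from django.contrib.sites.models import Site
    >>> sorted(p.codename for p in get_perms_for_model(Site))
    [u'add_site', u'change_site', u'delete_site']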
"""
if isinstance(cls, basestring):
app_label, model_name = cls.split('.')
model = models.get_model(app_label, model_name)
else:
model = cls
ctype = ContentType.objects.get_for_model(model)
return Permission.objects.filter(content_type=ctype)
def get_users_with_perms(obj, attach_perms=False, with_superusers=False,
with_group_users=True):
"""
Returns queryset of all ``User`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``User`` instances with permissions' codenames list as
values. This would fetch users eagerly!
:param with_superusers: Default: ``False``. If set to ``True`` result would
contain all superusers.
:param with_group_users: Default: ``True``. If set to ``False`` result would
**not** contain those users who have only group permissions for given
``obj``.
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from django.contrib.auth.models import User
>>> from guardian.shortcuts import assign_perm, get_users_with_perms
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> joe = User.objects.create_user('joe', '[email protected]', 'joesecret')
>>> assign_perm('change_flatpage', joe, page)
>>>
>>> get_users_with_perms(page)
[<User: joe>]
>>>
>>> get_users_with_perms(page, attach_perms=True)
{<User: joe>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
user_model = get_user_obj_perms_model(obj)
related_name = user_model.user.field.related_query_name()
if user_model.objects.is_generic():
user_filters = {
'%s__content_type' % related_name: ctype,
'%s__object_pk' % related_name: obj.pk,
}
else:
user_filters = {'%s__content_object' % related_name: obj}
qset = Q(**user_filters)
if with_group_users:
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {
'groups__%s__content_type' % group_rel_name: ctype,
'groups__%s__object_pk' % group_rel_name: obj.pk,
}
else:
group_filters = {
'groups__%s__content_object' % group_rel_name: obj,
}
qset = qset | Q(**group_filters)
if with_superusers:
qset = qset | Q(is_superuser=True)
return get_user_model().objects.filter(qset).distinct()
else:
# TODO: Do not hit db for each user!
users = {}
for user in get_users_with_perms(obj,
with_group_users=with_group_users):
users[user] = sorted(get_perms(user, obj))
return users
def get_groups_with_perms(obj, attach_perms=False):
"""
Returns queryset of all ``Group`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``Group`` instances with permissions' codenames list as
values. This would fetch groups eagerly!
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.shortcuts import assign_perm, get_groups_with_perms
>>> from guardian.models import Group
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> admins = Group.objects.create(name='Admins')
>>> assign_perm('change_flatpage', admins, page)
>>>
>>> get_groups_with_perms(page)
[<Group: admins>]
>>>
>>> get_groups_with_perms(page, attach_perms=True)
{<Group: admins>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
}
else:
group_filters = {'%s__content_object' % group_rel_name: obj}
groups = Group.objects.filter(**group_filters).distinct()
return groups
else:
# TODO: Do not hit db for each group!
groups = {}
for group in get_groups_with_perms(obj):
            if group not in groups:
groups[group] = sorted(get_perms(group, obj))
return groups
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False,
with_superuser=True):
"""
Returns queryset of objects for which a given ``user`` has *all*
permissions present at ``perms``.
:param user: ``User`` or ``AnonymousUser`` instance for which objects would
be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param use_groups: if ``False``, wouldn't check user's groups object
permissions. Default is ``True``.
:param any_perm: if True, any of permission in sequence is accepted
:param with_superuser: if ``True`` returns the entire queryset if not it will
only return objects the user has explicit permissions.
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example::
>>> from django.contrib.auth.models import User
>>> from guardian.shortcuts import get_objects_for_user
>>> joe = User.objects.get(username='joe')
>>> get_objects_for_user(joe, 'auth.change_group')
[]
>>> from guardian.shortcuts import assign_perm
>>> group = Group.objects.create('some group')
>>> assign_perm('auth.change_group', joe, group)
>>> get_objects_for_user(joe, 'auth.change_group')
[<Group some group>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[]
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
[<Group some group>]
>>> assign_perm('auth.delete_group', joe, group)
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[<Group some group>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# First check if user is superuser and if so, return queryset immediately
if with_superuser and user.is_superuser:
return queryset
# Check if the user is anonymous. The
# django.contrib.auth.models.AnonymousUser object doesn't work for queries
# and it's nice to be able to pass in request.user blindly.
if user.is_anonymous():
user = get_anonymous_user()
# Now we should extract list of pk values for which we would filter queryset
user_model = get_user_obj_perms_model(queryset.model)
user_obj_perms_queryset = (user_model.objects
.filter(user=user)
.filter(permission__content_type=ctype)
.filter(permission__codename__in=codenames))
if user_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
if use_groups:
group_model = get_group_obj_perms_model(queryset.model)
group_filters = {
'permission__content_type': ctype,
'permission__codename__in': codenames,
'group__%s' % get_user_model().groups.field.related_query_name(): user,
}
groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
if group_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
if not any_perm:
user_obj_perms = user_obj_perms_queryset.values_list(*fields)
groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
data = list(user_obj_perms) + list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
if not any_perm and len(codenames) > 1:
counts = user_obj_perms_queryset.values(fields[0]).annotate(object_pk_count=Count(fields[0]))
user_obj_perms_queryset = counts.filter(object_pk_count__gte=len(codenames))
values = user_obj_perms_queryset.values_list(fields[0], flat=True)
if user_model.objects.is_generic():
values = [int(v) for v in values]
objects = queryset.filter(pk__in=values)
if use_groups:
values = groups_obj_perms_queryset.values_list(fields[0], flat=True)
if group_model.objects.is_generic():
values = [int(v) for v in values]
objects |= queryset.filter(pk__in=values)
return objects
def get_objects_for_group(group, perms, klass=None, any_perm=False):
"""
Returns queryset of objects for which a given ``group`` has *all*
permissions present at ``perms``.
:param group: ``Group`` instance for which objects would be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example:
Let's assume we have a ``Task`` model belonging to the ``tasker`` app with
the default add_task, change_task and delete_task permissions provided
by Django::
>>> from guardian.shortcuts import get_objects_for_group
>>> from tasker import Task
>>> group = Group.objects.create('some group')
>>> task = Task.objects.create('some task')
>>> get_objects_for_group(group, 'tasker.add_task')
[]
>>> from guardian.shortcuts import assign_perm
>>> assign_perm('tasker.add_task', group, task)
>>> get_objects_for_group(group, 'tasker.add_task')
[<Task some task>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[]
>>> assign_perm('tasker.delete_task', group, task)
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[<Task some task>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# Now we should extract list of pk values for which we would filter queryset
group_model = get_group_obj_perms_model(queryset.model)
groups_obj_perms_queryset = (group_model.objects
.filter(group=group)
.filter(permission__content_type=ctype)
.filter(permission__codename__in=codenames))
if group_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
data = list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
| bsd-2-clause | -398,339,219,573,541,500 | 39.075912 | 179 | 0.623998 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/testing/layers.py | 1 | 66650 | # Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Layers used by Launchpad tests.
Layers are the mechanism used by the Zope3 test runner to efficiently
provide environments for tests and are documented in the lib/zope/testing.
Note that every Layer should define all of setUp, tearDown, testSetUp
and testTearDown. If you don't do this, a base class' method will be called
instead, probably breaking something.
Preferred style is to not use the 'cls' argument to Layer class methods,
as this is unambiguous.
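Purely as an illustrative sketch (not a real layer defined in this module),
a minimal layer is just a class providing those four hooks as classmethods:
    class ExampleLayer(BaseLayer):
        @classmethod
        def setUp(cls):
            pass
        @classmethod
        def tearDown(cls):
            pass
        @classmethod
        def testSetUp(cls):
            pass
        @classmethod
        def testTearDown(cls):
            pass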
TODO: Make the Zope3 test runner handle multiple layers per test instead
of one, forcing us to attempt to make some sort of layer tree.
-- StuartBishop 20060619
"""
__metaclass__ = type
__all__ = [
'AppServerLayer',
'AuditorLayer',
'BaseLayer',
'DatabaseFunctionalLayer',
'DatabaseLayer',
'FunctionalLayer',
'GoogleLaunchpadFunctionalLayer',
'GoogleServiceLayer',
'LaunchpadFunctionalLayer',
'LaunchpadLayer',
'LaunchpadScriptLayer',
'LaunchpadTestSetup',
'LaunchpadZopelessLayer',
'LayerInvariantError',
'LayerIsolationError',
'LibrarianLayer',
'PageTestLayer',
'RabbitMQLayer',
'SwiftLayer',
'TwistedAppServerLayer',
'TwistedLaunchpadZopelessLayer',
'TwistedLayer',
'YUITestLayer',
'YUIAppServerLayer',
'ZopelessAppServerLayer',
'ZopelessDatabaseLayer',
'ZopelessLayer',
'disconnect_stores',
'reconnect_stores',
'wsgi_application',
]
from cProfile import Profile
import datetime
import errno
import gc
import logging
import os
import signal
import socket
import subprocess
import sys
import tempfile
from textwrap import dedent
import threading
import time
from unittest import (
TestCase,
TestResult,
)
from urllib import urlopen
from fixtures import (
Fixture,
MonkeyPatch,
)
import psycopg2
from storm.zope.interfaces import IZStorm
import transaction
import wsgi_intercept
from wsgi_intercept import httplib2_intercept
from zope.app.publication.httpfactory import chooseClasses
import zope.app.testing.functional
from zope.app.testing.functional import (
FunctionalTestSetup,
ZopePublication,
)
from zope.component import (
getUtility,
globalregistry,
provideUtility,
)
from zope.component.interfaces import ComponentLookupError
import zope.publisher.publish
from zope.security.management import (
endInteraction,
getSecurityPolicy,
)
from zope.server.logger.pythonlogger import PythonLogger
from lp.services import pidfile
from lp.services.auditor.server import AuditorServer
from lp.services.config import (
config,
dbconfig,
LaunchpadConfig,
)
from lp.services.config.fixture import (
ConfigFixture,
ConfigUseFixture,
)
from lp.services.database.interfaces import IStore
from lp.services.database.sqlbase import session_store
from lp.services.googlesearch.tests.googleserviceharness import (
GoogleServiceTestSetup,
)
from lp.services.job.tests import celeryd
from lp.services.librarian.model import LibraryFileAlias
from lp.services.librarianserver.testing.server import LibrarianServerFixture
from lp.services.mail.mailbox import (
IMailBox,
TestMailBox,
)
from lp.services.mail.sendmail import set_immediate_mail_delivery
import lp.services.mail.stub
from lp.services.memcache.client import memcache_client_factory
from lp.services.osutils import kill_by_pidfile
from lp.services.rabbit.server import RabbitServer
from lp.services.scripts import execute_zcml_for_scripts
from lp.services.testing.profiled import profiled
from lp.services.timeout import (
get_default_timeout_function,
set_default_timeout_function,
)
from lp.services.webapp.authorization import LaunchpadPermissiveSecurityPolicy
from lp.services.webapp.interfaces import IOpenLaunchBag
from lp.services.webapp.servers import (
LaunchpadAccessLogger,
register_launchpad_request_publication_factories,
)
import lp.services.webapp.session
from lp.testing import (
ANONYMOUS,
login,
logout,
reset_logging,
)
from lp.testing.pgsql import PgTestSetup
from lp.testing.swift.fixture import SwiftFixture
from lp.testing.smtpd import SMTPController
orig__call__ = zope.app.testing.functional.HTTPCaller.__call__
COMMA = ','
WAIT_INTERVAL = datetime.timedelta(seconds=180)
def set_up_functional_test():
return FunctionalTestSetup('zcml/ftesting.zcml')
class LayerError(Exception):
pass
class LayerInvariantError(LayerError):
"""Layer self checks have detected a fault. Invariant has been violated.
This indicates the Layer infrastructure has messed up. The test run
should be aborted.
"""
pass
class LayerIsolationError(LayerError):
"""Test isolation has been broken, probably by the test we just ran.
This generally indicates a test has screwed up by not resetting
something correctly to the default state.
The test suite should abort if it cannot clean up the mess as further
test failures may well be spurious.
"""
def is_ca_available():
"""Returns true if the component architecture has been loaded"""
try:
getUtility(IOpenLaunchBag)
except ComponentLookupError:
return False
else:
return True
def disconnect_stores():
"""Disconnect Storm stores."""
zstorm = getUtility(IZStorm)
stores = [
store for name, store in zstorm.iterstores() if name != 'session']
# If we have any stores, abort the transaction and close them.
if stores:
for store in stores:
zstorm.remove(store)
transaction.abort()
for store in stores:
store.close()
def reconnect_stores(reset=False):
"""Reconnect Storm stores, resetting the dbconfig to its defaults.
After reconnecting, the database revision will be checked to make
sure the right data is available.
"""
disconnect_stores()
if reset:
dbconfig.reset()
main_store = IStore(LibraryFileAlias)
assert main_store is not None, 'Failed to reconnect'
# Confirm that SQLOS is again talking to the database (it connects
    # as soon as SQLBase._connection is accessed).
r = main_store.execute('SELECT count(*) FROM LaunchpadDatabaseRevision')
assert r.get_one()[0] > 0, 'Storm is not talking to the database'
assert session_store() is not None, 'Failed to reconnect'
def wait_children(seconds=120):
"""Wait for all children to exit.
:param seconds: Maximum number of seconds to wait. If None, wait
forever.
"""
now = datetime.datetime.now
if seconds is None:
until = None
else:
until = now() + datetime.timedelta(seconds=seconds)
while True:
try:
os.waitpid(-1, os.WNOHANG)
except OSError as error:
if error.errno != errno.ECHILD:
raise
break
if until is not None and now() > until:
break
class MockRootFolder:
"""Implement the minimum functionality required by Z3 ZODB dependencies
Installed as part of FunctionalLayer.testSetUp() to allow the http()
method (zope.app.testing.functional.HTTPCaller) to work.
"""
@property
def _p_jar(self):
return self
def sync(self):
pass
class BaseLayer:
"""Base layer.
All our layers should subclass Base, as this is where we will put
test isolation checks to ensure that tests to not leave global
resources in a mess.
XXX: StuartBishop 2006-07-12: Unit tests (tests with no layer) will not
get these checks. The Z3 test runner should be updated so that a layer
can be specified to use for unit tests.
"""
# Set to True when we are running tests in this layer.
isSetUp = False
# The name of this test - this is the same output that the testrunner
# displays. It is probably unique, but not guaranteed to be so.
test_name = None
# A flag to disable a check for threads still running after test
# completion. This is hopefully a temporary measure; see the comment
    # in testTearDown.
disable_thread_check = False
    # A flag to make services like Librarian and Memcached persist
# between test runs. This flag is set in setUp() by looking at the
# LP_PERSISTENT_TEST_SERVICES environment variable.
persist_test_services = False
# Things we need to cleanup.
fixture = None
# ConfigFixtures for the configs generated for this layer. Set to None
# if the layer is not setUp, or if persistent tests services are in use.
config_fixture = None
appserver_config_fixture = None
# The config names that are generated for this layer. Set to None when
# the layer is not setUp.
config_name = None
appserver_config_name = None
@classmethod
def make_config(cls, config_name, clone_from, attr_name):
"""Create a temporary config and link it into the layer cleanup."""
cfg_fixture = ConfigFixture(config_name, clone_from)
cls.fixture.addCleanup(cfg_fixture.cleanUp)
cfg_fixture.setUp()
cls.fixture.addCleanup(setattr, cls, attr_name, None)
setattr(cls, attr_name, cfg_fixture)
@classmethod
@profiled
def setUp(cls):
# Set the default appserver config instance name.
# May be changed as required eg when running parallel tests.
cls.appserver_config_name = 'testrunner-appserver'
BaseLayer.isSetUp = True
cls.fixture = Fixture()
cls.fixture.setUp()
cls.fixture.addCleanup(setattr, cls, 'fixture', None)
BaseLayer.persist_test_services = (
os.environ.get('LP_PERSISTENT_TEST_SERVICES') is not None)
# We can only do unique test allocation and parallelisation if
# LP_PERSISTENT_TEST_SERVICES is off.
if not BaseLayer.persist_test_services:
test_instance = str(os.getpid())
os.environ['LP_TEST_INSTANCE'] = test_instance
cls.fixture.addCleanup(os.environ.pop, 'LP_TEST_INSTANCE', '')
# Kill any Memcached or Librarian left running from a previous
# test run, or from the parent test process if the current
# layer is being run in a subprocess. No need to be polite
# about killing memcached - just do it quickly.
kill_by_pidfile(MemcachedLayer.getPidFile(), num_polls=0)
config_name = 'testrunner_%s' % test_instance
cls.make_config(config_name, 'testrunner', 'config_fixture')
app_config_name = 'testrunner-appserver_%s' % test_instance
cls.make_config(
app_config_name, 'testrunner-appserver',
'appserver_config_fixture')
cls.appserver_config_name = app_config_name
else:
config_name = 'testrunner'
app_config_name = 'testrunner-appserver'
cls.config_name = config_name
cls.fixture.addCleanup(setattr, cls, 'config_name', None)
cls.appserver_config_name = app_config_name
cls.fixture.addCleanup(setattr, cls, 'appserver_config_name', None)
use_fixture = ConfigUseFixture(config_name)
cls.fixture.addCleanup(use_fixture.cleanUp)
use_fixture.setUp()
# Kill any database left lying around from a previous test run.
db_fixture = LaunchpadTestSetup()
try:
db_fixture.connect().close()
except psycopg2.Error:
# We assume this means 'no test database exists.'
pass
else:
db_fixture.dropDb()
@classmethod
@profiled
def tearDown(cls):
cls.fixture.cleanUp()
BaseLayer.isSetUp = False
@classmethod
@profiled
def testSetUp(cls):
# Store currently running threads so we can detect if a test
# leaves new threads running.
BaseLayer._threads = threading.enumerate()
BaseLayer.check()
BaseLayer.original_working_directory = os.getcwd()
        # Tests and test infrastructure sometimes need to know the test
# name. The testrunner doesn't provide this, so we have to do
# some snooping.
import inspect
frame = inspect.currentframe()
try:
while frame.f_code.co_name != 'startTest':
frame = frame.f_back
BaseLayer.test_name = str(frame.f_locals['test'])
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
@profiled
def testTearDown(cls):
# Get our current working directory, handling the case where it no
# longer exists (!).
try:
cwd = os.getcwd()
except OSError:
cwd = None
# Handle a changed working directory. If the test succeeded,
# add an error. Then restore the working directory so the test
# run can continue.
if cwd != BaseLayer.original_working_directory:
BaseLayer.flagTestIsolationFailure(
"Test failed to restore working directory.")
os.chdir(BaseLayer.original_working_directory)
BaseLayer.original_working_directory = None
reset_logging()
del lp.services.mail.stub.test_emails[:]
BaseLayer.test_name = None
BaseLayer.check()
def new_live_threads():
return [
thread for thread in threading.enumerate()
if thread not in BaseLayer._threads and thread.isAlive()]
if BaseLayer.disable_thread_check:
new_threads = None
else:
for loop in range(0, 100):
# Check for tests that leave live threads around early.
# A live thread may be the cause of other failures, such as
# uncollectable garbage.
new_threads = new_live_threads()
has_live_threads = False
for new_thread in new_threads:
new_thread.join(0.1)
if new_thread.isAlive():
has_live_threads = True
if has_live_threads:
# Trigger full garbage collection that might be
# blocking threads from exiting.
gc.collect()
else:
break
new_threads = new_live_threads()
if new_threads:
# BaseLayer.disable_thread_check is a mechanism to stop
# tests that leave threads behind from failing. Its use
# should only ever be temporary.
if BaseLayer.disable_thread_check:
print (
"ERROR DISABLED: "
"Test left new live threads: %s") % repr(new_threads)
else:
BaseLayer.flagTestIsolationFailure(
"Test left new live threads: %s" % repr(new_threads))
BaseLayer.disable_thread_check = False
del BaseLayer._threads
if signal.getsignal(signal.SIGCHLD) != signal.SIG_DFL:
BaseLayer.flagTestIsolationFailure(
"Test left SIGCHLD handler.")
        # Objects with __del__ methods cannot participate in reference cycles.
# Fail tests with memory leaks now rather than when Launchpad crashes
# due to a leak because someone ignored the warnings.
if gc.garbage:
del gc.garbage[:]
gc.collect() # Expensive, so only do if there might be garbage.
if gc.garbage:
BaseLayer.flagTestIsolationFailure(
"Test left uncollectable garbage\n"
"%s (referenced from %s)"
% (gc.garbage, gc.get_referrers(*gc.garbage)))
@classmethod
@profiled
def check(cls):
"""Check that the environment is working as expected.
We check here so we can detect tests that, for example,
initialize the Zopeless or Functional environments and
are using the incorrect layer.
"""
if FunctionalLayer.isSetUp and ZopelessLayer.isSetUp:
raise LayerInvariantError(
"Both Zopefull and Zopeless CA environments setup")
# Detect a test that causes the component architecture to be loaded.
# This breaks test isolation, as it cannot be torn down.
if (is_ca_available()
and not FunctionalLayer.isSetUp
and not ZopelessLayer.isSetUp):
raise LayerIsolationError(
"Component architecture should not be loaded by tests. "
"This should only be loaded by the Layer.")
# Detect a test that forgot to reset the default socket timeout.
# This safety belt is cheap and protects us from very nasty
# intermittent test failures: see bug #140068 for an example.
if socket.getdefaulttimeout() is not None:
raise LayerIsolationError(
"Test didn't reset the socket default timeout.")
@classmethod
def flagTestIsolationFailure(cls, message):
"""Handle a breakdown in test isolation.
If the test that broke isolation thinks it succeeded,
add an error. If the test failed, don't add a notification
as the isolation breakdown is probably just fallout.
The layer that detected the isolation failure still needs to
repair the damage, or in the worst case abort the test run.
"""
test_result = BaseLayer.getCurrentTestResult()
if test_result.wasSuccessful():
test_case = BaseLayer.getCurrentTestCase()
try:
raise LayerIsolationError(message)
except LayerIsolationError:
test_result.addError(test_case, sys.exc_info())
@classmethod
def getCurrentTestResult(cls):
"""Return the TestResult currently in play."""
import inspect
frame = inspect.currentframe()
try:
while True:
f_self = frame.f_locals.get('self', None)
if isinstance(f_self, TestResult):
return frame.f_locals['self']
frame = frame.f_back
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
def getCurrentTestCase(cls):
"""Return the test currently in play."""
import inspect
frame = inspect.currentframe()
try:
while True:
f_self = frame.f_locals.get('self', None)
if isinstance(f_self, TestCase):
return f_self
f_test = frame.f_locals.get('test', None)
if isinstance(f_test, TestCase):
return f_test
frame = frame.f_back
return frame.f_locals['test']
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
def appserver_config(cls):
"""Return a config suitable for AppServer tests."""
return LaunchpadConfig(cls.appserver_config_name)
@classmethod
def appserver_root_url(cls, facet='mainsite', ensureSlash=False):
"""Return the correct app server root url for the given facet."""
return cls.appserver_config().appserver_root_url(
facet, ensureSlash)
class MemcachedLayer(BaseLayer):
"""Provides tests access to a memcached.
Most tests needing memcache access will actually need to use
ZopelessLayer, FunctionalLayer or sublayer as they will be accessing
memcached using a utility.
"""
# A memcache.Client instance.
client = None
# A subprocess.Popen instance if this process spawned the test
# memcached.
_memcached_process = None
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls._is_setup = True
# Create a client
MemcachedLayer.client = memcache_client_factory()
if (BaseLayer.persist_test_services and
os.path.exists(MemcachedLayer.getPidFile())):
return
# First, check to see if there is a memcached already running.
# This happens when new layers are run as a subprocess.
test_key = "MemcachedLayer__live_test"
if MemcachedLayer.client.set(test_key, "live"):
return
cmd = [
'memcached',
'-m', str(config.memcached.memory_size),
'-l', str(config.memcached.address),
'-p', str(config.memcached.port),
'-U', str(config.memcached.port),
]
if config.memcached.verbose:
cmd.append('-vv')
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = tempfile.NamedTemporaryFile()
stderr = tempfile.NamedTemporaryFile()
MemcachedLayer._memcached_process = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
MemcachedLayer._memcached_process.stdin.close()
# Wait for the memcached to become operational.
while not MemcachedLayer.client.set(test_key, "live"):
if MemcachedLayer._memcached_process.returncode is not None:
raise LayerInvariantError(
"memcached never started or has died.",
MemcachedLayer._memcached_process.stdout.read())
MemcachedLayer.client.forget_dead_hosts()
time.sleep(0.1)
# Store the pidfile for other processes to kill.
pid_file = MemcachedLayer.getPidFile()
open(pid_file, 'w').write(str(MemcachedLayer._memcached_process.pid))
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls._is_setup = False
MemcachedLayer.client.disconnect_all()
MemcachedLayer.client = None
if not BaseLayer.persist_test_services:
# Kill our memcached, and there is no reason to be nice about it.
kill_by_pidfile(MemcachedLayer.getPidFile())
MemcachedLayer._memcached_process = None
@classmethod
@profiled
def testSetUp(cls):
MemcachedLayer.client.forget_dead_hosts()
MemcachedLayer.client.flush_all()
@classmethod
@profiled
def testTearDown(cls):
pass
@classmethod
def getPidFile(cls):
return os.path.join(config.root, '.memcache.pid')
@classmethod
def purge(cls):
"Purge everything from our memcached."
MemcachedLayer.client.flush_all() # Only do this in tests!
class RabbitMQLayer(BaseLayer):
"""Provides tests access to a rabbitMQ instance."""
rabbit = RabbitServer()
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls.rabbit.setUp()
cls.config_fixture.add_section(
cls.rabbit.config.service_config)
cls.appserver_config_fixture.add_section(
cls.rabbit.config.service_config)
cls._is_setup = True
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls.rabbit.cleanUp()
cls._is_setup = False
# Can't pop the config above, so bail here and let the test runner
# start a sub-process.
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
# We store a reference to the DB-API connect method here when we
# put a proxy in its place.
_org_connect = None
class DatabaseLayer(BaseLayer):
"""Provides tests access to the Launchpad sample database."""
_is_setup = False
_db_fixture = None
# For parallel testing, we allocate a temporary template to prevent worker
# contention.
_db_template_fixture = None
@classmethod
@profiled
def setUp(cls):
cls._is_setup = True
# Read the sequences we'll need from the test template database.
reset_sequences_sql = LaunchpadTestSetup(
dbname='launchpad_ftest_template').generateResetSequencesSQL()
# Allocate a template for this test instance
if os.environ.get('LP_TEST_INSTANCE'):
template_name = '_'.join([LaunchpadTestSetup.template,
os.environ.get('LP_TEST_INSTANCE')])
cls._db_template_fixture = LaunchpadTestSetup(
dbname=template_name, reset_sequences_sql=reset_sequences_sql)
cls._db_template_fixture.setUp()
else:
template_name = LaunchpadTestSetup.template
cls._db_fixture = LaunchpadTestSetup(template=template_name,
reset_sequences_sql=reset_sequences_sql)
cls.force_dirty_database()
# Nuke any existing DB (for persistent-test-services) [though they
# prevent this !?]
cls._db_fixture.tearDown()
# Force a db creation for unique db names - needed at layer init
# because appserver using layers run things at layer setup, not
# test setup.
cls._db_fixture.setUp()
# And take it 'down' again to be in the right state for testSetUp
# - note that this conflicts in principle with layers whose setUp
# needs the db working, but this is a conceptually cleaner starting
# point for addressing that mismatch.
cls._db_fixture.tearDown()
# Bring up the db, so that it is available for other layers.
cls._ensure_db()
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls._is_setup = False
# Don't leave the DB lying around or it might break tests
# that depend on it not being there on startup, such as found
# in test_layers.py
cls.force_dirty_database()
cls._db_fixture.tearDown()
cls._db_fixture = None
if os.environ.get('LP_TEST_INSTANCE'):
cls._db_template_fixture.tearDown()
cls._db_template_fixture = None
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
def _ensure_db(cls):
cls._db_fixture.setUp()
# Ensure that the database is connectable. Because we might have
        # just created it, keep trying for a few seconds in case PostgreSQL
# is taking its time getting its house in order.
attempts = 60
for count in range(0, attempts):
try:
cls.connect().close()
except psycopg2.Error:
if count == attempts - 1:
raise
time.sleep(0.5)
else:
break
@classmethod
@profiled
def testTearDown(cls):
# Ensure that the database is connectable
cls.connect().close()
cls._db_fixture.tearDown()
# Fail tests that forget to uninstall their database policies.
from lp.services.webapp.adapter import StoreSelector
while StoreSelector.get_current() is not None:
BaseLayer.flagTestIsolationFailure(
"Database policy %s still installed"
% repr(StoreSelector.pop()))
# Reset/bring up the db - makes it available for either the next test,
# or a subordinate layer which builds on the db. This wastes one setup
        # per db layer teardown per run, but that's tolerable.
cls._ensure_db()
@classmethod
@profiled
def force_dirty_database(cls):
cls._db_fixture.force_dirty_database()
@classmethod
@profiled
def connect(cls):
return cls._db_fixture.connect()
@classmethod
@profiled
def _dropDb(cls):
return cls._db_fixture.dropDb()
class SwiftLayer(BaseLayer):
@classmethod
@profiled
def setUp(cls):
cls.swift_fixture = SwiftFixture()
cls.swift_fixture.setUp()
@classmethod
@profiled
def tearDown(cls):
swift = cls.swift_fixture
if swift is not None:
cls.swift_fixture = None
swift.cleanUp()
class LibrarianLayer(DatabaseLayer):
"""Provides tests access to a Librarian instance.
Calls to the Librarian will fail unless there is also a Launchpad
database available.
"""
librarian_fixture = None
@classmethod
@profiled
def setUp(cls):
cls.librarian_fixture = LibrarianServerFixture(
BaseLayer.config_fixture)
cls.librarian_fixture.setUp()
cls._check_and_reset()
# Make sure things using the appserver config know the
# correct Librarian port numbers.
cls.appserver_config_fixture.add_section(
cls.librarian_fixture.service_config)
@classmethod
@profiled
def tearDown(cls):
# Permit multiple teardowns while we sort out the layering
# responsibilities: not desirable, though.
if cls.librarian_fixture is None:
return
try:
cls._check_and_reset()
finally:
librarian = cls.librarian_fixture
cls.librarian_fixture = None
librarian.cleanUp()
@classmethod
@profiled
def _check_and_reset(cls):
"""Raise an exception if the Librarian has been killed, else reset."""
try:
f = urlopen(config.librarian.download_url)
f.read()
except Exception as e:
raise LayerIsolationError(
"Librarian has been killed or has hung."
"Tests should use LibrarianLayer.hide() and "
"LibrarianLayer.reveal() where possible, and ensure "
"the Librarian is restarted if it absolutely must be "
"shutdown: " + str(e))
else:
cls.librarian_fixture.reset()
@classmethod
@profiled
def testSetUp(cls):
cls._check_and_reset()
@classmethod
@profiled
def testTearDown(cls):
if cls._hidden:
cls.reveal()
cls._check_and_reset()
# Flag maintaining state of hide()/reveal() calls
_hidden = False
# Fake upload socket used when the librarian is hidden
_fake_upload_socket = None
@classmethod
@profiled
def hide(cls):
"""Hide the Librarian so nothing can find it. We don't want to
actually shut it down because starting it up again is expensive.
We do this by altering the configuration so the Librarian client
looks for the Librarian server on the wrong port.
"""
cls._hidden = True
if cls._fake_upload_socket is None:
# Bind to a socket, but don't listen to it. This way we
# guarantee that connections to the given port will fail.
cls._fake_upload_socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
assert config.librarian.upload_host == 'localhost', (
'Can only hide librarian if it is running locally')
cls._fake_upload_socket.bind(('127.0.0.1', 0))
host, port = cls._fake_upload_socket.getsockname()
librarian_data = dedent("""
[librarian]
upload_port: %s
""" % port)
config.push('hide_librarian', librarian_data)
@classmethod
@profiled
def reveal(cls):
"""Reveal a hidden Librarian.
This just involves restoring the config to the original value.
"""
cls._hidden = False
config.pop('hide_librarian')
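# Illustrative sketch of the "bind but never listen" trick that
# LibrarianLayer.hide() above depends on: a port bound this way refuses
# incoming connections, so clients fail fast while the real server keeps
# running. The helper name is an assumption, not part of the layer API.
def _example_reserve_refusing_port(host='127.0.0.1'):
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((host, 0))  # bound, but listen() is never called
    return sock, sock.getsockname()[1]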
def test_default_timeout():
"""Don't timeout by default in tests."""
return None
class LaunchpadLayer(LibrarianLayer, MemcachedLayer, RabbitMQLayer):
"""Provides access to the Launchpad database and daemons.
We need to ensure that the database setup runs before the daemon
setup, or the database setup will fail because the daemons are
already connected to the database.
This layer is mainly used by tests that call initZopeless() themselves.
Most tests will use a sublayer such as LaunchpadFunctionalLayer that
provides access to the Component Architecture.
"""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# By default, don't make external service tests timeout.
if get_default_timeout_function() is not None:
raise LayerIsolationError(
"Global default timeout function should be None.")
set_default_timeout_function(test_default_timeout)
@classmethod
@profiled
def testTearDown(cls):
if get_default_timeout_function() is not test_default_timeout:
raise LayerIsolationError(
"Test didn't reset default timeout function.")
set_default_timeout_function(None)
# A database connection to the session database, created by the first
# call to resetSessionDb.
_raw_sessiondb_connection = None
@classmethod
@profiled
def resetSessionDb(cls):
"""Reset the session database.
Layers that need session database isolation call this explicitly
in the testSetUp().
"""
if LaunchpadLayer._raw_sessiondb_connection is None:
from storm.uri import URI
from lp.services.webapp.adapter import (
LaunchpadSessionDatabase)
launchpad_session_database = LaunchpadSessionDatabase(
URI('launchpad-session:'))
LaunchpadLayer._raw_sessiondb_connection = (
launchpad_session_database.raw_connect())
LaunchpadLayer._raw_sessiondb_connection.cursor().execute(
"DELETE FROM SessionData")
def wsgi_application(environ, start_response):
"""This is a wsgi application for Zope functional testing.
We use it with wsgi_intercept, which is itself mostly interesting
for our webservice (lazr.restful) tests.
"""
# Committing work done up to now is a convenience that the Zope
# zope.app.testing.functional.HTTPCaller does. We're replacing that bit,
# so it is easiest to follow that lead, even if it feels a little loose.
transaction.commit()
# Let's support post-mortem debugging.
if environ.pop('HTTP_X_ZOPE_HANDLE_ERRORS', 'True') == 'False':
environ['wsgi.handleErrors'] = False
handle_errors = environ.get('wsgi.handleErrors', True)
# Make sure the request method is something Launchpad will
# recognize. httplib2 usually takes care of this, but we've
# bypassed that code in our test environment.
environ['REQUEST_METHOD'] = environ['REQUEST_METHOD'].upper()
# Now we do the proper dance to get the desired request. This is an
# amalgam of code from zope.app.testing.functional.HTTPCaller and
# zope.publisher.paste.Application.
request_cls, publication_cls = chooseClasses(
environ['REQUEST_METHOD'], environ)
publication = publication_cls(set_up_functional_test().db)
request = request_cls(environ['wsgi.input'], environ)
request.setPublication(publication)
# The rest of this function is an amalgam of
# zope.publisher.paste.Application.__call__ and van.testing.layers.
request = zope.publisher.publish.publish(
request, handle_errors=handle_errors)
response = request.response
# We sort these, and then put the status first, because
# zope.testbrowser.testing does--and because it makes it easier to write
# reliable tests.
headers = sorted(response.getHeaders())
status = response.getStatusString()
headers.insert(0, ('Status', status))
# Start the WSGI server response.
start_response(status, headers)
# Return the result body iterable.
return response.consumeBodyIter()
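# Minimal WSGI example mirroring the shape of wsgi_application() above:
# build a status line and a sorted header list (with 'Status' first), hand
# them to start_response, and return an iterable body. Purely illustrative;
# the function name and payload are assumptions.
def _example_minimal_wsgi_app(environ, start_response):
    body = b'hello'
    status = '200 OK'
    headers = sorted([('Content-Type', 'text/plain'),
                      ('Content-Length', str(len(body)))])
    headers.insert(0, ('Status', status))
    start_response(status, headers)
    return [body]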
class FunctionalLayer(BaseLayer):
"""Loads the Zope3 component architecture in appserver mode."""
# Set to True if tests using the Functional layer are currently being run.
isSetUp = False
@classmethod
@profiled
def setUp(cls):
FunctionalLayer.isSetUp = True
set_up_functional_test().setUp()
# Assert that set_up_functional_test did what it says it does
if not is_ca_available():
raise LayerInvariantError("Component architecture failed to load")
# Access the cookie manager's secret to get the cache populated.
# If we don't, it may issue extra queries depending on test order.
lp.services.webapp.session.idmanager.secret
# If our request publication factories were defined using ZCML,
# they'd be set up by set_up_functional_test().setUp(). Since
# they're defined by Python code, we need to call that code
# here.
register_launchpad_request_publication_factories()
wsgi_intercept.add_wsgi_intercept(
'localhost', 80, lambda: wsgi_application)
wsgi_intercept.add_wsgi_intercept(
'api.launchpad.dev', 80, lambda: wsgi_application)
httplib2_intercept.install()
@classmethod
@profiled
def tearDown(cls):
FunctionalLayer.isSetUp = False
wsgi_intercept.remove_wsgi_intercept('localhost', 80)
wsgi_intercept.remove_wsgi_intercept('api.launchpad.dev', 80)
httplib2_intercept.uninstall()
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
transaction.abort()
transaction.begin()
# Fake a root folder to keep Z3 ZODB dependencies happy.
fs = set_up_functional_test()
if not fs.connection:
fs.connection = fs.db.open()
root = fs.connection.root()
root[ZopePublication.root_name] = MockRootFolder()
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
@classmethod
@profiled
def testTearDown(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
transaction.abort()
class ZopelessLayer(BaseLayer):
"""Layer for tests that need the Zopeless component architecture
loaded using execute_zcml_for_scripts().
"""
# Set to True if tests in the Zopeless layer are currently being run.
isSetUp = False
@classmethod
@profiled
def setUp(cls):
ZopelessLayer.isSetUp = True
execute_zcml_for_scripts()
# Assert that execute_zcml_for_scripts did what it says it does.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded by "
"execute_zcml_for_scripts")
# If our request publication factories were defined using
# ZCML, they'd be set up by execute_zcml_for_scripts(). Since
# they're defined by Python code, we need to call that code
# here.
register_launchpad_request_publication_factories()
@classmethod
@profiled
def tearDown(cls):
ZopelessLayer.isSetUp = False
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
# This should not happen here, it should be caught by the
# testTearDown() method. If it does, something very nasty
# happened.
if getSecurityPolicy() != LaunchpadPermissiveSecurityPolicy:
raise LayerInvariantError(
"Previous test removed the LaunchpadPermissiveSecurityPolicy."
)
# execute_zcml_for_scripts() sets up an interaction for the
# anonymous user. A previous script may have changed or removed
# the interaction, so set it up again
login(ANONYMOUS)
@classmethod
@profiled
def testTearDown(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
# Make sure that a test that changed the security policy reset it
# back to its default value.
if getSecurityPolicy() != LaunchpadPermissiveSecurityPolicy:
raise LayerInvariantError(
"This test removed the LaunchpadPermissiveSecurityPolicy and "
"didn't restore it.")
logout()
class TwistedLayer(BaseLayer):
"""A layer for cleaning up the Twisted thread pool."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
def _save_signals(cls):
"""Save the current signal handlers."""
TwistedLayer._original_sigint = signal.getsignal(signal.SIGINT)
TwistedLayer._original_sigterm = signal.getsignal(signal.SIGTERM)
TwistedLayer._original_sigchld = signal.getsignal(signal.SIGCHLD)
# XXX MichaelHudson, 2009-07-14, bug=399118: If a test case in this
# layer launches a process with spawnProcess, there should really be a
# SIGCHLD handler installed to avoid PotentialZombieWarnings. But
# some tests in this layer use tachandler and it is fragile when a
# SIGCHLD handler is installed. tachandler needs to be fixed.
# from twisted.internet import reactor
# signal.signal(signal.SIGCHLD, reactor._handleSigchld)
@classmethod
def _restore_signals(cls):
"""Restore the signal handlers."""
signal.signal(signal.SIGINT, TwistedLayer._original_sigint)
signal.signal(signal.SIGTERM, TwistedLayer._original_sigterm)
signal.signal(signal.SIGCHLD, TwistedLayer._original_sigchld)
@classmethod
@profiled
def testSetUp(cls):
TwistedLayer._save_signals()
from twisted.internet import interfaces, reactor
from twisted.python import threadpool
# zope.exception demands more of frame objects than
# twisted.python.failure provides in its fake frames. This is enough
# to make it work with them as of 2009-09-16. See
# https://bugs.launchpad.net/bugs/425113.
cls._patch = MonkeyPatch(
'twisted.python.failure._Frame.f_locals',
property(lambda self: {}))
cls._patch.setUp()
if interfaces.IReactorThreads.providedBy(reactor):
pool = getattr(reactor, 'threadpool', None)
# If the Twisted threadpool has been obliterated (probably by
# testTearDown), then re-build it using the values that Twisted
# uses.
if pool is None:
reactor.threadpool = threadpool.ThreadPool(0, 10)
reactor.threadpool.start()
@classmethod
@profiled
def testTearDown(cls):
# Shutdown and obliterate the Twisted threadpool, to plug up leaking
# threads.
from twisted.internet import interfaces, reactor
if interfaces.IReactorThreads.providedBy(reactor):
reactor.suggestThreadPoolSize(0)
pool = getattr(reactor, 'threadpool', None)
if pool is not None:
reactor.threadpool.stop()
reactor.threadpool = None
cls._patch.cleanUp()
TwistedLayer._restore_signals()
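# Illustrative sketch (not part of TwistedLayer) of the save/restore signal
# handler pattern implemented by _save_signals()/_restore_signals() above,
# packaged as a context manager. The class name is an assumption.
class _ExamplePreservedSignals(object):
    """Snapshot the given signal handlers on enter, restore them on exit."""
    def __init__(self, *signums):
        self.signums = signums
        self.saved = {}
    def __enter__(self):
        import signal
        self.saved = dict((s, signal.getsignal(s)) for s in self.signums)
        return self
    def __exit__(self, exc_type, exc_value, tb):
        import signal
        for signum, handler in self.saved.items():
            signal.signal(signum, handler)
        return False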
class GoogleServiceLayer(BaseLayer):
"""Tests for Google web service integration."""
@classmethod
def setUp(cls):
google = GoogleServiceTestSetup()
google.setUp()
@classmethod
def tearDown(cls):
GoogleServiceTestSetup().tearDown()
@classmethod
def testSetUp(cls):
# We need to override BaseLayer.testSetUp(), or else we will
# get a LayerIsolationError.
pass
@classmethod
def testTearDown(cls):
# We need to override BaseLayer.testTearDown(), or else we will
# get a LayerIsolationError.
pass
class DatabaseFunctionalLayer(DatabaseLayer, FunctionalLayer):
"""Provides the database and the Zope3 application server environment."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# Connect Storm
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
getUtility(IOpenLaunchBag).clear()
endInteraction()
# Disconnect Storm so it doesn't get in the way of database resets
disconnect_stores()
class LaunchpadFunctionalLayer(LaunchpadLayer, FunctionalLayer):
"""Provides the Launchpad Zope3 application server environment."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# Reset any statistics
from lp.services.webapp.opstats import OpStats
OpStats.resetStats()
# Connect Storm
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
getUtility(IOpenLaunchBag).clear()
endInteraction()
# Reset any statistics
from lp.services.webapp.opstats import OpStats
OpStats.resetStats()
# Disconnect Storm so it doesn't get in the way of database resets
disconnect_stores()
class AuditorLayer(LaunchpadFunctionalLayer):
auditor = AuditorServer()
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls.auditor.setUp()
cls.config_fixture.add_section(cls.auditor.service_config)
cls.appserver_config_fixture.add_section(cls.auditor.service_config)
cls._is_setup = True
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls.auditor.cleanUp()
cls._is_setup = False
# Can't pop the config above, so bail here and let the test runner
# start a sub-process.
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
class GoogleLaunchpadFunctionalLayer(LaunchpadFunctionalLayer,
GoogleServiceLayer):
"""Provides Google service in addition to LaunchpadFunctionalLayer."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
class ZopelessDatabaseLayer(ZopelessLayer, DatabaseLayer):
"""Testing layer for unit tests with no need for librarian.
Can be used wherever you're accustomed to using LaunchpadZopeless
or LaunchpadScript layers, but there is no need for librarian.
"""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
# LaunchpadZopelessLayer takes care of reconnecting the stores
if not LaunchpadZopelessLayer.isSetUp:
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
disconnect_stores()
class LaunchpadScriptLayer(ZopelessLayer, LaunchpadLayer):
"""Testing layer for scripts using the main Launchpad database adapter"""
@classmethod
@profiled
def setUp(cls):
# Make a TestMailBox available
# This is registered via ZCML in the LaunchpadFunctionalLayer
# XXX flacoste 2006-10-25 bug=68189: This should be configured from
# ZCML but execute_zcml_for_scripts() doesn't cannot support a
# different testing configuration.
cls._mailbox = TestMailBox()
provideUtility(cls._mailbox, IMailBox)
@classmethod
@profiled
def tearDown(cls):
if not globalregistry.base.unregisterUtility(cls._mailbox):
raise NotImplementedError('failed to unregister mailbox')
@classmethod
@profiled
def testSetUp(cls):
# LaunchpadZopelessLayer takes care of reconnecting the stores
if not LaunchpadZopelessLayer.isSetUp:
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
disconnect_stores()
class LaunchpadTestSetup(PgTestSetup):
template = 'launchpad_ftest_template'
dbuser = 'launchpad'
host = 'localhost'
class LaunchpadZopelessLayer(LaunchpadScriptLayer):
"""Full Zopeless environment including Component Architecture and
database connections initialized.
"""
isSetUp = False
txn = transaction
@classmethod
@profiled
def setUp(cls):
LaunchpadZopelessLayer.isSetUp = True
@classmethod
@profiled
def tearDown(cls):
LaunchpadZopelessLayer.isSetUp = False
@classmethod
@profiled
def testSetUp(cls):
dbconfig.override(isolation_level='read_committed')
# XXX wgrant 2011-09-24 bug=29744: initZopeless used to do this.
# Tests that still need it should eventually set this directly,
# so the whole layer is not polluted.
set_immediate_mail_delivery(True)
# Connect Storm
reconnect_stores()
@classmethod
@profiled
def testTearDown(cls):
dbconfig.reset()
# LaunchpadScriptLayer will disconnect the stores for us.
# XXX wgrant 2011-09-24 bug=29744: uninstall used to do this.
# Tests that still need immediate delivery should eventually do
# this directly.
set_immediate_mail_delivery(False)
@classmethod
@profiled
def commit(cls):
transaction.commit()
@classmethod
@profiled
def abort(cls):
transaction.abort()
class MockHTTPTask:
class MockHTTPRequestParser:
headers = None
first_line = None
class MockHTTPServerChannel:
# This is not important to us, so we can hardcode it here.
addr = ['127.0.0.88', 80]
request_data = MockHTTPRequestParser()
channel = MockHTTPServerChannel()
def __init__(self, response, first_line):
self.request = response._request
# We have no way of knowing when the task started, so we use
# the current time here. That shouldn't be a problem since we don't
# care about that for our tests anyway.
self.start_time = time.time()
self.status = response.getStatus()
# When streaming files (see lib/zope/publisher/httpresults.txt)
# the 'Content-Length' header is missing. When it happens we set
# 'bytes_written' to an obviously invalid value. This variable is
# used for logging purposes, see webapp/servers.py.
content_length = response.getHeader('Content-Length')
if content_length is not None:
self.bytes_written = int(content_length)
else:
self.bytes_written = -1
self.request_data.headers = self.request.headers
self.request_data.first_line = first_line
def getCGIEnvironment(self):
return self.request._orig_env
class PageTestLayer(LaunchpadFunctionalLayer, GoogleServiceLayer):
"""Environment for page tests.
"""
@classmethod
@profiled
def setUp(cls):
if os.environ.get('PROFILE_PAGETESTS_REQUESTS'):
PageTestLayer.profiler = Profile()
else:
PageTestLayer.profiler = None
file_handler = logging.FileHandler('logs/pagetests-access.log', 'w')
file_handler.setFormatter(logging.Formatter())
logger = PythonLogger('pagetests-access')
logger.logger.addHandler(file_handler)
logger.logger.setLevel(logging.INFO)
access_logger = LaunchpadAccessLogger(logger)
def my__call__(obj, request_string, handle_errors=True, form=None):
"""Call HTTPCaller.__call__ and log the page hit."""
if PageTestLayer.profiler:
response = PageTestLayer.profiler.runcall(
orig__call__, obj, request_string,
handle_errors=handle_errors, form=form)
else:
response = orig__call__(
obj, request_string, handle_errors=handle_errors,
form=form)
first_line = request_string.strip().splitlines()[0]
access_logger.log(MockHTTPTask(response._response, first_line))
return response
PageTestLayer.orig__call__ = (
zope.app.testing.functional.HTTPCaller.__call__)
zope.app.testing.functional.HTTPCaller.__call__ = my__call__
@classmethod
@profiled
def tearDown(cls):
zope.app.testing.functional.HTTPCaller.__call__ = (
PageTestLayer.orig__call__)
if PageTestLayer.profiler:
PageTestLayer.profiler.dump_stats(
os.environ.get('PROFILE_PAGETESTS_REQUESTS'))
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
pass
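# Minimal sketch of the wrap-and-restore pattern PageTestLayer applies to
# HTTPCaller.__call__ above: remember the original callable, install a
# logging wrapper, and put the original back at tearDown. Names here are
# placeholders, not part of the layer API.
def _example_wrap_for_logging(owner, attr, log):
    original = getattr(owner, attr)
    def wrapper(*args, **kwargs):
        result = original(*args, **kwargs)
        log(result)
        return result
    setattr(owner, attr, wrapper)
    # The caller restores later with: setattr(owner, attr, original)
    return original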
class TwistedLaunchpadZopelessLayer(TwistedLayer, LaunchpadZopelessLayer):
"""A layer for cleaning up the Twisted thread pool."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
# XXX 2008-06-11 jamesh bug=239086:
# Due to bugs in the transaction module's thread local
# storage, transactions may be reused by new threads in future
# tests. Therefore we do some cleanup before the pool is
# destroyed by TwistedLayer.testTearDown().
from twisted.internet import interfaces, reactor
if interfaces.IReactorThreads.providedBy(reactor):
pool = getattr(reactor, 'threadpool', None)
if pool is not None and pool.workers > 0:
def cleanup_thread_stores(event):
disconnect_stores()
# Don't exit until the event fires. This ensures
# that our thread doesn't get added to
# pool.waiters until all threads are processed.
event.wait()
event = threading.Event()
# Ensure that the pool doesn't grow, and issue one
# cleanup job for each thread in the pool.
pool.adjustPoolsize(0, pool.workers)
for i in range(pool.workers):
pool.callInThread(cleanup_thread_stores, event)
event.set()
class LayerProcessController:
"""Controller for starting and stopping subprocesses.
Layers which need to start and stop a child process appserver or smtp
server should call the methods in this class, but should NOT inherit from
this class.
"""
# Holds the Popen instance of the spawned app server.
appserver = None
# The config used by the spawned app server.
appserver_config = None
# The SMTP server for layer tests. See
# configs/testrunner-appserver/mail-configure.zcml
smtp_controller = None
@classmethod
def setConfig(cls):
"""Stash a config for use."""
cls.appserver_config = LaunchpadConfig(
BaseLayer.appserver_config_name, 'runlaunchpad')
@classmethod
def setUp(cls):
cls.setConfig()
cls.startSMTPServer()
cls.startAppServer()
@classmethod
@profiled
def startSMTPServer(cls):
"""Start the SMTP server if it hasn't already been started."""
if cls.smtp_controller is not None:
raise LayerInvariantError('SMTP server already running')
# Ensure that the SMTP server does proper logging.
log = logging.getLogger('lazr.smtptest')
log_file = os.path.join(config.mailman.build_var_dir, 'logs', 'smtpd')
handler = logging.FileHandler(log_file)
formatter = logging.Formatter(
fmt='%(asctime)s (%(process)d) %(message)s',
datefmt='%b %d %H:%M:%S %Y')
handler.setFormatter(formatter)
log.setLevel(logging.DEBUG)
log.addHandler(handler)
log.propagate = False
cls.smtp_controller = SMTPController('localhost', 9025)
cls.smtp_controller.start()
@classmethod
@profiled
def startAppServer(cls, run_name='run'):
"""Start the app server if it hasn't already been started."""
if cls.appserver is not None:
raise LayerInvariantError('App server already running')
cls._cleanUpStaleAppServer()
cls._runAppServer(run_name)
cls._waitUntilAppServerIsReady()
@classmethod
@profiled
def stopSMTPServer(cls):
"""Kill the SMTP server and wait until it's exited."""
if cls.smtp_controller is not None:
cls.smtp_controller.reset()
cls.smtp_controller.stop()
cls.smtp_controller = None
@classmethod
def _kill(cls, sig):
"""Kill the appserver with `sig`.
:param sig: the signal to kill with
:type sig: int
:return: True if the signal was delivered, otherwise False.
:rtype: bool
"""
try:
os.kill(cls.appserver.pid, sig)
except OSError as error:
if error.errno == errno.ESRCH:
# The child process doesn't exist. Maybe it went away by the
# time we got here.
cls.appserver = None
return False
else:
# Something else went wrong.
raise
else:
return True
@classmethod
@profiled
def stopAppServer(cls):
"""Kill the appserver and wait until it's exited."""
if cls.appserver is not None:
# Unfortunately, Popen.wait() does not support a timeout, so poll
# for a little while, then SIGKILL the process if it refuses to
# exit. test_on_merge.py will barf if we hang here for too long.
until = datetime.datetime.now() + WAIT_INTERVAL
last_chance = False
if not cls._kill(signal.SIGTERM):
# The process is already gone.
return
while True:
# Sleep and poll for process exit.
if cls.appserver.poll() is not None:
break
time.sleep(0.5)
# If we slept long enough, send a harder kill and wait again.
# If we already had our last chance, raise an exception.
if datetime.datetime.now() > until:
if last_chance:
raise RuntimeError("The appserver just wouldn't die")
last_chance = True
if not cls._kill(signal.SIGKILL):
# The process is already gone.
return
until = datetime.datetime.now() + WAIT_INTERVAL
cls.appserver = None
@classmethod
@profiled
def postTestInvariants(cls):
"""Enforce some invariants after each test.
Must be called in your layer class's `testTearDown()`.
"""
if cls.appserver.poll() is not None:
raise LayerIsolationError(
"App server died in this test (status=%s):\n%s" % (
cls.appserver.returncode, cls.appserver.stdout.read()))
DatabaseLayer.force_dirty_database()
@classmethod
def _cleanUpStaleAppServer(cls):
"""Kill any stale app server or pid file."""
pid = pidfile.get_pid('launchpad', cls.appserver_config)
if pid is not None:
# Don't worry if the process no longer exists.
try:
os.kill(pid, signal.SIGTERM)
except OSError as error:
if error.errno != errno.ESRCH:
raise
pidfile.remove_pidfile('launchpad', cls.appserver_config)
@classmethod
def _runAppServer(cls, run_name):
"""Start the app server using runlaunchpad.py"""
_config = cls.appserver_config
cmd = [
os.path.join(_config.root, 'bin', run_name),
'-C', 'configs/%s/launchpad.conf' % _config.instance_name]
environ = dict(os.environ)
environ['LPCONFIG'] = _config.instance_name
cls.appserver = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=environ, cwd=_config.root)
@classmethod
def appserver_root_url(cls):
return cls.appserver_config.vhost.mainsite.rooturl
@classmethod
def _waitUntilAppServerIsReady(cls):
"""Wait until the app server accepts connection."""
assert cls.appserver is not None, "App server isn't started."
root_url = cls.appserver_root_url()
until = datetime.datetime.now() + WAIT_INTERVAL
while until > datetime.datetime.now():
try:
connection = urlopen(root_url)
connection.read()
except IOError as error:
# We are interested in a wrapped socket.error.
# urlopen() really sucks here.
if len(error.args) <= 1:
raise
if not isinstance(error.args[1], socket.error):
raise
if error.args[1].args[0] != errno.ECONNREFUSED:
raise
returncode = cls.appserver.poll()
if returncode is not None:
raise RuntimeError(
'App server failed to start (status=%d):\n%s' % (
returncode, cls.appserver.stdout.read()))
time.sleep(0.5)
else:
connection.close()
break
else:
os.kill(cls.appserver.pid, signal.SIGTERM)
cls.appserver = None
# Go no further.
raise AssertionError('App server startup timed out.')
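# Self-contained sketch (illustrative only, not the controller's API) of the
# SIGTERM-then-SIGKILL escalation implemented by stopAppServer() above for a
# subprocess.Popen instance. The grace period value is an assumption.
def _example_stop_process(proc, grace_seconds=30):
    import signal
    import time
    proc.send_signal(signal.SIGTERM)
    deadline = time.time() + grace_seconds
    while proc.poll() is None:
        if time.time() > deadline:
            proc.kill()  # escalate to SIGKILL
            proc.wait()
            break
        time.sleep(0.5)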
class AppServerLayer(LaunchpadFunctionalLayer):
"""Layer for tests that run in the webapp environment with an app server.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class CeleryJobLayer(AppServerLayer):
"""Layer for tests that run jobs via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('launchpad_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class CeleryBzrsyncdJobLayer(AppServerLayer):
"""Layer for tests that run jobs that read from branches via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('bzrsyncd_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class CeleryBranchWriteJobLayer(AppServerLayer):
"""Layer for tests that run jobs which write to branches via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('branch_write_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class ZopelessAppServerLayer(LaunchpadZopelessLayer):
"""Layer for tests that run in the zopeless environment with an appserver.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class TwistedAppServerLayer(TwistedLaunchpadZopelessLayer):
"""Layer for twisted-using zopeless tests that need a running app server.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class YUITestLayer(FunctionalLayer):
"""The layer for all YUITests cases."""
class YUIAppServerLayer(MemcachedLayer):
"""The layer for all YUIAppServer test cases."""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setConfig()
LayerProcessController.startAppServer('run-testapp')
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
| agpl-3.0 | 8,982,281,586,918,893,000 | 31.655561 | 78 | 0.633593 | false |
UASLab/ImageAnalysis | scripts/archive/4b-simple-matches-reset.py | 1 | 2626 | #!/usr/bin/python
import sys
sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import math
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
# Reset all match point locations to their original direct
# georeferenced locations based on estimated camera pose and
# projection onto DEM earth surface
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
proj.load_match_pairs()
# setup SRTM ground interpolator
ref = proj.ned_reference_lla
sss = SRTM.NEDGround( ref, 2000, 2000, 30 )
# compute keypoint usage map
proj.compute_kp_usage()
# fast way:
# 1. make a grid (i.e. 8x8) of uv coordinates covering the whole image
# 2. undistort these uv coordinates
# 3. project them into vectors
# 4. intersect them with the srtm terrain to get ned coordinates
# 5. use linearndinterpolator ... g = scipy.interpolate.LinearNDInterpolator([[0,0],[1,0],[0,1],[1,1]], [[0,4,8],[1,3,2],[2,2,-4],[4,1,0]])
# with origin uv vs. 3d location to build a table
# 6. interpolate original uv coordinates to 3d locations
proj.fastProjectKeypointsTo3d(sss)
# build a list of all keypoints, but only consider pairwise
# matches and don't try to find single matches that span 3 or more
# images.
print "Constructing unified match structure..."
matches_direct = []
for i, i1 in enumerate(proj.image_list):
# print i1.name
for j, matches in enumerate(i1.match_list):
# print proj.image_list[j].name
if j > i:
for pair in matches:
ned1 = proj.image_list[i].coord_list[pair[0]]
ned2 = proj.image_list[j].coord_list[pair[1]]
ned = (ned1 + ned2) / 2
#print ned1, ned2, ned
match = [ ned, [i, pair[0]], [j, pair[1]] ]
matches_direct.append( match )
print "total features in image set = %d" % len(matches_direct)
print "2 images per feature, no redundancy removal."
print "Writing match file ..."
pickle.dump(matches_direct, open(args.project + "/matches_direct", "wb"))
print "temp: writing matches_direct ascii version..."
f = open(args.project + "/matches_direct.ascii", "wb")
for match in matches_direct:
f.write( str(match) + '\n' )
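# Illustrative helper (not part of the original script) for the pairwise
# averaging used above: the 3D location of a two-image feature is taken as
# the midpoint of the two per-image NED projections.
def _example_midpoint_ned(ned1, ned2):
    """Average two 3-element NED coordinates (pure-Python sketch)."""
    return [(a + b) / 2.0 for a, b in zip(ned1, ned2)]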
| mit | 6,323,647,680,881,667,000 | 31.02439 | 139 | 0.690023 | false |
undoware/neutron-drive | google_appengine/google/appengine/api/search/search_util.py | 1 | 4254 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides utility methods used by modules in the FTS API stub."""
import datetime
import re
from google.appengine.datastore import document_pb
from google.appengine.api.search import QueryParser
DEFAULT_MAX_SNIPPET_LENGTH = 160
TEXT_DOCUMENT_FIELD_TYPES = [
document_pb.FieldValue.ATOM,
document_pb.FieldValue.TEXT,
document_pb.FieldValue.HTML,
]
TEXT_QUERY_TYPES = [
QueryParser.NAME,
QueryParser.PHRASE,
QueryParser.TEXT,
]
NUMBER_DOCUMENT_FIELD_TYPES = [
document_pb.FieldValue.NUMBER,
]
NUMBER_QUERY_TYPES = [
QueryParser.FLOAT,
QueryParser.INT,
QueryParser.NUMBER,
]
BASE_DATE = datetime.datetime(1970, 1, 1, tzinfo=None)
class UnsupportedOnDevError(Exception):
"""Indicates attempt to perform an action unsupported on the dev server."""
def GetFieldInDocument(document, field_name):
"""Find and return the field with the provided name in the document."""
for f in document.field_list():
if f.name() == field_name:
return f
return None
def AddFieldsToDocumentPb(doc_id, fields, document):
"""Add the id and fields to document.
Args:
doc_id: The document id.
fields: List of tuples of field name, value and optionally type.
document: The document to add the fields to.
"""
if doc_id is not None:
document.set_id(doc_id)
for field_tuple in fields:
name = field_tuple[0]
value = field_tuple[1]
field = document.add_field()
field.set_name(name)
field_value = field.mutable_value()
field_value.set_string_value(value)
if len(field_tuple) > 2:
field_value.set_type(field_tuple[2])
def GetFieldCountInDocument(document, field_name):
count = 0
for field in document.field_list():
if field.name() == field_name:
count += 1
return count
def GetFieldValue(field):
"""Returns the value of a field as the correct type."""
value = field.value().string_value()
value_type = field.value().type()
if value_type in TEXT_DOCUMENT_FIELD_TYPES:
return value
if value_type is document_pb.FieldValue.DATE:
return DeserializeDate(value)
if value_type is document_pb.FieldValue.NUMBER:
return float(value)
raise TypeError('No conversion defined for type %s' % value_type)
def EpochTime(date):
"""Returns millisecond epoch time for a date or datetime."""
if isinstance(date, datetime.datetime):
td = date - BASE_DATE
else:
td = date - BASE_DATE.date()
milliseconds_since_epoch = long(
(td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**3)
return milliseconds_since_epoch
def SerializeDate(date):
return str(EpochTime(date))
def DeserializeDate(date_str):
if re.match(r'^\d+\-\d+\-\d+$', date_str):
return datetime.datetime.strptime(date_str, '%Y-%m-%d')
else:
dt = BASE_DATE + datetime.timedelta(milliseconds=long(date_str))
return dt
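# Hedged example (not part of the App Engine stub API) showing the intended
# round trip through SerializeDate()/DeserializeDate() above: a datetime is
# stored as milliseconds since 1970-01-01 and recovered from that string.
def _example_date_round_trip():
    original = datetime.datetime(2012, 1, 27, 8, 3, 10)
    serialized = SerializeDate(original)  # '1327651390000'
    return DeserializeDate(serialized)    # datetime equal to ``original``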
def Repr(class_instance, ordered_dictionary):
"""Generates an unambiguous representation for instance and ordered dict."""
return 'search.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
["%s='%s'" % (key, value)
for (key, value) in ordered_dictionary if value]))
def TreeRepr(tree, depth=0):
"""Generate a string representation of an ANTLR parse tree for debugging."""
def _NodeRepr(node):
text = str(node.getType())
if node.getText():
text = '%s: %s' % (text, node.getText())
return text
children = ''
if tree.children:
children = '\n' + '\n'.join([TreeRepr(child, depth=depth+1)
for child in tree.children if child])
return depth * ' ' + _NodeRepr(tree) + children
| bsd-3-clause | -5,943,355,562,431,053,000 | 25.5875 | 78 | 0.681711 | false |
inveniosoftware/invenio-indexer | invenio_indexer/api.py | 1 | 16034 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""API for indexing of records."""
from __future__ import absolute_import, print_function
import copy
from contextlib import contextmanager
import pytz
from celery import current_app as current_celery_app
from elasticsearch import VERSION as ES_VERSION
from elasticsearch.helpers import bulk
from elasticsearch.helpers import expand_action as default_expand_action
from flask import current_app
from invenio_records.api import Record
from invenio_search import current_search_client
from invenio_search.utils import build_alias_name
from kombu import Producer as KombuProducer
from kombu.compat import Consumer
from sqlalchemy.orm.exc import NoResultFound
from .proxies import current_record_to_index
from .signals import before_record_index
from .utils import _es7_expand_action
class Producer(KombuProducer):
"""Producer validating published messages.
For more information visit :class:`kombu:kombu.Producer`.
"""
def publish(self, data, **kwargs):
"""Validate operation type."""
assert data.get('op') in {'index', 'create', 'delete', 'update'}
return super(Producer, self).publish(data, **kwargs)
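# Illustrative only: the message shape validated by Producer.publish() above
# and produced by RecordIndexer._bulk_op() below. The UUID is a placeholder.
_EXAMPLE_BULK_MESSAGE = {
    'id': '00000000-0000-0000-0000-000000000000',
    'op': 'index',     # one of: 'index', 'create', 'delete', 'update'
    'index': None,     # optional explicit index name
    'doc_type': None,  # optional explicit document type
}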
class RecordIndexer(object):
r"""Provide an interface for indexing records in Elasticsearch.
Bulk indexing works by queuing requests for indexing records and processing
these requests in bulk.
"""
record_cls = Record
"""Record class used for retriving and dumping records.
You can either subclass and overwrite this attribute, or provide the record
class to the constructor.
"""
record_dumper = None
"""Dumper instance to use with this record indexer."""
def __init__(self, search_client=None, exchange=None, queue=None,
routing_key=None, version_type=None, record_to_index=None,
record_cls=None, record_dumper=None):
"""Initialize indexer.
:param search_client: Elasticsearch client.
(Default: ``current_search_client``)
:param exchange: A :class:`kombu.Exchange` instance for message queue.
:param queue: A :class:`kombu.Queue` instance for message queue.
:param routing_key: Routing key for message queue.
:param version_type: Elasticsearch version type.
(Default: ``external_gte``)
:param record_to_index: Function to extract the index and doc_type
from the record.
:param record_cls: Record class used for retrieving and dumping records.
If the ``Record.enable_jsonref`` flag is False, new-style record
dumping will be used for creating the Elasticsearch source
document.
:param record_dumper: Dumper instance to use for dumping the record.
Only has an effect for new-style record dumping.
"""
self.client = search_client or current_search_client
self._exchange = exchange
self._queue = queue
self._record_to_index = record_to_index or current_record_to_index
self._routing_key = routing_key
self._version_type = version_type or 'external_gte'
if record_cls:
self.record_cls = record_cls
if record_dumper:
self.record_dumper = record_dumper
def record_to_index(self, record):
"""Get index/doc_type given a record.
:param record: The record where to look for the information.
:returns: A tuple (index, doc_type).
"""
return self._record_to_index(record)
@property
def mq_queue(self):
"""Message Queue queue.
:returns: The Message Queue queue.
"""
return self._queue or current_app.config['INDEXER_MQ_QUEUE']
@property
def mq_exchange(self):
"""Message Queue exchange.
:returns: The Message Queue exchange.
"""
return self._exchange or current_app.config['INDEXER_MQ_EXCHANGE']
@property
def mq_routing_key(self):
"""Message Queue routing key.
:returns: The Message Queue routing key.
"""
return (self._routing_key or
current_app.config['INDEXER_MQ_ROUTING_KEY'])
#
# High-level API
#
def index(self, record, arguments=None, **kwargs):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
arguments = arguments or {}
body = self._prepare_record(
record, index, doc_type, arguments, **kwargs)
index, doc_type = self._prepare_index(index, doc_type)
return self.client.index(
id=str(record.id),
version=record.revision_id,
version_type=self._version_type,
index=index,
doc_type=doc_type,
body=body,
**arguments
)
def index_by_id(self, record_uuid, **kwargs):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
:param kwargs: Passed to :meth:`RecordIndexer.index`.
"""
return self.index(self.record_cls.get_record(record_uuid), **kwargs)
def delete(self, record, **kwargs):
"""Delete a record.
:param record: Record instance.
:param kwargs: Passed to
:meth:`elasticsearch:elasticsearch.Elasticsearch.delete`.
"""
index, doc_type = self.record_to_index(record)
index, doc_type = self._prepare_index(index, doc_type)
# Pop version arguments for backward compatibility if they were
# explicitly set to None in the function call.
if 'version' in kwargs and kwargs['version'] is None:
kwargs.pop('version', None)
kwargs.pop('version_type', None)
else:
kwargs.setdefault('version', record.revision_id)
kwargs.setdefault('version_type', self._version_type)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
**kwargs
)
def delete_by_id(self, record_uuid, **kwargs):
"""Delete record from index by record identifier.
:param record_uuid: Record identifier.
:param kwargs: Passed to :meth:`RecordIndexer.delete`.
"""
self.delete(self.record_cls.get_record(record_uuid), **kwargs)
def bulk_index(self, record_id_iterator):
"""Bulk index records.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'index')
def bulk_delete(self, record_id_iterator):
"""Bulk delete records from index.
:param record_id_iterator: Iterator yielding record UUIDs.
"""
self._bulk_op(record_id_iterator, 'delete')
def process_bulk_queue(self, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
"""
with current_celery_app.pool.acquire(block=True) as conn:
consumer = Consumer(
connection=conn,
queue=self.mq_queue.name,
exchange=self.mq_exchange.name,
routing_key=self.mq_routing_key,
)
req_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
es_bulk_kwargs = es_bulk_kwargs or {}
count = bulk(
self.client,
self._actionsiter(consumer.iterqueue()),
stats_only=True,
request_timeout=req_timeout,
expand_action_callback=(
_es7_expand_action if ES_VERSION[0] >= 7
else default_expand_action
),
**es_bulk_kwargs
)
consumer.close()
return count
@contextmanager
def create_producer(self):
"""Context manager that yields an instance of ``Producer``."""
with current_celery_app.pool.acquire(block=True) as conn:
yield Producer(
conn,
exchange=self.mq_exchange,
routing_key=self.mq_routing_key,
auto_declare=True,
)
#
# Low-level implementation
#
def _bulk_op(self, record_id_iterator, op_type, index=None, doc_type=None):
"""Index record in Elasticsearch asynchronously.
:param record_id_iterator: Iterator that yields record UUIDs.
:param op_type: Indexing operation (one of ``index``, ``create``,
``delete`` or ``update``).
:param index: The Elasticsearch index. (Default: ``None``)
:param doc_type: The Elasticsearch doc_type. (Default: ``None``)
"""
with self.create_producer() as producer:
for rec in record_id_iterator:
producer.publish(dict(
id=str(rec),
op=op_type,
index=index,
doc_type=doc_type
))
def _actionsiter(self, message_iterator):
"""Iterate bulk actions.
:param message_iterator: Iterator yielding messages from a queue.
"""
for message in message_iterator:
payload = message.decode()
try:
if payload['op'] == 'delete':
yield self._delete_action(payload)
else:
yield self._index_action(payload)
message.ack()
except NoResultFound:
message.reject()
except Exception:
message.reject()
current_app.logger.error(
"Failed to index record {0}".format(payload.get('id')),
exc_info=True)
def _delete_action(self, payload):
"""Bulk delete action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'delete' action.
"""
kwargs = {}
index, doc_type = payload.get('index'), payload.get('doc_type')
if not (index and doc_type):
record = self.record_cls.get_record(
payload['id'], with_deleted=True)
index, doc_type = self.record_to_index(record)
kwargs['_version'] = record.revision_id
kwargs['_version_type'] = self._version_type
else:
# Allow version to be sent in the payload (but only use if we
# haven't loaded the record).
if 'version' in payload:
kwargs['_version'] = payload['version']
kwargs['_version_type'] = self._version_type
index, doc_type = self._prepare_index(index, doc_type)
return {
'_op_type': 'delete',
'_index': index,
'_type': doc_type,
'_id': payload['id'],
**kwargs,
}
def _index_action(self, payload):
"""Bulk index action.
:param payload: Decoded message body.
:returns: Dictionary defining an Elasticsearch bulk 'index' action.
"""
record = self.record_cls.get_record(payload['id'])
index, doc_type = self.record_to_index(record)
arguments = {}
body = self._prepare_record(record, index, doc_type, arguments)
index, doc_type = self._prepare_index(index, doc_type)
action = {
'_op_type': 'index',
'_index': index,
'_type': doc_type,
'_id': str(record.id),
'_version': record.revision_id,
'_version_type': self._version_type,
'_source': body
}
action.update(arguments)
return action
def _prepare_index(self, index, doc_type):
"""Prepare the index/doc_type before an operation."""
return build_alias_name(index), doc_type
def _prepare_record(self, record, index, doc_type, arguments=None,
**kwargs):
"""Prepare record data for indexing.
Invenio-Records is evolving and preparing an Elasticsearch source
document is now a responsibility of the Record class. For backward
compatibility, we use the ``Record.enable_jsonref`` flag to control
if we use the new record dumpers feature from Invenio-Records. Set the
flag to ``False`` (disabling JSONRef replacement) to use the new
style record dumping.
:param record: The record to prepare.
:param index: The Elasticsearch index.
:param doc_type: The Elasticsearch document type.
:param arguments: The arguments to send to Elasticsearch upon indexing.
:param **kwargs: Extra parameters.
:returns: The Elasticsearch source document.
"""
# New-style record dumping - we use the Record.enable_jsonref flag on
# the Record to control if we use the new simplified dumping.
if not getattr(record, 'enable_jsonref', True):
# If dumper is None, dumps() will use the default configured dumper
# on the Record class.
return record.dumps(dumper=self.record_dumper)
# Old-style dumping - if INDEXER_REPLACE_REFS is False this still uses
# Record.dumps(), whose default implementation is backward compatible
# with new-style records. Also, we're adding extra information into the
# record like _created and _updated afterwards, which Record.dumps()
# has no control over.
if current_app.config['INDEXER_REPLACE_REFS']:
data = copy.deepcopy(record.replace_refs())
else:
data = record.dumps()
data['_created'] = pytz.utc.localize(record.created).isoformat() \
if record.created else None
data['_updated'] = pytz.utc.localize(record.updated).isoformat() \
if record.updated else None
# Allow modification of data prior to sending to Elasticsearch.
before_record_index.send(
current_app._get_current_object(),
json=data,
record=record,
index=index,
doc_type=doc_type,
arguments={} if arguments is None else arguments,
**kwargs
)
return data
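# Hedged usage sketch: how a caller might drive the bulk queue implemented by
# RecordIndexer above. It assumes a Flask application context with the Invenio
# extensions initialized; the helper name is not part of the public API.
def _example_run_bulk_indexing(uuids_to_index, uuids_to_delete):
    indexer = RecordIndexer()
    indexer.bulk_index(uuids_to_index)    # queue 'index' operations
    indexer.bulk_delete(uuids_to_delete)  # queue 'delete' operations
    # Consume the queue and send the actions to Elasticsearch in bulk.
    return indexer.process_bulk_queue()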
class BulkRecordIndexer(RecordIndexer):
r"""Provide an interface for indexing records in Elasticsearch.
Uses bulk indexing by default.
"""
def index(self, record):
"""Index a record.
The caller is responsible for ensuring that the record has already been
committed to the database. If a newer version of a record has already
been indexed then the provided record will not be indexed. This
behavior can be controlled by providing a different ``version_type``
when initializing ``RecordIndexer``.
:param record: Record instance.
"""
self.bulk_index([record.id])
def index_by_id(self, record_uuid):
"""Index a record by record identifier.
:param record_uuid: Record identifier.
"""
self.bulk_index([record_uuid])
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
self.bulk_delete([record.id])
def delete_by_id(self, record_uuid):
"""Delete record from index by record identifier."""
self.bulk_delete([record_uuid])
| mit | -9,200,436,251,405,704,000 | 34.631111 | 79 | 0.601971 | false |
Hybrid-Cloud/cinder | cinder/tests/unit/test_falconstor_fss.py | 1 | 38330 | # Copyright (c) 2016 FalconStor, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import mock
import time
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.falconstor import fc
from cinder.volume.drivers.falconstor import iscsi
from cinder.volume.drivers.falconstor import rest_proxy as proxy
DRIVER_PATH = "cinder.volume.drivers.falconstor"
BASE_DRIVER = DRIVER_PATH + ".fss_common.FalconstorBaseDriver"
ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver"
PRIMARY_IP = '10.0.0.1'
SECONDARY_IP = '10.0.0.2'
FAKE_ID = 123
FAKE = 'fake'
FAKE_HOST = 'fakehost'
API_RESPONSE = {'rc': 0}
ISCSI_VOLUME_BACKEND_NAME = "FSSISCSIDriver"
SESSION_ID = "a76d506c-abcd-1234-efgh-710e1fd90527"
VOLUME_ID = '6068ea6d-f221-4213-bde9-f1b50aecdf36'
ADD_VOLUME_ID = '6068ed7f-f231-4283-bge9-f1b51aecdf36'
GROUP_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
PORTAL_RESPONSE = {'rc': 0, 'ipaddress': FAKE}
VOLUME_METADATA = {'metadata': {'FSS-vid': 1}}
EXTENT_NEW_SIZE = 3
DATA_SERVER_INFO = 0, {'metadata': {'vendor': 'FalconStor', 'version': '1.5'}}
FSS_SINGLE_TYPE = 'single'
RAWTIMESTAMP = '1324975390'
VOLUME = {'id': VOLUME_ID,
'name': "volume-" + VOLUME_ID,
'display_name': 'fake_volume',
'display_description': '',
'size': 1,
'host': "hostname@backend#%s" % FAKE_ID,
'volume_type': None,
'volume_type_id': None,
'consistencygroup_id': None,
'volume_metadata': [],
'metadata': {"Type": "work"}}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 1,
"host": "hostname@backend#%s" % FAKE_ID,
"volume_type": None,
"volume_type_id": None,
"volume_size": 1
}
VOLUME_NAME = 'cinder-' + VOLUME['id']
SRC_VOL_NAME = 'cinder-' + SRC_VOL['id']
DATA_OUTPUT = VOLUME_NAME, VOLUME_METADATA
SNAPSHOT_METADATA = {'fss-tm-comment': None}
ADD_VOLUME_IN_CG = {
'id': ADD_VOLUME_ID,
'display_name': 'abc123',
'display_description': '',
'size': 1,
'consistencygroup_id': GROUP_ID,
'status': 'available',
'host': "hostname@backend#%s" % FAKE_ID}
REMOVE_VOLUME_IN_CG = {
'id': 'fe2dbc515810451dab2f8c8a48d15bee',
'display_name': 'fe2dbc515810451dab2f8c8a48d15bee',
'display_description': '',
'size': 1,
'consistencygroup_id': GROUP_ID,
'status': 'available',
'host': "hostname@backend#%s" % FAKE_ID}
CONSISTGROUP = {'id': GROUP_ID,
'name': 'fake_group',
'description': 'fake_group_des',
'status': ''}
CG_SNAPSHOT = {
'consistencygroup_id': GROUP_ID,
'id': '3c61b0f9-842e-46bf-b061-5e0031d8083f',
'name': 'cgsnapshot1',
'description': 'cgsnapshot1',
'status': ''}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {'name': "snapshot-" + SNAPSHOT_ID,
'id': SNAPSHOT_ID,
'volume_id': VOLUME_ID,
'volume_name': "volume-" + VOLUME_ID,
'volume_size': 2,
'display_name': "fake_snapshot",
'display_description': '',
'volume': VOLUME,
'metadata': SNAPSHOT_METADATA,
'status': ''}
INITIATOR_IQN = 'iqn.2015-08.org.falconstor:01:fss'
TARGET_IQN = "iqn.2015-06.com.falconstor:freestor.fss-12345abc"
TARGET_PORT = "3260"
ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"]
ISCSI_IPS = ["10.0.0." + str(i + 1) for i in range(len(ISCSI_PORT_NAMES))]
ISCSI_PORTS = {"iqn": TARGET_IQN, "lun": 1}
ISCSI_CONNECTOR = {'initiator': INITIATOR_IQN,
'host': "hostname@backend#%s" % FAKE_ID}
ISCSI_INFO = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': True,
'discard': True,
'encrypted': False,
'qos_specs': None,
'access_mode': 'rw',
'volume_id': VOLUME_ID,
'target_iqn': ISCSI_PORTS['iqn'],
'target_portal': ISCSI_IPS[0] + ':' + TARGET_PORT,
'target_lun': 1
},
}
ISCSI_MULTIPATH_INFO = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'discard': True,
'encrypted': False,
'qos_specs': None,
'access_mode': 'rw',
'volume_id': VOLUME_ID,
'target_iqns': [ISCSI_PORTS['iqn']],
'target_portals': [ISCSI_IPS[0] + ':' + TARGET_PORT],
'target_luns': [1]
},
}
FC_INITIATOR_WWPNS = ['2100000d778301c3', '2101000d77a301c3']
FC_TARGET_WWPNS = ['11000024ff2d2ca4', '11000024ff2d2ca5',
'11000024ff2d2c23', '11000024ff2d2c24']
FC_WWNS = ['20000024ff2d2ca4', '20000024ff2d2ca5',
'20000024ff2d2c23', '20000024ff2d2c24']
FC_CONNECTOR = {'ip': '10.10.0.1',
'initiator': 'iqn.1988-08.org.oracle:568eb4ccbbcc',
'wwpns': FC_INITIATOR_WWPNS,
'wwnns': FC_WWNS,
'host': FAKE_HOST,
'multipath': False}
FC_INITIATOR_TARGET_MAP = {
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
FC_DEVICE_MAPPING = {
"fabric": {
'initiator_port_wwn_list': FC_INITIATOR_WWPNS,
'target_port_wwn_list': FC_WWNS
}
}
FC_INFO = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'volume_id': VOLUME_ID,
'target_lun': 1,
'target_wwn': FC_TARGET_WWPNS,
'initiator_target_map': FC_INITIATOR_TARGET_MAP
}
}
def Fake_sleep(time):
pass
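# Fake_sleep() above is installed via self.mock_object(time, 'sleep', ...) in
# setUp() so retry/backoff loops in the driver run instantly during tests. A
# self-contained sketch of the same idea (the helper name is an assumption):
def _example_patch_out_sleep(testcase):
    """Replace time.sleep with a no-op for the duration of a test case."""
    patcher = mock.patch.object(time, 'sleep', lambda *args, **kwargs: None)
    patcher.start()
    testcase.addCleanup(patcher.stop)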
class FSSDriverTestCase(test.TestCase):
def setUp(self):
super(FSSDriverTestCase, self).setUp()
self.mock_config = mock.Mock()
self.mock_config.san_ip = PRIMARY_IP
self.mock_config.san_login = FAKE
self.mock_config.san_password = FAKE
self.mock_config.fss_pool = FAKE_ID
self.mock_config.san_is_local = False
self.mock_config.fss_debug = False
self.mock_config.additional_retry_list = False
self.mock_object(time, 'sleep', Fake_sleep)
class TestFSSISCSIDriver(FSSDriverTestCase):
def __init__(self, method):
super(TestFSSISCSIDriver, self).__init__(method)
def setUp(self):
super(TestFSSISCSIDriver, self).setUp()
self.mock_config.use_chap_auth = False
self.mock_config.use_multipath_for_image_xfer = False
self.mock_config.volume_backend_name = ISCSI_VOLUME_BACKEND_NAME
self.driver = iscsi.FSSISCSIDriver(configuration=self.mock_config)
self.mock_utils = mock.Mock()
self.driver.driver_utils = self.mock_utils
def test_initialized_should_set_fss_info(self):
self.assertEqual(self.driver.proxy.fss_host,
self.driver.configuration.san_ip)
self.assertEqual(self.driver.proxy.fss_username,
self.driver.configuration.san_login)
self.assertEqual(self.driver.proxy.fss_password,
self.driver.configuration.san_password)
self.assertEqual(self.driver.proxy.fss_defined_pool,
self.driver.configuration.fss_pool)
def test_check_for_setup_error(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
@mock.patch.object(proxy.RESTProxy, 'create_vdev',
return_value=DATA_OUTPUT)
def test_create_volume(self, mock_create_vdev):
self.driver.create_volume(VOLUME)
mock_create_vdev.assert_called_once_with(VOLUME)
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
return_value=VOLUME_NAME)
def test_extend_volume(self, mock__get_fss_volume_name):
"""Volume extended_volume successfully."""
self.driver.proxy.extend_vdev = mock.Mock()
result = self.driver.extend_volume(VOLUME, EXTENT_NEW_SIZE)
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
self.driver.proxy.extend_vdev.assert_called_once_with(VOLUME_NAME,
VOLUME["size"],
EXTENT_NEW_SIZE)
self.assertIsNone(result)
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
def test_clone_volume(self, mock__get_fss_volume_name):
mock__get_fss_volume_name.side_effect = [VOLUME_NAME, SRC_VOL_NAME]
self.driver.proxy.clone_volume = mock.Mock(
return_value=VOLUME_METADATA)
self.driver.proxy.extend_vdev = mock.Mock()
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.driver.proxy.clone_volume.assert_called_with(VOLUME_NAME,
SRC_VOL_NAME)
mock__get_fss_volume_name.assert_any_call(VOLUME)
mock__get_fss_volume_name.assert_any_call(SRC_VOL)
self.assertEqual(2, mock__get_fss_volume_name.call_count)
self.driver.proxy.extend_vdev(VOLUME_NAME, VOLUME["size"],
SRC_VOL["size"])
self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
VOLUME["size"],
SRC_VOL["size"])
@mock.patch.object(proxy.RESTProxy, 'delete_vdev')
def test_delete_volume(self, mock_delete_vdev):
result = self.driver.delete_volume(VOLUME)
mock_delete_vdev.assert_called_once_with(VOLUME)
self.assertIsNone(result)
@mock.patch.object(proxy.RESTProxy, 'create_snapshot',
return_value=API_RESPONSE)
def test_create_snapshot(self, mock_create_snapshot):
snap_name = SNAPSHOT.get('display_name')
SNAPSHOT_METADATA["fss-tm-comment"] = snap_name
result = self.driver.create_snapshot(SNAPSHOT)
mock_create_snapshot.assert_called_once_with(SNAPSHOT)
self.assertEqual(result, {'metadata': SNAPSHOT_METADATA})
@mock.patch.object(proxy.RESTProxy, 'delete_snapshot',
return_value=API_RESPONSE)
def test_delete_snapshot(self, mock_delete_snapshot):
result = self.driver.delete_snapshot(SNAPSHOT)
mock_delete_snapshot.assert_called_once_with(SNAPSHOT)
self.assertIsNone(result)
@mock.patch.object(proxy.RESTProxy, 'create_volume_from_snapshot',
return_value=(VOLUME_NAME, VOLUME_METADATA))
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name',
return_value=VOLUME_NAME)
def test_create_volume_from_snapshot(self, mock__get_fss_volume_name,
mock_create_volume_from_snapshot):
vol_size = VOLUME['size']
snap_size = SNAPSHOT['volume_size']
self.driver.proxy.extend_vdev = mock.Mock()
self.assertEqual(
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT),
dict(metadata=VOLUME_METADATA))
mock_create_volume_from_snapshot.assert_called_once_with(VOLUME,
SNAPSHOT)
if vol_size != snap_size:
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
self.driver.proxy.extend_vdev(VOLUME_NAME, snap_size, vol_size)
self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME,
snap_size,
vol_size)
@mock.patch.object(proxy.RESTProxy, 'create_group')
def test_create_consistency_group(self, mock_create_group):
ctxt = context.get_admin_context()
model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP)
mock_create_group.assert_called_once_with(CONSISTGROUP)
self.assertDictMatch({'status': 'available'}, model_update)
@mock.patch.object(proxy.RESTProxy, 'destroy_group')
@mock.patch(BASE_DRIVER + ".delete_volume", autospec=True)
def test_delete_consistency_group(self, mock_delete_vdev,
mock_destroy_group):
mock_cgroup = mock.MagicMock()
mock_cgroup.id = FAKE_ID
mock_cgroup['status'] = "deleted"
mock_context = mock.Mock()
mock_volume = mock.MagicMock()
expected_volume_updates = [{
'id': mock_volume.id,
'status': 'deleted'
}]
model_update, volumes = self.driver.delete_consistencygroup(
mock_context, mock_cgroup, [mock_volume])
mock_destroy_group.assert_called_with(mock_cgroup)
self.assertEqual(expected_volume_updates, volumes)
self.assertEqual(mock_cgroup['status'], model_update['status'])
mock_delete_vdev.assert_called_with(self.driver, mock_volume)
@mock.patch.object(proxy.RESTProxy, 'set_group')
def test_update_consistency_group(self, mock_set_group):
ctxt = context.get_admin_context()
add_vols = [
{'name': 'vol1', 'id': 'vol1', 'display_name': ''},
{'name': 'vol2', 'id': 'vol2', 'display_name': ''}
]
remove_vols = [
{'name': 'vol3', 'id': 'vol3', 'display_name': ''},
{'name': 'vol4', 'id': 'vol4', 'display_name': ''}
]
expected_addvollist = ["cinder-%s" % volume['id'] for volume in
add_vols]
expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols]
self.driver.update_consistencygroup(ctxt, CONSISTGROUP,
add_volumes=add_vols,
remove_volumes=remove_vols)
mock_set_group.assert_called_with(GROUP_ID,
addvollist=expected_addvollist,
remvollist=expected_remvollist)
@mock.patch.object(proxy.RESTProxy, 'create_cgsnapshot')
def test_create_cgsnapshot(self, mock_create_cgsnapshot):
mock_cgsnap = CG_SNAPSHOT
mock_context = mock.Mock()
mock_snap = mock.MagicMock()
model_update, snapshots = self.driver.create_cgsnapshot(mock_context,
mock_cgsnap,
[mock_snap])
mock_create_cgsnapshot.assert_called_once_with(mock_cgsnap)
self.assertEqual({'status': 'available'}, model_update)
expected_snapshot_update = [{
'id': mock_snap.id,
'status': 'available'
}]
self.assertEqual(expected_snapshot_update, snapshots)
@mock.patch.object(proxy.RESTProxy, 'delete_cgsnapshot')
def test_delete_cgsnapshot(self, mock_delete_cgsnapshot):
mock_cgsnap = mock.Mock()
mock_cgsnap.id = FAKE_ID
mock_cgsnap.status = 'deleted'
mock_context = mock.Mock()
mock_snap = mock.MagicMock()
model_update, snapshots = self.driver.delete_cgsnapshot(mock_context,
mock_cgsnap,
[mock_snap])
mock_delete_cgsnapshot.assert_called_once_with(mock_cgsnap)
self.assertEqual({'status': mock_cgsnap.status}, model_update)
expected_snapshot_update = [dict(id=mock_snap.id, status='deleted')]
self.assertEqual(expected_snapshot_update, snapshots)
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi',
return_value=ISCSI_PORTS)
def test_initialize_connection(self, mock_initialize_connection_iscsi):
FSS_HOSTS = []
FSS_HOSTS.append(PRIMARY_IP)
ret = self.driver.initialize_connection(VOLUME, ISCSI_CONNECTOR)
mock_initialize_connection_iscsi.assert_called_once_with(
VOLUME,
ISCSI_CONNECTOR,
FSS_HOSTS)
result = deepcopy(ISCSI_INFO)
self.assertDictMatch(result, ret)
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi')
@mock.patch(ISCSI_DRIVER + "._check_multipath", autospec=True)
def test_initialize_connection_multipath(self, mock__check_multipath,
mock_initialize_connection_iscsi):
fss_hosts = []
fss_hosts.append(self.mock_config.san_ip)
mock_initialize_connection_iscsi.return_value = ISCSI_PORTS
        mock__check_multipath.return_value = True
self.mock_config.use_multipath_for_image_xfer = True
self.mock_config.san_secondary_ip = SECONDARY_IP
multipath_connector = deepcopy(ISCSI_CONNECTOR)
multipath_connector["multipath"] = True
fss_hosts.append(SECONDARY_IP)
self.driver.initialize_connection(VOLUME, multipath_connector)
mock_initialize_connection_iscsi.assert_called_once_with(
VOLUME,
multipath_connector,
fss_hosts)
@mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi')
def test_terminate_connection(self, mock_terminate_connection_iscsi):
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
mock_terminate_connection_iscsi.assert_called_once_with(
VOLUME,
ISCSI_CONNECTOR)
@mock.patch.object(proxy.RESTProxy, '_manage_existing_volume')
@mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid')
def test_manage_existing(self, mock__get_existing_volume_ref_vid,
mock__manage_existing_volume):
ref_vid = 1
volume_ref = {'source-id': ref_vid}
self.driver.manage_existing(VOLUME, volume_ref)
mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
mock__manage_existing_volume.assert_called_once_with(
volume_ref['source-id'], VOLUME)
@mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid',
return_value=5120)
def test_manage_existing_get_size(self, mock__get_existing_volume_ref_vid):
ref_vid = 1
volume_ref = {'source-id': ref_vid}
expected_size = 5
size = self.driver.manage_existing_get_size(VOLUME, volume_ref)
mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref)
self.assertEqual(expected_size, size)
@mock.patch.object(proxy.RESTProxy, 'unmanage')
def test_unmanage(self, mock_unmanage):
self.driver.unmanage(VOLUME)
mock_unmanage.assert_called_once_with(VOLUME)
class TestFSSFCDriver(FSSDriverTestCase):
def setUp(self):
super(TestFSSFCDriver, self).setUp()
self.driver = fc.FSSFCDriver(configuration=self.mock_config)
self.driver._lookup_service = mock.Mock()
@mock.patch.object(proxy.RESTProxy, 'fc_initialize_connection')
def test_initialize_connection(self, mock_fc_initialize_connection):
fss_hosts = []
fss_hosts.append(PRIMARY_IP)
self.driver.initialize_connection(VOLUME, FC_CONNECTOR)
mock_fc_initialize_connection.assert_called_once_with(
VOLUME,
FC_CONNECTOR,
fss_hosts)
@mock.patch.object(proxy.RESTProxy, '_check_fc_host_devices_empty',
return_value=False)
@mock.patch.object(proxy.RESTProxy, 'fc_terminate_connection',
return_value=FAKE_ID)
def test_terminate_connection(self, mock_fc_terminate_connection,
mock__check_fc_host_devices_empty):
self.driver.terminate_connection(VOLUME, FC_CONNECTOR)
mock_fc_terminate_connection.assert_called_once_with(
VOLUME,
FC_CONNECTOR)
mock__check_fc_host_devices_empty.assert_called_once_with(FAKE_ID)
class TestRESTProxy(test.TestCase):
"""Test REST Proxy Driver."""
def setUp(self):
super(TestRESTProxy, self).setUp()
configuration = mock.Mock(conf.Configuration)
configuration.san_ip = FAKE
configuration.san_login = FAKE
configuration.san_password = FAKE
configuration.fss_pool = FAKE_ID
configuration.fss_debug = False
configuration.additional_retry_list = None
self.proxy = proxy.RESTProxy(configuration)
self.FSS_MOCK = mock.MagicMock()
self.proxy.FSS = self.FSS_MOCK
self.FSS_MOCK._fss_request.return_value = API_RESPONSE
self.mock_object(time, 'sleep', Fake_sleep)
def test_do_setup(self):
self.proxy.do_setup()
self.FSS_MOCK.fss_login.assert_called_once_with()
self.assertNotEqual(self.proxy.session_id, SESSION_ID)
def test_create_volume(self):
sizemb = self.proxy._convert_size_to_mb(VOLUME['size'])
volume_name = self.proxy._get_fss_volume_name(VOLUME)
params = dict(storagepoolid=self.proxy.fss_defined_pool,
sizemb=sizemb,
category="virtual",
name=volume_name)
self.proxy.create_vdev(VOLUME)
self.FSS_MOCK.create_vdev.assert_called_once_with(params)
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
return_value=FAKE_ID)
def test_extend_volume(self, mock__get_fss_vid_from_name):
size = self.proxy._convert_size_to_mb(EXTENT_NEW_SIZE - VOLUME['size'])
params = dict(
action='expand',
sizemb=size
)
volume_name = self.proxy._get_fss_volume_name(VOLUME)
self.proxy.extend_vdev(volume_name, VOLUME["size"], EXTENT_NEW_SIZE)
mock__get_fss_vid_from_name.assert_called_once_with(volume_name,
FSS_SINGLE_TYPE)
self.FSS_MOCK.extend_vdev.assert_called_once_with(FAKE_ID, params)
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
return_value=FAKE_ID)
def test_delete_volume(self, mock__get_fss_vid_from_name):
volume_name = self.proxy._get_fss_volume_name(VOLUME)
self.proxy.delete_vdev(VOLUME)
mock__get_fss_vid_from_name.assert_called_once_with(volume_name,
FSS_SINGLE_TYPE)
self.FSS_MOCK.delete_vdev.assert_called_once_with(FAKE_ID)
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
return_value=FAKE_ID)
def test_clone_volume(self, mock__get_fss_vid_from_name):
self.FSS_MOCK.create_mirror.return_value = API_RESPONSE
self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE
mirror_params = dict(
category='virtual',
selectioncriteria='anydrive',
mirrortarget="virtual",
storagepoolid=self.proxy.fss_defined_pool
)
ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME)
self.FSS_MOCK.create_mirror.assert_called_once_with(FAKE_ID,
mirror_params)
self.FSS_MOCK.sync_mirror.assert_called_once_with(FAKE_ID)
self.FSS_MOCK.promote_mirror.assert_called_once_with(FAKE_ID,
VOLUME_NAME)
self.assertNotEqual(ret, VOLUME_METADATA)
@mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot')
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
return_value=FAKE_ID)
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap',
return_value=VOLUME_NAME)
def test_create_snapshot(self, mock__get_vol_name_from_snap,
mock__get_fss_vid_from_name,
mock_create_vdev_snapshot):
self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = [
False, False, SNAPSHOT['volume_size']]
self.proxy.create_snapshot(SNAPSHOT)
self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_once_with(
FAKE_ID)
sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size'])
mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb)
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
FAKE_ID,
storagepoolid=self.proxy.fss_defined_pool)
self.FSS_MOCK.create_timemark.assert_called_once_with(
FAKE_ID,
SNAPSHOT["display_name"])
@mock.patch.object(proxy.RESTProxy, '_get_timestamp',
return_value=RAWTIMESTAMP)
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name',
return_value=FAKE_ID)
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap',
return_value=VOLUME_NAME)
def test_delete_snapshot(self, mock__get_vol_name_from_snap,
mock__get_fss_vid_from_name,
mock__get_timestamp):
timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
self.proxy.delete_snapshot(SNAPSHOT)
mock__get_vol_name_from_snap.assert_called_once_with(SNAPSHOT)
self.FSS_MOCK.delete_timemark.assert_called_once_with(timestamp)
self.FSS_MOCK.get_timemark.assert_any_call(FAKE_ID)
self.assertEqual(2, self.FSS_MOCK.get_timemark.call_count)
@mock.patch.object(proxy.RESTProxy, '_get_timestamp')
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap')
def test_create_volume_from_snapshot(self, mock__get_vol_name_from_snap,
mock__get_fss_vid_from_name,
mock__get_timestamp):
tm_info = {"rc": 0,
"data":
{
"guid": "497bad5e-e589-bb0a-e0e7-00004eeac169",
"name": "SANDisk-001",
"total": "1",
"timemark": [
{
"size": 131072,
"comment": "123test456",
"hastimeview": False,
"priority": "low",
"quiescent": "yes",
"timeviewdata": "notkept",
"rawtimestamp": "1324975390",
"timestamp": "2015-10-11 16:43:10"
}]
}
}
mock__get_vol_name_from_snap.return_value = VOLUME_NAME
new_vol_name = self.proxy._get_fss_volume_name(VOLUME)
mock__get_fss_vid_from_name.return_value = FAKE_ID
self.FSS_MOCK.get_timemark.return_value = tm_info
mock__get_timestamp.return_value = RAWTIMESTAMP
timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID)
mock__get_timestamp.assert_called_once_with(tm_info,
SNAPSHOT['display_name'])
self.FSS_MOCK.copy_timemark.assert_called_once_with(
timestamp,
storagepoolid=self.proxy.fss_defined_pool,
name=new_vol_name)
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
def test_create_consistency_group(self, mock__get_group_name_from_id):
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
params = dict(name=CONSISTGROUP['name'])
self.proxy.create_group(CONSISTGROUP)
self.FSS_MOCK.create_group.assert_called_once_with(params)
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
def test_delete_consistency_group(self, mock__get_group_name_from_id,
mock__get_fss_gid_from_name):
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
mock__get_fss_gid_from_name.return_value = FAKE_ID
self.proxy.destroy_group(CONSISTGROUP)
mock__get_group_name_from_id.assert_called_once_with(
CONSISTGROUP['id'])
mock__get_fss_gid_from_name.assert_called_once_with(
CONSISTGROUP['name'])
self.FSS_MOCK.destroy_group.assert_called_once_with(FAKE_ID)
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
def test_update_consistency_group(self, mock__get_group_name_from_id,
mock__get_fss_gid_from_name,
mock__get_fss_vid_from_name):
join_vid_list = [1, 2]
leave_vid_list = [3, 4]
mock__get_group_name_from_id.return_value = CONSISTGROUP['name']
mock__get_fss_gid_from_name.return_value = FAKE_ID
mock__get_fss_vid_from_name.side_effect = [join_vid_list,
leave_vid_list]
add_vols = [
{'name': 'vol1', 'id': 'vol1'},
{'name': 'vol2', 'id': 'vol2'}
]
remove_vols = [
{'name': 'vol3', 'id': 'vol3'},
{'name': 'vol4', 'id': 'vol4'}
]
expected_addvollist = ["cinder-%s" % volume['id'] for volume in
add_vols]
expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols]
self.proxy.set_group(CONSISTGROUP, addvollist=expected_addvollist,
remvollist=expected_remvollist)
if expected_addvollist:
mock__get_fss_vid_from_name.assert_any_call(expected_addvollist)
if expected_remvollist:
mock__get_fss_vid_from_name.assert_any_call(expected_remvollist)
self.assertEqual(2, mock__get_fss_vid_from_name.call_count)
join_params = dict()
leave_params = dict()
join_params.update(
action='join',
virtualdevices=join_vid_list
)
leave_params.update(
action='leave',
virtualdevices=leave_vid_list
)
self.FSS_MOCK.set_group.assert_called_once_with(FAKE_ID, join_params,
leave_params)
@mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot')
@mock.patch.object(proxy.RESTProxy, 'create_group_timemark')
@mock.patch.object(proxy.RESTProxy, '_get_vdev_id_from_group_id')
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
def test_create_cgsnapshot(self, mock__get_group_name_from_id,
mock__get_fss_gid_from_name,
mock__get_vdev_id_from_group_id,
mock_create_group_timemark,
mock_create_vdev_snapshot
):
vid_list = [1]
group_name = "cinder-consisgroup-%s" % CG_SNAPSHOT[
'consistencygroup_id']
mock__get_group_name_from_id.return_value = group_name
mock__get_fss_gid_from_name.return_value = FAKE_ID
mock__get_vdev_id_from_group_id.return_value = vid_list
gsnap_name = self.proxy._encode_name(CG_SNAPSHOT['id'])
self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = (
False,
False,
1024)
self.proxy.create_cgsnapshot(CG_SNAPSHOT)
mock__get_group_name_from_id.assert_called_once_with(
CG_SNAPSHOT['consistencygroup_id'])
mock__get_fss_gid_from_name.assert_called_once_with(group_name)
mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID)
for vid in vid_list:
self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid)
mock_create_vdev_snapshot.assert_called_once_with(vid, 1024)
self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
vid,
storagepoolid=self.proxy.fss_defined_pool)
mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name)
@mock.patch.object(proxy.RESTProxy, 'delete_group_timemark')
@mock.patch.object(proxy.RESTProxy, '_get_fss_group_membercount')
@mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
def test_delete_cgsnapshot(self, mock__get_group_name_from_id,
mock__get_fss_gid_from_name,
mock__get_fss_group_membercount,
mock_delete_group_timemark):
tm_info = {
"rc": 0,
"data":
{
"name": "GroupTestABC",
"total": 1,
"timemark": [{
"size": 65536,
"comment": "cinder-PGGwaaaaaaaar+wYV4AMdgIPw",
"priority": "low",
"quiescent": "yes",
"hastimeview": "false",
"timeviewdata": "notkept",
"rawtimestamp": "1324974940",
"timestamp": "2015-10-15 16:35:40"}]
}
}
final_tm_data = {
"rc": 0,
"data":
{"name": "GroupTestABC",
"total": 1,
"timemark": []
}}
mock__get_group_name_from_id.return_value = CG_SNAPSHOT[
'consistencygroup_id']
mock__get_fss_gid_from_name.return_value = FAKE_ID
self.FSS_MOCK.get_group_timemark.side_effect = [tm_info, final_tm_data]
encode_snap_name = self.proxy._encode_name(CG_SNAPSHOT['id'])
self.proxy.delete_cgsnapshot(CG_SNAPSHOT)
mock__get_fss_group_membercount.assert_called_once_with(FAKE_ID)
self.assertEqual(2, self.FSS_MOCK.get_group_timemark.call_count)
self.FSS_MOCK.get_group_timemark.assert_any_call(FAKE_ID)
rawtimestamp = self.proxy._get_timestamp(tm_info, encode_snap_name)
timestamp = '%s_%s' % (FAKE_ID, rawtimestamp)
mock_delete_group_timemark.assert_called_once_with(timestamp)
self.FSS_MOCK.delete_group_timemark_policy.assert_called_once_with(
FAKE_ID)
@mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi')
def test_iscsi_initialize_connection(self,
mock_initialize_connection_iscsi):
fss_hosts = []
fss_hosts.append(PRIMARY_IP)
self.proxy.initialize_connection_iscsi(VOLUME, ISCSI_CONNECTOR,
fss_hosts)
mock_initialize_connection_iscsi.assert_called_once_with(
VOLUME,
ISCSI_CONNECTOR,
fss_hosts)
@mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi')
def test_iscsi_terminate_connection(self, mock_terminate_connection_iscsi):
self.FSS_MOCK._get_target_info.return_value = (FAKE_ID, INITIATOR_IQN)
self.proxy.terminate_connection_iscsi(VOLUME, ISCSI_CONNECTOR)
mock_terminate_connection_iscsi.assert_called_once_with(
VOLUME,
ISCSI_CONNECTOR)
@mock.patch.object(proxy.RESTProxy, 'rename_vdev')
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
def test_manage_existing(self, mock__get_fss_volume_name,
mock_rename_vdev):
new_vol_name = 'rename-vol'
mock__get_fss_volume_name.return_value = new_vol_name
self.proxy._manage_existing_volume(FAKE_ID, VOLUME)
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
mock_rename_vdev.assert_called_once_with(FAKE_ID, new_vol_name)
@mock.patch.object(proxy.RESTProxy, 'list_volume_info')
def test_manage_existing_get_size(self, mock_list_volume_info):
volume_ref = {'source-id': FAKE_ID}
vdev_info = {
"rc": 0,
"data": {
"name": "cinder-2ab1f70a-6c89-432c-84e3-5fa6c187fb92",
"type": "san",
"category": "virtual",
"sizemb": 1020
}}
mock_list_volume_info.return_value = vdev_info
self.proxy._get_existing_volume_ref_vid(volume_ref)
mock_list_volume_info.assert_called_once_with(FAKE_ID)
@mock.patch.object(proxy.RESTProxy, 'rename_vdev')
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name')
@mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name')
def test_unmanage(self, mock__get_fss_volume_name,
mock__get_fss_vid_from_name,
mock_rename_vdev):
mock__get_fss_volume_name.return_value = VOLUME_NAME
mock__get_fss_vid_from_name.return_value = FAKE_ID
unmanaged_vol_name = VOLUME_NAME + "-unmanaged"
self.proxy.unmanage(VOLUME)
mock__get_fss_volume_name.assert_called_once_with(VOLUME)
mock__get_fss_vid_from_name.assert_called_once_with(VOLUME_NAME,
FSS_SINGLE_TYPE)
mock_rename_vdev.assert_called_once_with(FAKE_ID, unmanaged_vol_name)
| apache-2.0 | -2,721,745,136,575,598,600 | 42.115861 | 79 | 0.584086 | false |
Vicaris/ModPro | moviepy/video/fx/resize.py | 1 | 4949 | resize_possible = True
try:
# TRY USING OpenCV AS RESIZER
#raise ImportError #debugging
import cv2
import numpy as np
def resizer (pic, newsize):
lx, ly = int(newsize[0]), int(newsize[1])
if lx > pic.shape[1] or ly > pic.shape[0]:
# For upsizing use linear for good quality & decent speed
interpolation = cv2.INTER_LINEAR
else:
# For dowsizing use area to prevent aliasing
interpolation = cv2.INTER_AREA
return cv2.resize(+pic.astype('uint8'), (lx, ly),
interpolation=interpolation)
resizer.origin = "cv2"
except ImportError:
try:
# TRY USING PIL/PILLOW AS RESIZER
from PIL import Image
import numpy as np
def resizer(pic, newsize):
newsize = list(map(int, newsize))[::-1]
shape = pic.shape
if len(shape)==3:
newshape = (newsize[0],newsize[1], shape[2] )
else:
newshape = (newsize[0],newsize[1])
pilim = Image.fromarray(pic)
resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
#arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
#arr.reshape(newshape)
return np.array(resized_pil)
resizer.origin = "PIL"
except ImportError:
# TRY USING SCIPY AS RESIZER
try:
from scipy.misc import imresize
            resizer = lambda pic, newsize: imresize(
                pic, list(map(int, newsize[::-1])))
resizer.origin = "Scipy"
except ImportError:
resize_possible = False
from moviepy.decorators import apply_to_mask
def resize(clip, newsize=None, height=None, width=None, apply_to_mask=True):
"""
Returns a video clip that is a resized version of the clip.
Parameters
------------
newsize:
Can be either
      - ``(width, height)`` in pixels
      - A float representing a scaling factor, like 0.5
- A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.tamano
if newsize is not None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fun = lambda gf,t: (1.0*resizer((255 * gf(t)).astype('uint8'),
newsize2(t))/255)
else:
fun = lambda gf,t: resizer(gf(t).astype('uint8'),
newsize2(t))
return clip.fl(fun, keep_duration=True,
apply_to= (["mask"] if apply_to_mask else []))
else:
newsize = trans_newsize(newsize)
elif height is not None:
if hasattr(height, "__call__"):
fun = lambda t : 1.0*int(height(t))/h
return resize(clip, fun)
else:
newsize = [w * height / h, height]
elif width is not None:
if hasattr(width, "__call__"):
fun = lambda t : 1.0*width(t)/w
return resize(clip, fun)
newsize = [width, h * width / w]
# From here, the resizing is constant (not a function of time), tamano=newsize
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'), newsize)/255.0
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
newclip = clip.fl_image(fl)
if apply_to_mask and clip.mask is not None:
newclip.mask = resize(clip.mask, newsize, apply_to_mask=False)
return newclip
if not resize_possible:
doc = resize.__doc__
def resize(clip, newsize=None, height=None, width=None):
raise ImportError("fx resize needs OpenCV or Scipy or PIL")
resize.__doc__ = doc
| mit | -1,627,818,066,893,388,500 | 28.993939 | 82 | 0.503536 | false |
vitan/blaze | blaze/expr/broadcast.py | 1 | 5871 | from __future__ import absolute_import, division, print_function
from datashape.predicates import iscollection, isscalar, isnumeric
from toolz import partial, unique, first
import datashape
from datashape import dshape, DataShape, Record, Var, Option, Unit
from .expressions import ElemWise, Label, Expr, Symbol, Field
from .core import eval_str
from .arithmetic import (Eq, Ne, Lt, Le, Gt, Ge, Add, Mult, Div, Sub, Pow, Mod,
Or, And, USub, Not, FloorDiv)
from . import math
__all__ = ['broadcast', 'Broadcast']
def _expr_child(col):
""" Expr and child of field
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> _expr_child(accounts.name)
(name, accounts)
Helper function for ``broadcast``
"""
if isinstance(col, (Broadcast, Field)):
return col._expr, col._child
elif isinstance(col, Label):
return _expr_child(col._child)
else:
return col, None
def broadcast(op, *column_inputs):
""" Broadcast scalar operation across multiple fields
Parameters
----------
op : Scalar Operation like Add, Mult, Sin, Exp
column_inputs : either Column, Broadcast or constant (like 1, 1.0, '1')
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> broadcast(Add, accounts.amount, 100)
accounts.amount + 100
Fuses operations down into ScalarExpr level
>>> broadcast(Mult, 2, (accounts.amount + 100))
2 * (accounts.amount + 100)
"""
expr_inputs = []
children = set()
for col in column_inputs:
expr, child = _expr_child(col)
expr_inputs.append(expr)
if child:
children.add(child)
if not len(children) == 1:
raise ValueError("All inputs must be from same Table.\n"
"Saw the following tables: %s"
% ', '.join(map(str, children)))
if hasattr(op, 'op'):
expr = op.op(*expr_inputs)
else:
expr = op(*expr_inputs)
return Broadcast(first(children), expr)
class Broadcast(ElemWise):
""" Apply Scalar Expression onto columns of data
Parameters
----------
child : TableExpr
expr : ScalarExpr
    The names of the variables within the scalar expr must match the columns
of the child. Use ``Column.scalar_variable`` to generate the
appropriate scalar Symbol
Examples
--------
>>> from blaze.expr import Symbol, Add
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> expr = Add(accounts.amount._expr, 100)
>>> Broadcast(accounts, expr)
accounts.amount + 100
See Also
--------
blaze.expr.broadcast.broadcast
"""
__slots__ = '_child', '_expr'
@property
def _name(self):
names = [x._name for x in self._expr._traverse()
if isinstance(x, Symbol)]
if len(names) == 1 and not isinstance(self._expr.dshape[0], Record):
return names[0]
@property
def dshape(self):
return DataShape(*(self._child.shape + (self._expr.dshape.measure,)))
def __str__(self):
columns = self.active_columns()
newcol = lambda c: "%s.%s" % (self._child, c)
return eval_str(self._expr._subs(dict(zip(columns,
map(newcol, columns)))))
def active_columns(self):
return sorted(unique(x._name for x in self._traverse()
if isinstance(x, Symbol) and isscalar(x.dshape)))
def _eq(self, other):
if (isscalar(self.dshape.measure) and
(not isinstance(other, Expr)
or isscalar(other.dshape.measure))):
return broadcast(Eq, self, other)
else:
return self.isidentical(other)
def _ne(a, b):
return broadcast(Ne, a, b)
def _lt(a, b):
return broadcast(Lt, a, b)
def _le(a, b):
return broadcast(Le, a, b)
def _gt(a, b):
return broadcast(Gt, a, b)
def _ge(a, b):
return broadcast(Ge, a, b)
def _add(a, b):
return broadcast(Add, a, b)
def _radd(a, b):
return broadcast(Add, b, a)
def _mul(a, b):
return broadcast(Mult, a, b)
def _rmul(a, b):
return broadcast(Mult, b, a)
def _div(a, b):
return broadcast(Div, a, b)
def _rdiv(a, b):
return broadcast(Div, b, a)
def _floordiv(a, b):
return broadcast(FloorDiv, a, b)
def _rfloordiv(a, b):
return broadcast(FloorDiv, b, a)
def _sub(a, b):
return broadcast(Sub, a, b)
def _rsub(a, b):
return broadcast(Sub, b, a)
def _pow(a, b):
return broadcast(Pow, a, b)
def _rpow(a, b):
return broadcast(Pow, b, a)
def _mod(a, b):
return broadcast(Mod, a, b)
def _rmod(a, b):
return broadcast(Mod, b, a)
def _or(a, b):
return broadcast(Or, a, b)
def _ror(a, b):
return broadcast(Or, b, a)
def _and(a, b):
return broadcast(And, a, b)
def _rand(a, b):
return broadcast(And, b, a)
def _neg(a):
return broadcast(USub, a)
def _invert(a):
return broadcast(Not, a)
def isnan(expr):
return broadcast(math.isnan, expr)
from .expressions import dshape_method_list
def isreal(ds):
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
if isinstance(ds, Option):
ds = ds.ty
return isinstance(ds, Unit) and 'float' in str(ds)
dshape_method_list.extend([
(lambda ds: iscollection(ds) and isscalar(ds.measure),
set([_eq, _ne, _lt, _le, _gt, _ge, _add, _radd, _mul,
_rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub, _rsub, _pow,
_rpow, _mod, _rmod, _or, _ror, _and, _rand, _neg, _invert])),
(lambda ds: iscollection(ds) and isreal(ds.measure),
set([isnan]))
])
| bsd-3-clause | 5,816,400,818,536,924,000 | 23.982979 | 79 | 0.577755 | false |
boppreh/bayesian | bayesian/tests.py | 1 | 7654 | import sys
sys.path.append('../')
import unittest
from bayesian import Bayes, classify, classify_normal
class TestBayes(unittest.TestCase):
def test_empty_constructor(self):
with self.assertRaises(ValueError):
b = Bayes()
def test_list_constructor(self):
self.assertEqual(Bayes([]), [])
self.assertEqual(Bayes(()), [])
self.assertEqual(Bayes(range(5)), [0, 1, 2, 3, 4])
self.assertEqual(Bayes({'a': 10, 'b': 50}), [10, 50])
self.assertEqual(Bayes([10, 10, 20]), [10, 10, 20])
self.assertEqual(Bayes([('a', 10), ('b', 50)]), [10, 50])
with self.assertRaises(ValueError):
b = Bayes([('a', 10), ('b', 50), ('a', 15)])
def test_get_odds(self):
b = Bayes({'a': 10, 'b': 50})
self.assertEqual(b['a'], 10)
self.assertEqual(b['b'], 50)
self.assertEqual(b[0], 10)
self.assertEqual(b[1], 50)
with self.assertRaises(IndexError):
b[2]
with self.assertRaises(ValueError):
b['c']
def test_set_odds(self):
b = Bayes((10, 20, 30))
b[0] = 50
b[1] = 40
b[2] = 30
self.assertEqual(b, [50, 40, 30])
def test_opposite(self):
b = Bayes([0.2, 0.8])
opposite = b.opposite()
self.assertEqual(opposite[0] / opposite[1], b[1] / b[0])
b = Bayes([0.2, 0.4, 0.4])
opposite = b.opposite()
self.assertEqual(opposite[0] / opposite[1], b[1] / b[0])
self.assertEqual(opposite[1] / opposite[2], b[2] / b[1])
self.assertEqual(opposite[0] / opposite[2], b[2] / b[0])
def test_normalized(self):
self.assertEqual(Bayes([]).normalized(), [])
self.assertEqual(Bayes([2]).normalized(), [1])
self.assertEqual(Bayes([9, 1]).normalized(), [0.9, 0.1])
self.assertEqual(Bayes([2, 4, 4]).normalized(), [0.2, 0.4, 0.4])
self.assertEqual(Bayes([2, 0]).normalized(), [1.0, 0])
self.assertEqual(Bayes([0, 0]).normalized(), [0.0, 0])
def test_operators(self):
b = Bayes([5, 2, 3])
b *= (2, 2, 1)
b /= (2, 2, 1)
self.assertEqual(b, [5, 2, 3])
self.assertEqual(Bayes([.5, .5]) * (.9, .1), [0.45, 0.05])
self.assertEqual(Bayes([.5, .5]) / (.9, .1), [5 / 9, 5])
self.assertEqual(Bayes([.5, .5]) * {'0': 0.9, '1': 0.1}, [0.45, 0.05])
self.assertEqual(Bayes([.5, .5]) * [('0', 0.9), ('1', 0.1)], [0.45, 0.05])
def test_equality(self):
b1 = Bayes([0.5, 0.2, 0.3])
b2 = Bayes([5, 2, 3])
b3 = Bayes([5, 2, 5])
self.assertEqual(b1, b2)
self.assertNotEqual(b1, b3)
self.assertNotEqual(b2, b3)
def test_update(self):
b = Bayes([1, 2])
b.update((2, 1))
self.assertEqual(b, [1, 1])
b.update((2, 1))
self.assertEqual(b, [2, 1])
b.update((2, 0))
self.assertEqual(b, [1, 0])
def test_update_from_events(self):
b = Bayes([1, 1])
b.update_from_events(['a', 'a', 'a'], {'a': (0.5, 2)})
self.assertEqual(b, [0.5 ** 3, 2 ** 3])
def test_update_from_tests(self):
b = Bayes([1, 1])
b.update_from_tests([True], [0.9, 0.1])
self.assertEqual(b, [0.45, 0.05])
b = Bayes([1, 1])
b.update_from_tests([True, True, True, False], [0.5, 2])
self.assertEqual(b, [0.5 ** 2, 2 ** 2])
def test_most_likely(self):
b = Bayes({'a': 9, 'b': 1})
self.assertEqual(b.most_likely(), 'a')
self.assertEqual(b.most_likely(0), 'a')
self.assertEqual(b.most_likely(0.89), 'a')
self.assertIsNone(b.most_likely(0.91))
def test_is_likely(self):
b = Bayes({'a': 9, 'b': 1})
self.assertTrue(b.is_likely('a'))
self.assertTrue(b.is_likely('a', 0.89))
self.assertFalse(b.is_likely('a', 0.91))
def test_conversions(self):
b = Bayes({'a': 9, 'b': 1, 'c': 0})
self.assertEqual(b, b.normalized())
self.assertEqual(b.normalized()['a'], 0.9)
self.assertEqual(b.opposite().opposite(), b)
def test_extract_events_odds(self):
instances = {'spam': ["buy viagra", "buy cialis"] * 100 + ["meeting love"],
'genuine': ["meeting tomorrow", "buy milk"] * 100}
odds = Bayes.extract_events_odds(instances)
b = Bayes({'spam': 0.9, 'genuine': 0.1})
b.update_from_events('buy coffee for meeting'.split(), odds)
self.assertEqual(b.most_likely(0.8), 'genuine')
class TestClassify(unittest.TestCase):
def test_single(self):
self.assertEqual(classify('a', {'A': []}), 'A')
self.assertEqual(classify('a', {'A': ['a']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'a']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'b']}), 'A')
def test_basic(self):
self.assertEqual(classify('a', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a a', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a b', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a b', {'A': ['a', 'a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a b b', {'A': ['a', 'a'], 'B': ['b']}), 'B')
self.assertEqual(classify('b b b', {'A': ['a', 'a'], 'B': ['b']}), 'B')
def test_with_extraction(self):
self.assertEqual(classify('a', {'A': ['a a a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'a'], 'B': ['b b b']}), 'A')
def test_sample(self):
spams = ["buy viagra", "dear recipient", "meet sexy singles"]
genuines = ["let's meet tomorrow", "remember to buy milk"]
message = "remember the meeting tomorrow"
instances = {'spam': spams, 'genuine': genuines}
self.assertEqual(classify(message, instances), 'genuine')
# Classify File and Classify Folder require too much of a test harness for now.
class TestClassifyNormal(unittest.TestCase):
def test_single(self):
self.assertEqual(classify_normal({'a': 100}, {'A': [{'a': 100}]}), 'A')
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 0}]}), 'A')
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 10}],
'B': [{'a': 50, 'b': 100}]}), None)
def test_basic(self):
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 10},
{'a': 99, 'b': -10}],
'B': [{'a': 50, 'b': 100},
{'a': 70, 'b':90}]}), 'A')
def test_sample(self):
instance = {'height': 6, 'weight': 130, 'foot size': 8}
training = {'male': [{'height': 6, 'weight': 180, 'foot size': 12},
{'height': 5.92, 'weight': 190, 'foot size': 11},
{'height': 5.58, 'weight': 170, 'foot size': 12},
{'height': 5.92, 'weight': 165, 'foot size': 10}],
'female': [{'height': 5, 'weight': 100, 'foot size': 6},
{'height': 5.5, 'weight': 150, 'foot size': 8},
{'height': 5.42, 'weight': 130, 'foot size': 7},
{'height': 5.75, 'weight': 150, 'foot size': 9}]}
self.assertEqual(classify_normal(instance, training), 'female')
if __name__ == '__main__':
unittest.main()
| mit | 2,979,042,622,111,361,000 | 38.864583 | 83 | 0.482362 | false |
danyill/rdb-tool | rdb_section_extract.py | 1 | 13782 | #!/usr/bin/env python3
"""
This tool extracts a pile of settings based on the hierarchy of Quickset
"""
import collections
import os
import re
import olefile
import sel_logic_count
LINE_INFO = ['Lines Used (w/ comment lines)', 'Lines Used (w/o comment lines)']
LOGIC_INFO = [ 'PSV', 'PMV', 'PLT', 'PCT', 'PST', 'PCN',
'ASV', 'AMV', 'ALT', 'AST', 'ACN']
TOTAL_SEL_PROTECTION_LINES = 250
TOTAL_SEL_AUTOMATION_LINES = 1000
# this probably needs to be expanded
SEL_FILES_TO_GROUP = {
'G': ['SET_G1'],
'G1': ['SET_S1.TXT', 'SET_L1.TXT', 'SET_1.TXT'], # Groups
'G2': ['SET_S2.TXT', 'SET_L2.TXT', 'SET_2.TXT'],
'G3': ['SET_S3.TXT', 'SET_L3.TXT', 'SET_3.TXT'],
'G4': ['SET_S4.TXT', 'SET_L4.TXT', 'SET_4.TXT'],
'G5': ['SET_S5.TXT', 'SET_L5.TXT', 'SET_5.TXT'],
'G6': ['SET_S6.TXT', 'SET_L6.TXT', 'SET_6.TXT'],
'P1': ['SET_P1.TXT'], # Ports
'P2': ['SET_P2.TXT'],
'P3': ['SET_P3.TXT'],
'P5': ['SET_P5.TXT'],
'PF': ['SET_PF.TXT'], # Front Port
'P87': ['SET_P87.TXT'], # Differential Port Settings
'A1': ['SET_A1.TXT'], # Automation
'A2': ['SET_A2.TXT'],
'A3': ['SET_A3.TXT'],
'A4': ['SET_A4.TXT'],
'A5': ['SET_A5.TXT'],
'A6': ['SET_A6.TXT'],
'A7': ['SET_A7.TXT'],
'A8': ['SET_A8.TXT'],
'A9': ['SET_A9.TXT'],
'A10': ['SET_A10.TXT'],
'L1': ['SET_L1.TXT'], # Protection Logic
'L2': ['SET_L2.TXT'],
'L3': ['SET_L3.TXT'],
'L4': ['SET_L4.TXT'],
'L5': ['SET_L5.TXT'],
'L6': ['SET_L6.TXT'],
'L7': ['SET_L7.TXT'],
'L8': ['SET_L8.TXT'],
'L9': ['SET_L9.TXT'],
'B1': ['SET_B1.TXT'], # Bay Control information
'D1': ['SET_D1.TXT'], # DNP
'D2': ['SET_D2.TXT'],
'D3': ['SET_D3.TXT'],
'D4': ['SET_D4.TXT'],
'D5': ['SET_D5.TXT'],
'F1': ['SET_F1.TXT'], # Front Panel
'M1': ['SET_M1.TXT'], # CB Monitoring
'N1': ['SET_N1.TXT'], # Notes
'O1': ['SET_O1.TXT'], # Outputs
'R1': ['SET_R1.TXT'], # SER
'T1': ['SET_R1.TXT'], # Aliases
}
def process_file(filepath, args, settingsName=None):
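    """Open an SEL RDB file and return ``[settings_name, stream_text]`` for the
    settings stream matching the group key ``args`` (see SEL_FILES_TO_GROUP),
    or None when no matching stream is found.
    """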
rdb_info = get_ole_data(filepath, settingsName=settingsName)
return extract_parameters(filepath, rdb_info, args)
def get_ole_data(filepath,settingsName=None):
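    """Return ``[stream_path, raw_bytes]`` pairs for every stream in the RDB
    file (an OLE compound document), optionally restricted to the streams
    belonging to ``settingsName``.
    """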
data = []
listdir = []
try:
ole = olefile.OleFileIO(filepath)
listdir = ole.listdir()
if settingsName:
listdir = [l for l in listdir if l[1]==settingsName]
for direntry in listdir:
data.append([direntry, ole.openstream(direntry).getvalue()])
except:
print('Failed to read streams in file: ' + filepath)
return data
def extract_parameters(filepath, rdb_info, txtfile):
fn = os.path.basename(filepath)
parameter_info=[]
for stream in rdb_info:
settings_name = str(stream[0][1])
stream_name = str(stream[0][-1]).upper()
if stream_name in SEL_FILES_TO_GROUP[txtfile]:
return [settings_name, stream[1].decode('utf-8')]
def get_sel_setting(text):
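    """Return (setting_name, value) tuples parsed from a raw settings stream.

    Illustrative only: a stream line such as ``SITM70,"TRIPT"`` (optionally
    terminated by an \\x1c control character) yields ``('SITM70', 'TRIPT')``.
    """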
setting_expression = re.compile(r'^([A-Z0-9_]+),\"(.*)\"(?:\r\n|\x1c\r\n)', flags=re.MULTILINE)
return re.findall(setting_expression, text)
def format_logic(d):
    # If the values are raw logic text, compute usage for each group;
    # dicts of already-computed usage figures are passed through untouched.
    if d and all(isinstance(v, str) for v in d.values()):
raw_results = collections.OrderedDict()
for k, v in d.items():
raw_results[k] = sel_logic_count.calc_usage_raw(v)
return raw_results
else:
return d
def make_table_data(raw_results):
table_data = []
for row_name in LINE_INFO + LOGIC_INFO:
table_row = [row_name]
for k, v in raw_results.items():
if row_name in v:
table_row.append(v[row_name])
table_data.append(table_row)
return table_data
def sum_logic_usage_multiple_groups(d, group_title='Group', settings_name=None, automation=None, total=None):
"""
    d is a dictionary keyed by group number whose values hold the
    protection logic for that group. The usage is summarised and an
    Asciidoc table is returned as a string.
"""
columns = 3*len(d) + 1
# get logic report
table_data = make_table_data(format_logic(d))
no_groups = len(d)
info = []
# Anchor
info.append('[#overall_logic_usage]')
# Title
if settings_name:
keys = ', '.join([str(ky)[1:2] for ky in d.keys()])
info.append('.`{}` Logic Usage in Setting Groups {}'.format(settings_name.upper(), keys))
# Column Definitions
info.append('[cols="1*<.^,{}"]'.format(','.join(['1*>.^,1*^.^,1*>.^'] * no_groups)))
info.append('|===')
# Group Title
info.append('h|')
for group in d.keys():
info.append('3+^.^h| '.format(no_groups) +
'{} {}'.format(group_title, group[1:]))
info.append('')
info.append(str(columns)+'+^.^h| Protection Usage')
info.append('')
# Overall line information
for k in table_data:
if k[0] in LINE_INFO:
pr = ('h| {}').format(k[0]).ljust(50)
for gd in k[1:]:
pr += '3+^.^| {} / {} '.format(gd, TOTAL_SEL_PROTECTION_LINES).ljust(20)
info.append(pr)
# Capacity free from relay STA S command
sta_s_info = ['Free protection settings capacity (%)', 'Free protection execution capacity (%)']
for s in sta_s_info:
pr = ('h| {} ').format(s).ljust(50)
for gd in range(no_groups):
pr += '3+^.^| #??# '.ljust(20)
info.append(pr)
info.append('')
if d and not total:
info.append(str(columns)+'+^.^h| Variable Usage for Protection Logic')
elif total and automation:
info.append(str(columns)+'+^.^h| Variable Usage for Protection and Automation Logic')
info.append('')
info.append('h| Variable ' +
' '.join(['h| Used h| Free % h| Available']*no_groups))
info.append('')
if total:
table_data = make_table_data(format_logic(total))
for k in table_data:
if k[0] in LOGIC_INFO:
pr = ('h| `{}`'.format(k[0])).ljust(13)
for gd in k[1:]:
fstr = '| {:>12} | {:<7.0%} | {:<30}'
pr += fstr.format('{} / {}'.format(gd['qty'], gd['total']),
gd['free_pu'],
'[small]#{}#'.format(gd['available_detail']))
info.append(pr)
if automation:
info.append('')
info.append(str(columns)+'+^.^h| Automation Usage')
info.append('')
# Group Title
info.append('h|')
for group in d.keys():
info.append('3+^.^h| '.format(no_groups) +
'{} {}'.format(group_title, group[1:]))
questions = ['3+^.^| #??# '] * no_groups
info.append('{:<50} {}'.format('h| Free automation settings storage capacity (%)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Free automation execution availability (%)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Automation peak execution cycle time (ms)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Automation average execution cycle time (ms)', ''.join(questions)))
table_data = make_table_data(format_logic(automation))
# Overall line information
for k in table_data:
if k[0] in LINE_INFO:
pr = ('h| {} ').format(k[0]).ljust(51)
for gd in k[1:]:
pr += str(no_groups * 3) + '+^.^| {} / {} '.format(gd, TOTAL_SEL_AUTOMATION_LINES).ljust(20)
info.append(pr)
info.append('|===')
return('\n'.join(info))
def get_logic(filepath, *names, settingsName=None):
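    """Return a dict mapping each requested stream key (e.g. 'L1' or 'A3') to
    the newline-joined setting values extracted from that stream.
    """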
logics = {}
for name in names:
[settings_name, output] = process_file(filepath, name, settingsName)
lines = get_sel_setting(output)
result = []
for settings in lines:
result.append(settings[1])
logic_text = "\n".join(result)
logics[name] = logic_text
return logics
def get_logic_total(path, groups, includeAutomation=True, settings_name=None):
# get logic for number of protection
groups_new = ['L' + str(g) for g in groups]
protection = get_logic(path, *groups_new, settingsName=settings_name)
automation_arr = []
if includeAutomation:
for block in range(1,10+1):
#print(get_logic(path, 'A' + str(block)))
automation_arr.append(get_logic(path, 'A' + str(block), settingsName=settings_name)['A' + str(block)])
automation = '\n'.join(automation_arr)
return [protection, automation]
return [protection]
def plogic_used(filepath, group_prefix, settings_name, *nums):
logics = get_logic(filepath, *nums)
if len(nums) == 1:
return sel_logic_count.calc_logic_usage(logics[nums[0]])
else:
return sum_logic_usage_multiple_groups(logics, group_prefix, settings_name)
def pa_logic_used(filepath, group_prefix, settings_name, *nums):
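    """Print an Asciidoc usage table for the given setting groups ``nums``:
    line counts for protection and automation separately, plus variable usage
    for the combined protection and automation logic.
    """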
logics = get_logic_total(filepath, nums, includeAutomation=True, settings_name=settings_name)
LINES = ['Lines Used (w/ comment lines)', 'Lines Used (w/o comment lines)']
automation = sel_logic_count.calc_usage_raw(logics[1])
automation = {k:v for (k,v) in automation.items() if k in LINES}
automation = {'A': automation}
protection = {}
total = {}
for group in nums:
# print(group)
pg = sel_logic_count.calc_usage_raw(logics[0]['L' + str(group)])
protection['L' + str(group)] = {k:v for (k,v) in pg.items() if k in LINES}
tg = sel_logic_count.calc_usage_raw(logics[0]['L' + str(group)] + '\n' + logics[1])
total['L' + str(group)] = {k:v for (k,v) in tg.items() if k not in LINES}
#print('p',protection, 'a', automation, 't', total)
print(sum_logic_usage_multiple_groups(protection, group_prefix, settings_name, automation, total))
"""
if len(nums) == 1:
return sel_logic_count.calc_logic_usage(logics[nums[0]])
else:
return sum_logic_usage_multiple_groups(logics, group_prefix, settings_name)
"""
if __name__ == '__main__':
"""
path = r'F:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
output = process_file(path, 'F1')
k = get_sel_setting(output)
result = []
for item in k:
val = item[1]
cnt = sel_logic_count.countElementsUsed(val)
result.append(('{: >3}').format(str(cnt)) + ' | ' + item[0] + ' ::= ' + val)
result = sorted(result, key=lambda x: int((x.split('|'))[0].strip()))
print(result)
for k in result:
# print('x', k)
print(int((k.split('|'))[0].strip()), k)
"""
"""output = process_file('/media/mulhollandd/KINGSTON/standard-designs/transformer-protection/SEL487E-3_Transformer_Protection_Settings/settings/SEL-487E-3.rdb', 'L1')
#k = get_stream_parameter('',output)
k = get_sel_setting(output)
result = []
for val in k:
result.append(val[1])
logic_text = "\n".join(result)
print(sel_logic_count.calc_logic_usage(logic_text))"""
#plogic_used('/home/mulhollandd/Downloads/SEL487E-3_Transformer_Protection_Settings_v14Aug2017.000.002/settings/SEL-487E-3.rdb', 1)
#path = '/media/mulhollandd/KINGSTON/standard-designs/transformer-protection/SEL487E-3_Transformer_Protection_Settings/settings/SEL-487E-3.rdb'
#path = r'G:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
path = r'F:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
path = r'/media/mulhollandd/KINGSTON/standard-designs/capacitor-protection/SEL487E-3_Capacitor_Protection_Settings/settings/SEL-487E-3.rdb'
#path = '/home/mulhollandd/Downloads/junk/SEL-487E-3.rdb'
#print(plogic_used(path, 'Application', 1, 2))
#print(get_logic_total(path, [1,2]))
#print(pa_logic_used(path, 'Application', 1, 2))
#print(plogic_used(path, 'Application', 'Blah', 'L1', 'L2'))
pa_logic_used(path, 'Application', 'TYP123_DStarNE', '1')
#output = process_file(path, 'R1')
#print(output)
#print(output)
"""
ser_points_and_aliases = {}
for counter in range(1, 250+1):
num = str(counter)
#SITM70,"TRIPT"\x1c\r\nSNAME70,"TRIPT"\x1c\r\nSSET70,"Asserted"\x1c\r\nSCLR70,"Deasserted"\x1c\r\nSHMI70,"N"
match = re.compile(r'SITM' + num + r',"([A-Z0-9_]*)"\x1c\r\nSNAME' + num + r',"([A-Za-z0-9_]+)*"\x1c\r\nSSET' + num + ',"(.*)"\x1c\r\nSCLR'+ num + ',"(.*)"\x1c\r\nSHMI' + num + r',"([A-Z0-9_]+)*"', flags=re.MULTILINE)
result = match.findall('\n'.join(output))
rwb = result[0][0]
aliases = result[0][1]
alias_set = result[0][2]
alias_clear = result[0][3]
hmi_alarm = result[0][4]
ser_points_and_aliases[rwb] = [aliases, alias_set, alias_clear, hmi_alarm]
print(rwb, [aliases, alias_set, alias_clear, hmi_alarm])
output = process_file(path, 'P1')
protection = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6']
automation = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10']
for logic in protection + automation
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
"""
#for k in output:
# print(k)
# SITM248,"PST07Q" SNAME248,"PST07Q" SSET248,"Asserted" SCLR248,"Deasserted" SHMI248,"N"
#
# tool to remove protection and automation aliases which are unused.
| gpl-3.0 | -2,976,571,120,079,492,000 | 31.658768 | 225 | 0.567117 | false |
vortex-ape/scikit-learn | sklearn/tree/export.py | 4 | 17978 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Li Li <[email protected]>
# License: BSD 3 clause
from numbers import Integral
import numpy as np
from ..externals import six
from ..utils.validation import check_is_fitted
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
class Sentinel(object):
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=None, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False, precision=3):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default=None)
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, optional (default=3)
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
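            # Alpha encodes how decisively the majority class wins: it is the
            # gap between the two largest class fractions, rescaled so a pure
            # node is fully opaque and an exact tie is fully transparent.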
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] -
sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
precision),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], precision)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, precision)
elif proportion:
# Classification
value_text = np.around(value, precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
check_is_fitted(decision_tree, 'tree_')
own_file = False
return_string = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_ in the decision_tree
if feature_names is not None:
if len(feature_names) != decision_tree.n_features_:
raise ValueError("Length of feature_names, %d "
"does not match number of features, %d"
% (len(feature_names),
decision_tree.n_features_))
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
| bsd-3-clause | -4,032,309,513,680,093,700 | 37.008457 | 79 | 0.518356 | false |
rdezavalia/ansible | lib/ansible/cli/galaxy.py | 1 | 30060 | ########################################################################
#
# (C) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
import yaml
import time
from collections import defaultdict
from jinja2 import Environment
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.unicode import to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if not self.action in ("delete","import","init","login","setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
        self.options, self.args = self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
return True
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_unicode(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
# create default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
# create default .travis.yml
travis = Environment().from_string(self.galaxy.default_travis).render()
f = open(os.path.join(role_path, '.travis.yml'), 'w')
f.write(travis)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
description = 'your description',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir == "tests":
# create tests/test.yml
inject = dict(
role_name = role_name
)
playbook = Environment().from_string(self.galaxy.default_test).render(inject)
f = open(os.path.join(dir_path, 'test.yml'), 'w')
f.write(playbook)
f.close()
# create tests/inventory
f = open(os.path.join(dir_path, 'inventory'), 'w')
f.write('localhost')
f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
            role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
### FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
        roles to be installed, unless -r (a roles file) was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
role = RoleRequirement.role_yaml_parse(role)
display.vvv('found role %s in yaml file' % str(role))
if 'name' not in role and 'scm' not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
display.deprecated("going forward only the yaml format will be supported")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
display.error('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None and not force:
display.display('- %s is already installed, skipping.' % role.name)
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None or force:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % dep_role.name)
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        Verify the user's identity via GitHub and retrieve an auth token from Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
"""
Import a role into Galaxy
"""
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = self.args.pop()
github_user = self.args.pop()
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
"""
        Set up an integration from GitHub or Travis.
"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']),color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
        if len(self.args) < 4:
            raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
"""
Delete a role from galaxy.ansible.com
"""
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
display.display(resp['status'])
return True
| gpl-3.0 | -1,375,117,498,538,847,500 | 43.865672 | 197 | 0.559015 | false |
jwodder/javaproperties | docs/conf.py | 1 | 1147 | from javaproperties import __version__
project = "javaproperties"
author = "John T. Wodder II"
copyright = "2016-2020 John T. Wodder II"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
]
autodoc_default_options = {
"members": True,
"undoc-members": True,
}
# NOTE: Do not set 'inherited-members', as it will cause all of the
# MutableMapping methods to be listed under `Properties`.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
}
exclude_patterns = ["_build"]
source_suffix = ".rst"
source_encoding = "utf-8-sig"
master_doc = "index"
version = __version__
release = __version__
today_fmt = "%Y %b %d"
default_role = "py:obj"
pygments_style = "sphinx"
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"collapse_navigation": False,
"prev_next_buttons_location": "both",
}
html_last_updated_fmt = "%Y %b %d"
html_show_sourcelink = True
html_show_sphinx = True
html_show_copyright = True
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
| mit | 7,820,110,847,402,805,000 | 22.895833 | 67 | 0.668701 | false |
rgmining/fraudar | fraudar/__init__.py | 1 | 1871 | #
# __init__.py
#
# Copyright (c) 2016-2017 Junpei Kawamoto
#
# This file is part of rgmining-fraudar.
#
# rgmining-fraudar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rgmining-fraudar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rgmining-fraudar. If not, see <http://www.gnu.org/licenses/>.
#
"""A wrapper of Fraudar algorithm for the review graph mining project.
The FRAUDAR algorithm was introduced by Bryan Hooi *et al.* at the
ACM SIGKDD 2016 Conference on Knowledge Discovery & Data Mining (KDD 2016).
This package exports :class:`ReviewGraph <graph.ReviewGraph>` class,
which implements interfaces expected in other APIs of
`Review Graph Mining project <https://rgmining.github.io/>`_,
and three sub-algorithms used in FRAUDAR:
* :meth:`aveDegree <export.greedy.aveDegree>` computes average degree on a matrix,
* :meth:`sqrtWeightedAveDegree <export.greedy.sqrtWeightedAveDegree>`
computes square-weighted average degree on a matrix,
* :meth:`logWeightedAveDegree <export.greedy.logWeightedAveDegree>`
computes logarithm-weighted average degree on a matrix.
:class:`ReviewGraph <graph.ReviewGraph>` takes the keyword argument ``algo``
to set the sub-algorithm to be used.
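A minimal usage sketch (the graph-building method names below follow the
common Review Graph Mining interface and are assumptions here, not part of
this docstring's guarantees)::
    import fraudar
    graph = fraudar.ReviewGraph(algo=fraudar.aveDegree)
    alice = graph.new_reviewer("alice")
    item = graph.new_product("some-product")
    graph.add_review(alice, item, 0.3)
    graph.update()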
"""
from __future__ import absolute_import
from fraudar.graph import ReviewGraph
from fraudar.export.greedy import aveDegree
from fraudar.export.greedy import sqrtWeightedAveDegree
from fraudar.export.greedy import logWeightedAveDegree
| gpl-3.0 | 8,293,524,928,972,505,000 | 41.522727 | 82 | 0.780866 | false |
goldsborough/euler | 13.py | 1 | 6006 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Work out the first ten digits of the sum of the
following one-hundred 50-digit numbers.
"""
n = """37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690"""
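# Two independent approaches are implemented below: `easy` simply sums the
# integers and slices the first ten digits, while `last_ten` splits each
# number into 10-digit blocks and propagates carries between blocks by hand.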
def add(values, last_carry=0):
carry = 0
for d in range(len(values[0]) -1, -1, -1):
i = int(values[0][d])
for j in values[1:]:
i += int(j[d])
if i + last_carry >= 10:
carry += 1
i %= 10
last_carry = carry
carry = 0
return last_carry
def last_ten(n):
digits = []
for d in n.split():
digits.append([])
for i in range(0, len(d), 10):
digits[-1].append(d[i:i+10])
digits = [[d[i] for d in digits] for i in range(len(digits[0]))]
carry = 0
for x in digits[1::-1]:
carry = add(x, carry)
result = carry
for i in digits[0]:
result += int(i)
return str(result)[:10]
def easy(n):
return str(sum(int(d) for d in n.split()))[:10]
def main():
print(easy(n))
print(last_ten(n))
if __name__ == '__main__':
main() | mit | -4,324,106,769,971,052,500 | 39.863946 | 65 | 0.919414 | false |
lierwing/yecrypto | yecrypto.py | 1 | 1966 | import random
class Yecrypto:
@staticmethod
def ascii_encryption(new_file, normal_string):
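        # Scheme: every character's Unicode code point is shifted by a single
        # random 5-digit key and the shifted values are written space-separated
        # to <new_file>.txt; ascii_decryption reverses this with the same key.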
num = 0
ns_dic = {}
for i in normal_string:
num = num + 1
ns_dic[num] = i
ns_dic_keys = []
for i in ns_dic.keys():
ns_dic_keys.append(i)
random_key = random.randrange(10000, 100000)
crypt_f = open(new_file + ".txt", "w", encoding="utf-8")
for i in ns_dic_keys:
ns_dic_value = ns_dic[i]
ns_crypt_ascii = ord(ns_dic_value)
crypt_f.write("%s " % (ns_crypt_ascii + random_key))
crypt_f.close()
print("Key:", random_key)
print("Encryption Done!")
@staticmethod
def ascii_decryption(find_file, decrypt_key):
crypt_f = open(find_file + ".txt", "r", encoding="utf-8")
read_crypt_f = crypt_f.read().strip()
crypt_f.close()
decrypt_str = []
for i in read_crypt_f.split(" "):
decrypt_ascii = int(i) - decrypt_key
decrypt_ascii = chr(decrypt_ascii)
decrypt_str.append(decrypt_ascii)
decrypt_f = open(find_file + ".txt", "w", encoding="utf-8")
for i in decrypt_str:
decrypt_f.write(i)
print("Decryption Done!")
decrypt_f.close()
if __name__ == "__main__":
while True:
try:
menu = int(input("\nChoose the number( 1: Crypt, 2: Decrypt, 3: Exit ): "))
if menu == 1:
file_name = input("File Name: ")
content = input("Content: ")
Yecrypto().ascii_encryption(file_name, content)
elif menu == 2:
file_name = input("File Name: ")
crypt_key = int(input("Crypt Key: "))
Yecrypto().ascii_decryption(file_name, crypt_key)
elif menu == 3:
break
except ValueError:
print("1: Crypt, 2: Decrypt, 3: Exit")
| mit | 3,042,574,561,103,298,600 | 30.709677 | 87 | 0.503561 | false |
carefree0910/MachineLearning | f_NN/Networks.py | 1 | 12872 | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import matplotlib.pyplot as plt
from f_NN.Layers import *
from f_NN.Optimizers import *
from Util.Bases import ClassifierBase
from Util.ProgressBar import ProgressBar
class NNVerbose:
NONE = 0
EPOCH = 1
METRICS = 2
METRICS_DETAIL = 3
DETAIL = 4
DEBUG = 5
class NaiveNN(ClassifierBase):
NaiveNNTiming = Timing()
def __init__(self, **kwargs):
super(NaiveNN, self).__init__(**kwargs)
self._layers, self._weights, self._bias = [], [], []
self._w_optimizer = self._b_optimizer = None
self._current_dimension = 0
self._params["lr"] = kwargs.get("lr", 0.001)
self._params["epoch"] = kwargs.get("epoch", 10)
self._params["optimizer"] = kwargs.get("optimizer", "Adam")
# Utils
@NaiveNNTiming.timeit(level=4)
def _add_params(self, shape):
self._weights.append(np.random.randn(*shape))
self._bias.append(np.zeros((1, shape[1])))
@NaiveNNTiming.timeit(level=4)
def _add_layer(self, layer, *args):
current, nxt = args
self._add_params((current, nxt))
self._current_dimension = nxt
self._layers.append(layer)
@NaiveNNTiming.timeit(level=1)
def _get_activations(self, x):
activations = [self._layers[0].activate(x, self._weights[0], self._bias[0])]
for i, layer in enumerate(self._layers[1:]):
activations.append(layer.activate(
activations[-1], self._weights[i + 1], self._bias[i + 1]))
return activations
@NaiveNNTiming.timeit(level=1)
def _get_prediction(self, x):
return self._get_activations(x)[-1]
# Optimizing Process
@NaiveNNTiming.timeit(level=4)
def _init_optimizers(self, optimizer, lr, epoch):
opt_fac = OptFactory()
self._w_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._weights, lr, epoch)
self._b_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._bias, lr, epoch)
@NaiveNNTiming.timeit(level=1)
def _opt(self, i, _activation, _delta):
self._weights[i] += self._w_optimizer.run(
i, _activation.T.dot(_delta)
)
self._bias[i] += self._b_optimizer.run(
i, np.sum(_delta, axis=0, keepdims=True)
)
# API
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def add(self, layer):
if not self._layers:
self._layers, self._current_dimension = [layer], layer.shape[1]
self._add_params(layer.shape)
else:
nxt = layer.shape[0]
layer.shape = (self._current_dimension, nxt)
self._add_layer(layer, self._current_dimension, nxt)
@NaiveNNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, optimizer=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
for counter in range(epoch):
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x)
deltas = [self._layers[-1].bp_first(y, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(self._layers[i - 1].bp(
activations[i - 1], self._weights[i], deltas[-1]
))
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x, deltas[-1])
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def predict(self, x, get_raw_results=False, **kwargs):
y_pred = self._get_prediction(np.atleast_2d(x))
if get_raw_results:
return y_pred
return np.argmax(y_pred, axis=1)
class NN(NaiveNN):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NN, self).__init__(**kwargs)
self._available_metrics = {
key: value for key, value in zip(["acc", "f1-score"], [NN.acc, NN.f1_score])
}
self._metrics, self._metric_names, self._logs = [], [], {}
self.verbose = None
self._params["batch_size"] = kwargs.get("batch_size", 256)
self._params["train_rate"] = kwargs.get("train_rate", None)
self._params["metrics"] = kwargs.get("metrics", None)
self._params["record_period"] = kwargs.get("record_period", 100)
self._params["verbose"] = kwargs.get("verbose", 1)
# Utils
@NNTiming.timeit(level=1)
def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):
if verbose is None:
verbose = self.verbose
single_batch = batch_size / np.prod(x.shape[1:]) # type: float
single_batch = int(single_batch)
if not single_batch:
single_batch = 1
if single_batch >= len(x):
return self._get_activations(x).pop()
epoch = int(len(x) / single_batch)
        if len(x) % single_batch:
epoch += 1
name = "Prediction" if name is None else "Prediction ({})".format(name)
sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
if verbose >= NNVerbose.METRICS:
sub_bar.start()
rs, count = [self._get_activations(x[:single_batch]).pop()], single_batch
if verbose >= NNVerbose.METRICS:
sub_bar.update()
while count < len(x):
count += single_batch
if count >= len(x):
rs.append(self._get_activations(x[count - single_batch:]).pop())
else:
rs.append(self._get_activations(x[count - single_batch:count]).pop())
if verbose >= NNVerbose.METRICS:
sub_bar.update()
return np.vstack(rs)
@NNTiming.timeit(level=4, prefix="[API] ")
def _preview(self):
if not self._layers:
rs = "None"
else:
rs = (
"Input : {:<10s} - {}\n".format("Dimension", self._layers[0].shape[0]) +
"\n".join(
["Layer : {:<10s} - {}".format(
_layer.name, _layer.shape[1]
) for _layer in self._layers[:-1]]
) + "\nCost : {:<10s}".format(self._layers[-1].name)
)
print("=" * 30 + "\n" + "Structure\n" + "-" * 30 + "\n" + rs + "\n" + "=" * 30)
print("Optimizer")
print("-" * 30)
print(self._w_optimizer)
print("=" * 30)
@NNTiming.timeit(level=2)
def _append_log(self, x, y, y_classes, name):
y_pred = self._get_prediction(x, name)
y_pred_classes = np.argmax(y_pred, axis=1)
for i, metric in enumerate(self._metrics):
self._logs[name][i].append(metric(y_classes, y_pred_classes))
self._logs[name][-1].append(self._layers[-1].calculate(y, y_pred) / len(y))
@NNTiming.timeit(level=3)
def _print_metric_logs(self, data_type):
print()
print("=" * 47)
for i, name in enumerate(self._metric_names):
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, name, self._logs[data_type][i][-1]))
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, "loss", self._logs[data_type][-1][-1]))
print("=" * 47)
@NNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, batch_size=None, train_rate=None,
optimizer=None, metrics=None, record_period=None, verbose=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
if batch_size is None:
batch_size = self._params["batch_size"]
if train_rate is None:
train_rate = self._params["train_rate"]
if metrics is None:
metrics = self._params["metrics"]
if record_period is None:
record_period = self._params["record_period"]
if verbose is None:
verbose = self._params["verbose"]
self.verbose = verbose
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
self._preview()
if train_rate is not None:
train_rate = float(train_rate)
train_len = int(len(x) * train_rate)
shuffle_suffix = np.random.permutation(len(x))
x, y = x[shuffle_suffix], y[shuffle_suffix]
x_train, y_train = x[:train_len], y[:train_len]
x_test, y_test = x[train_len:], y[train_len:]
else:
x_train = x_test = x
y_train = y_test = y
y_train_classes = np.argmax(y_train, axis=1)
y_test_classes = np.argmax(y_test, axis=1)
train_len = len(x_train)
batch_size = min(batch_size, train_len)
do_random_batch = train_len > batch_size
train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
if metrics is None:
metrics = []
self._metrics = self.get_metrics(metrics)
self._metric_names = [_m.__name__ for _m in metrics]
self._logs = {
name: [[] for _ in range(len(metrics) + 1)] for name in ("train", "test")
}
bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
if self.verbose >= NNVerbose.EPOCH:
bar.start()
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
for counter in range(epoch):
if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
sub_bar.start()
for _ in range(train_repeat):
if do_random_batch:
batch = np.random.choice(train_len, batch_size)
x_batch, y_batch = x_train[batch], y_train[batch]
else:
x_batch, y_batch = x_train, y_train
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x_batch)
deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(
self._layers[i - 1].bp(activations[i - 1], self._weights[i], deltas[-1])
)
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x_batch, deltas[-1])
if self.verbose >= NNVerbose.EPOCH:
if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
sub_bar.update()
if (counter + 1) % record_period == 0:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
if self.verbose >= NNVerbose.METRICS:
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
bar.update(counter // record_period + 1)
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
def draw_logs(self):
metrics_log, loss_log = {}, {}
for key, value in sorted(self._logs.items()):
metrics_log[key], loss_log[key] = value[:-1], value[-1]
for i, name in enumerate(sorted(self._metric_names)):
plt.figure()
plt.title("Metric Type: {}".format(name))
for key, log in sorted(metrics_log.items()):
xs = np.arange(len(log[i])) + 1
plt.plot(xs, log[i], label="Data Type: {}".format(key))
plt.legend(loc=4)
plt.show()
plt.close()
plt.figure()
plt.title("Cost")
for key, loss in sorted(loss_log.items()):
xs = np.arange(len(loss)) + 1
plt.plot(xs, loss, label="Data Type: {}".format(key))
plt.legend()
plt.show()
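# A minimal usage sketch (illustrative only -- the concrete layer classes come
# from f_NN.Layers and the class names below are assumptions, not real names):
#   nn = NN()
#   nn.add(SomeHiddenLayer((x.shape[1], 24)))   # hypothetical hidden layer
#   nn.add(SomeCostLayer((y.shape[1],)))        # hypothetical cost/output layer
#   nn.fit(x, y, epoch=50, verbose=NNVerbose.METRICS)
#   nn.draw_logs()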
| mit | 2,093,484,184,977,920,800 | 38.606154 | 116 | 0.535736 | false |
Code4SA/odac-ford-housing | msg_handler/admin.py | 1 | 7116 | from flask import Flask, url_for, redirect, render_template, request
from wtforms import form, fields, validators
from wtforms.fields import SelectField, TextAreaField
from flask.ext import admin, login
from flask.ext.admin.contrib import sqla
from flask.ext.admin import helpers, expose
from flask.ext.admin.model.template import macro
from flask.ext.admin.form import rules
from flask.ext.login import current_user
from msg_handler import app, db, logger
from msg_handler.models import *
from vumi_go import VumiMessage
import json
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
email = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
if user.password != hash(self.password.data):
raise validators.ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(email=self.email.data).first()
class RegistrationForm(form.Form):
email = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
if db.session.query(User).filter_by(email=self.email.data).count() > 0:
raise validators.ValidationError('Duplicate users')
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.init_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqla.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Create customized index view class that handles login & registration
class MyAdminIndexView(admin.AdminIndexView):
@expose('/')
def index(self):
if not login.current_user.is_authenticated():
return redirect(url_for('.login_view'))
return super(MyAdminIndexView, self).index()
@expose('/login/', methods=('GET', 'POST'))
def login_view(self):
# handle user login
form = LoginForm(request.form)
if helpers.validate_form_on_submit(form):
user = form.get_user()
login.login_user(user)
if login.current_user.is_authenticated():
return redirect(url_for('.index'))
link = '<p>Don\'t have an account? <a href="' + url_for('.register_view') + '">Click here to register.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/register/', methods=('GET', 'POST'))
def register_view(self):
form = RegistrationForm(request.form)
if helpers.validate_form_on_submit(form):
user = User()
# hash password, before populating User object
form.password.data = hash(form.password.data)
form.populate_obj(user)
db.session.add(user)
db.session.commit()
login.login_user(user)
return redirect(url_for('.index'))
link = '<p>Already have an account? <a href="' + url_for('.login_view') + '">Click here to log in.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/logout/')
def logout_view(self):
login.logout_user()
return redirect(url_for('.index'))
class QueryView(MyModelView):
# disable manual editing / deletion of messages
can_create = False
can_edit = False
can_delete = False
column_list = (
'starred',
'datetime',
'from_addr',
'status',
'content',
'notes',
'responses'
)
column_labels = dict(
datetime='Date',
from_addr='From',
content='Message'
)
column_formatters = dict(
starred=macro('render_star'),
datetime=macro('render_date'),
status=macro('render_status'),
content=macro('render_content'),
notes=macro('render_notes'),
responses=macro('render_responses')
)
column_sortable_list = ('starred', 'datetime', 'from_addr', 'status')
column_searchable_list = ('content', Response.content)
column_default_sort = ('datetime', True)
list_template = 'query_list_template.html'
form_overrides = dict(
content=TextAreaField,
)
form_args = dict(
status=dict(
choices=[
('pending', 'pending'),
('in_progress', 'in progress'),
('finished', 'finished')
]
)
)
inline_models = [(Response, dict(form_label='Reply', ))]
class UserView(MyModelView):
can_create = False
column_list = (
'email',
'first_name',
'last_name'
)
class UpdateView(MyModelView):
can_delete = False
can_edit = False
list_template = 'update_list_template.html'
column_list = (
'datetime',
'user',
'content',
'notes'
)
column_labels = dict(
datetime='Date',
user='User',
content='Message',
notes='Notes'
)
column_default_sort = ('datetime', True)
column_formatters = dict(
datetime=macro('render_date'),
user=macro('render_user'),
)
form_overrides = dict(
content=TextAreaField,
)
form_create_rules = [
rules.Field('content'),
]
def on_model_change(self, form, model, is_created):
# send SMS notifications before saving message to database
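        # notification_list.json (in the Flask instance folder) is expected to
        # hold a JSON array of phone numbers; each number receives the update.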
msg = VumiMessage({"content": model.content})
count_tot = 0
model.user = current_user
try:
with app.open_instance_resource('notification_list.json', mode='r') as f:
try:
notification_list = json.loads(f.read())
except ValueError:
# start with clean list, if the file does not yet contain a list
notification_list = []
pass
for number in notification_list:
logger.debug("sending update to: " + number)
msg.send(number)
count_tot += 1
model.notes = "Update sent to " + str(count_tot) + " user(s)."
except Exception:
tmp = "Error sending update broadcast via SMS."
logger.exception(tmp)
model.notes = tmp
return
# Initialize flask-login
init_login()
# Create admin
admin = admin.Admin(app, 'Ford Housing', index_view=MyAdminIndexView(), base_template='my_master.html')
# Add views
admin.add_view(UserView(User, db.session))
admin.add_view(QueryView(Query, db.session))
admin.add_view(UpdateView(Update, db.session)) | apache-2.0 | -4,220,745,175,959,475,000 | 29.676724 | 119 | 0.607926 | false |
olgadoronina/LES_ABC | abc_code/utils.py | 1 | 8359 | import logging
import abc_code.global_var as g
import numpy as np
import scipy as sp
import scipy.stats
from numpy.fft import fftfreq, fftn, ifftn
from time import time
from abc_code.sobol_seq import i4_sobol_generate
from fast_histogram import histogram1d
import abc_code.distance as dist
import itertools
def timer(start, end, label):
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
logging.info("{:0>1}:{:0>2}:{:05.2f} \t {}".format(int(hours), int(minutes), seconds, label))
def rand_ind(random):
ind = np.random.randint(0, 256 ** 3, size=random)
ind = np.unique(ind)
while len(ind) < 0.99*random:
ind_add = np.random.randint(0, 256 ** 3, size=(random - len(ind)))
ind = np.unique(np.append(ind, ind_add))
return ind
def pdf_from_array_with_x(array, bins, range):
    pdf, edges = np.histogram(array, bins=bins, range=range, density=True)
x = (edges[1:] + edges[:-1]) / 2
return x, pdf
def pdf_from_array_improved(array, bins, domain, N_each):
pdf = np.empty((N_each, bins))
for i in range(N_each):
        pdf[i, :] = np.histogram(array[i, :], bins=bins, range=domain, density=True)[0]
return pdf
def pdf_from_array_np(array, bins, range):
    pdf, _ = np.histogram(array, bins=bins, range=range, density=True)
return pdf
def pdf_from_array(array, bins, range):
pdf = histogram1d(array.flatten(), bins=bins, range=range)
norm = np.sum(pdf)/bins
return pdf/norm
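# Note: the normalisation above makes the returned bin values average to 1;
# this coincides with a true probability density only when the histogram
# range spans unit width.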
def baseconvert(x, newbase, number_digits):
"""Converts given number x, from base 10 to base 'newbase'
x -- the number in base 10
newbase -- base to convert
number_digits -- number of digits in new base (add zero in the beginning)
"""
assert(x >= 0)
r = []
while x > 0:
r = [x % newbase] + r
x //= newbase
for i in range(number_digits-len(r)):
r = [0] + r
return r
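# Illustrative examples of baseconvert (hypothetical inputs):
#   baseconvert(5, 2, 4)   # -> [0, 1, 0, 1]  (5 written in base 2, padded to 4 digits)
#   baseconvert(10, 3, 3)  # -> [1, 0, 1]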
def uniform_grid(C_limits, N_each):
C_tmp = np.linspace(C_limits[0], C_limits[1], N_each + 1)
C_tmp = C_tmp[:-1] + (C_tmp[1] - C_tmp[0]) / 2
return C_tmp
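# Illustrative example (hypothetical limits): uniform_grid([0.0, 1.0], 4)
# returns the cell centres [0.125, 0.375, 0.625, 0.875], i.e. the midpoints
# of an even partition of [0, 1] rather than the bin edges.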
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, s = np.mean(a), np.std(a)
    h = s / np.sqrt(n) * sp.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, h
def take_safe_log(x):
"""Takes natural logarithm and put g.TINY number where x = 0"""
log_fill = np.empty_like(x)
log_fill.fill(g.TINY_log)
log = np.log(x, out=log_fill, where=x > g.TINY)
return log
def covariance_recursive(x, t, cov_prev, mean_prev, s_d):
mean_new = t / (t + 1) * mean_prev + 1 / (t + 1) * x
cov = (t - 1) / t * cov_prev + \
s_d / t * (t * np.outer(mean_prev, mean_prev) - (t + 1) * np.outer(mean_new, mean_new) + np.outer(x, x))
return cov, mean_new
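# Note: this is the standard recursive update of the empirical covariance used
# in adaptive Metropolis sampling (cf. Haario et al., 2001); s_d is the scaling
# factor chosen by the caller (a common choice is s_d = 2.4**2 / N_params).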
def tophat_kernel(k, limit):
"""Create 3D array of Tophat filter.
k - array of wave numbers;
limit - cutoff wavenumber."""
a = np.zeros((len(k[0]), len(k[1]), len(k[2])), dtype=np.float32)
for indx, kx in enumerate(k[0]):
for indy, ky in enumerate(k[1]):
for indz, kz in enumerate(k[2]):
a[indx, indy, indz] = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2)
kernel = np.piecewise(a, [a <= limit, a > limit], [1, 0])
return kernel
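# Note: the kernel built here is a sharp spectral cutoff in wavenumber space:
# its transfer function is 1 for |k| <= limit and 0 above it; filter3d below
# applies it by pointwise multiplication in Fourier space.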
def filter3d(data, scale_k, dx, N_points, filename=None):
""" Tophat filter in Fourier space for dictionary of 3D arrays.
data - dictionary of numpy arrays;
scale_k - wave number, which define size of filter."""
# FFT
start = time()
FFT = dict()
for key, value in data.items():
FFT[key] = fftn(value)
k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
end = time()
timer(start, end, 'Time for FFT')
# Filtering
start = time()
kernel = tophat_kernel(k, scale_k)
end = time()
timer(start, end, 'Time for creating filter kernel')
start = time()
result = dict()
fft_filtered = dict()
for key, value in FFT.items():
fft_filtered[key] = np.multiply(value, kernel)
end = time()
timer(start, end, 'Time for filtering')
FFT.clear()
start = time()
for key, value in fft_filtered.items():
result[key] = ifftn(value).real
end = time()
timer(start, end, 'Time for iFFT')
fft_filtered.clear()
if filename:
logging.info('\nWrite file in ./data/' + filename + '.npz')
file = './data/' + filename + '.npz'
np.savez(file, **result)
return result
# def filter3d_array(array, scale_k):
#
# fft_array = fftn(array)
# k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
# kernel = tophat_kernel(k, scale_k)
# fft_filtered = np.multiply(fft_array, kernel)
# result = ifftn(fft_filtered).real
#
# return result
#
# def filter3d_array_inFspace(array, scale_k):
# logging.info(array.shape)
# k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
# kernel = tophat_kernel(k, scale_k)
# fft_filtered = np.multiply(array, kernel)
#
# return fft_filtered
########################################################################################################################
## Sampling functions
########################################################################################################################
def sampling_initial_for_MCMC(N_proc, C_limits, eps):
""" Find starting points for MCMC. (Sample randomly and save if distance < eps)
:return: list of lists of parameters
"""
C_array = []
while len(C_array) <= N_proc:
c = np.random.uniform(C_limits[:, 0], C_limits[:, 1])
d = dist.calc_dist(c)
if d <= eps:
C_array.append(c)
logging.info('C_start = {}'.format(c))
return C_array
def sampling_initial_for_gaussian_mixture(N_proc, N_gaussians, C_limits, eps):
""" Find starting points for Gaussian Mixture. (Sample randomly and save if distance < eps)
:return: list of lists of parameters
"""
C_array = []
start = time()
from tqdm import tqdm
with tqdm(total=N_proc*N_gaussians) as pbar:
for i in range(N_proc):
c_array = []
while len(c_array) < N_gaussians:
c = np.random.uniform(C_limits[:, 0], C_limits[:, 1])
                d = dist.calc_dist(c)
                if d <= eps:
c_array.append(c)
pbar.update()
C_array.append(np.array(c_array))
pbar.close()
end = time()
timer(start, end, 'Time for sampling')
return C_array
def sampling_sobol(N_total, C_limits):
""" Generate Sobol' sequense of parameters. (low-discrepency quasi-random sampling)
:return: list of lists of sampled parameters
"""
N_params = len(C_limits)
C_array = i4_sobol_generate(N_params, N_total)
for i in range(N_params):
C_array[:, i] = C_array[:, i] * (C_limits[i, 1] - C_limits[i, 0]) + C_limits[i, 0]
C_array = C_array.tolist()
return C_array
def sampling_random(N_total, C_limits):
"""
"""
N_params = len(C_limits)
C_array = np.random.random(size=(N_total, N_params))
for i in range(g.N.params):
C_array[:, i] = C_array[:, i] * (C_limits[i, 1] - C_limits[i, 0]) + C_limits[i, 0]
C_array = C_array.tolist()
return C_array
def sampling_uniform_grid(N_each, N_params_in_task, C_limits):
""" Create list of lists of N parameters manually (make grid) uniformly distributed on given interval
:return: list of lists of sampled parameters
"""
N_params = len(C_limits)
if N_params == 1:
# C1 = np.linspace(C_limits[0, 0], C_limits[0, 1], N_each)
C1 = uniform_grid(C_limits[0], N_each)
C_array = []
for i in C1:
C_array.append([i])
else:
C = np.empty((N_params - N_params_in_task, N_each))
for i in range(N_params - N_params_in_task):
# C[i, :] = np.linspace(C_limits[i, 0], C_limits[i, 1], N_each)
C[i, :] = uniform_grid(C_limits[i], N_each)
permutation = itertools.product(*C)
C_array = list(map(list, permutation))
logging.debug('Form C_array as uniform grid: {} samples\n'.format(len(C_array)))
return C_array
| gpl-3.0 | 2,585,686,329,948,227,000 | 30.78327 | 120 | 0.569685 | false |
icists/ams2 | django/registration/migrations/0001_initial.py | 1 | 4035 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 01:44
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('policy', '0006_auto_20171025_0144'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stage', models.CharField(choices=[('E', 'Early'), ('R', 'Regular'), ('L', 'Late')], max_length=1)),
('screening_result', models.CharField(choices=[('A', 'Accepted'), ('R', 'Rejected'), ('P', 'Pending')], default='P', max_length=1)),
('disclose_result', models.BooleanField(default=False)),
('essay_text', models.TextField(blank=True)),
('visa_letter', models.BooleanField(default=False)),
('financial_aid', models.BooleanField(default=False)),
('previous_participation', models.BooleanField(default=False)),
('last_update', models.DateTimeField(auto_now=True)),
('essay_topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='policy.EssayTopic')),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
options={
'verbose_name': 'applicant group',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('preferred_currency', models.CharField(choices=[('KRW', 'KRW'), ('USD', 'USD')], default='KRW', max_length=3)),
('paid_amount_currency', djmoney.models.fields.CurrencyField(choices=[('KRW', 'KRW'), ('USD', 'USD')], default='KRW', editable=False, max_length=3)),
('paid_amount', djmoney.models.fields.MoneyField(decimal_places=0, default=Decimal('0'), default_currency='KRW', max_digits=7)),
('dietary_preferences', models.CharField(blank=True, max_length=100, null=True)),
('breakfast_option', models.BooleanField(default=False)),
('pre_option', models.BooleanField(default=False, verbose_name='pre-conference banquet')),
('post_option', models.BooleanField(default=False, verbose_name='post-conference tour')),
('accommodation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='policy.AccommodationOption')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='application',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='registration.Group'),
),
migrations.AddField(
model_name='application',
name='topic_preference',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='policy.ProjectTopic'),
),
migrations.AddField(
model_name='application',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to=settings.AUTH_USER_MODEL),
),
]
| mit | 3,841,872,372,975,416,000 | 51.402597 | 165 | 0.602974 | false |
tm-softworks/OrderGetterR | src/OrderList.py | 1 | 28480 |
import os
import sys
from collections import OrderedDict
from datetime import datetime, timedelta
import calendar
from argparse import ArgumentParser
import configparser
import traceback
import time
import csv
import io
import copy
import hashlib
import logging
import logging.handlers
import pytz
from rakuten_ws import RakutenWebService
import zeep
from zeep.helpers import serialize_object
JST = pytz.timezone('Asia/Tokyo')
logger = logging.getLogger()
config = None
OUTPUT_KEY = 'output.'
GET_ORDER_ROOT_KEY = 'getOrderRequestModel'
GET_ORDER_SEARCH_ROOT_KEY = 'orderSearchModel'
ORDER_SEARCH_START_DATE_KEY = 'startDate'
ORDER_SEARCH_END_DATE_KEY = 'endDate'
ORDER_SEARCH_START_DATETIME_KEY = 'startDatetime'
ORDER_SEARCH_END_DATETIME_KEY = 'endDatetime'
GENERAL_SECTION_KEY = 'general'
GENERAL_PERIOD_KEY = 'period'
GENERAL_DURATION_1CALL_KEY = 'duration'
GENERAL_THIS_MONTH_KEY = 'thisMonth'
GENERAL_PREV_MONTH_KEY = 'prevMonth'
GENERAL_GET_ORDER_VERSION_KEY = 'getOrderVersion'
GET_ORDER_COUNT_LIMIT = 100
class OrderList:
def __init__(self):
self.targetShop = []
self.isTest = False
self.defaultConfigFile = 'setting.ini'
self.version = '1.0.1'
self.myname = 'OrderList'
self.config = None
def initLog(self, logPath):
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(threadName)s - %(lineno)s - %(name)s - %(message)s')
rfh = logging.handlers.RotatingFileHandler(
filename=logPath,
maxBytes=10*1024*1024,
backupCount=7
)
rfh.setLevel(logging.DEBUG)
rfh.setFormatter(formatter)
logger.addHandler(rfh)
logger.debug('initLog')
def parser(self):
usage = 'Usage: %(prog)s [option] input_file'
argparser = ArgumentParser(usage=usage,
epilog="""
Copyright (C) 2017 T.M SoftWorks ( [email protected] )
""")
argparser.add_argument('input_file')
argparser.add_argument('-v', '--verbose',
action='store_true',
help='show verbose message')
argparser.add_argument('-c', '--conf', type=str,
dest='config_file',
default=self.defaultConfigFile,
help='config file name')
argparser.add_argument('-p', '--coupon',
action='store_true',
help='coupon detail')
argparser.add_argument('-s', '--shipping-detail',
action='store_true',
help='shipping detail')
argparser.add_argument('-d', '--dry-run',
action='store_true',
help='dry run')
argparser.add_argument('--version',
action='version',
version='%(prog)s '+self.version)
args = argparser.parse_args()
return args
def emptyConfig(self):
condition = configparser.ConfigParser()
condition['global'] = {}
condition['api'] = {}
return condition
def defaultConfigPart(self, conf, key, value):
if not key in conf: conf[key] = value
def defaultConfig(self, conf):
g = conf['global']
self.defaultConfigPart(g, 'logDir', './logs')
self.defaultConfigPart(g, 'logFile', 'orderlist.log')
self.defaultConfigPart(g, 'outDir', './data')
a = conf['api']
self.defaultConfigPart(a, 'User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36')
self.defaultConfigPart(a, 'input_encoding', 'cp932')
self.defaultConfigPart(a, 'output_encoding', 'cp932')
self.defaultConfigPart(a, 'input_delimiter', ',')
self.defaultConfigPart(a, 'input_quotechar', '"')
self.defaultConfigPart(a, 'output_delimiter', ',')
self.defaultConfigPart(a, 'output_quotechar', '"')
self.defaultConfigPart(a, 'continue_errorcode', 'N00-000,W00-000,E10-001')
self.defaultConfigPart(a, 'nothing_errorcode', 'E10-001')
self.defaultConfigPart(a, 'warning_errorcode', 'W00-000')
self.defaultConfigPart(a, 'call_per_sec', '1')
self.defaultConfigPart(a, 'list_keys', 'orderNumber,status,orderType,mailAddressType,pointStatus,rbankStatus,orderSite,enclosureStatus,cardStatus,payType')
self.defaultConfigPart(a, 'list_number_keys', 'orderProgressList,subStatusIdList,orderTypeList')
self.defaultConfigPart(a, 'date_keys', 'startDate,endDate')
self.defaultConfigPart(a, 'datetime_keys', 'startDatetime,endDatetime')
self.defaultConfigPart(a, 'bool_keys', 'isOrderNumberOnlyFlg,pointUsed,modify,asuraku,coupon')
self.defaultConfigPart(a, 'number_keys', 'dateType,settlementMethod,shippingDateBlankFlag,shippingNumberBlankFlag,searchKeywordType,mailSendType,phoneNumberType,purchaseSiteType,asurakuFlag,couponUseFlag,drugFlag,overseasFlag,requestRecordsAmount,requestPage,sortColumn,sortDirection')
self.defaultConfigPart(a, 'parse_format', '%%Y/%%m/%%d %%H:%%M:%%S')
self.defaultConfigPart(a, 'parse_format_datetime', '%%Y-%%m-%%dT%%H:%%M:%%S%%z')
self.defaultConfigPart(a, 'datetime_format', '{0:%%Y/%%m/%%d %%H:%%M:%%S}')
self.defaultConfigPart(a, 'RPay', '0')
self.defaultConfigPart(a, 'getOrderVersion', '1')
prev_apicall = None
def getOrderTest(self):
return {
'errorCode': 'N00-000',
'message': '\u6b63\u5e38\u7d42\u4e86',
'unitError': [],
'orderModel': [{
'childOrderModel': [],
'couponModel': [{
'couponCode': 'COUPON1',
}],
'packageModel': []
}]}
def getRmsService(self, conf):
credentials = {
'license_key': conf['licenseKey'],
'secret_service': conf.get('serviceSecret') or conf.get('secretService'),
'shop_url': conf['shopUrl'],
}
ws = RakutenWebService(**credentials)
ua = 'OrderListClient/1.0.1'
if conf['RPay'] == '0':
ws.rms.order.zeep_client.transport.session.headers['User-Agent'] = ua
else:
ws.rms.rpay.search_order.client.service.webservice.session.headers['User-Agent'] = ua
ws.rms.rpay.get_order.client.service.webservice.session.headers['User-Agent'] = ua
return ws
def genLicense(self, a, s):
source = (a+s).encode('utf-8')
return hashlib.sha256(source).hexdigest()
def checkShopUrl(self, conf):
        target = ''
s = conf['api']['shopUrl']
def checkTargetShop(self, conf):
s = conf['api']['shopUrl']
for shop in self.targetShop:
if s.startswith(shop):
return True
return False
def getOrder(self, ws, input_dict, conf):
logger.info('getOrder start')
logger.debug('getOrder: {}'.format(input_dict))
wait_sec = int(conf['call_per_sec'])
args = input_dict[GET_ORDER_ROOT_KEY]
self.prev_apicall = self.waitSec(self.prev_apicall, wait_sec)
ret = ws.rms.order.getOrder(**args)
if 'errorCode' in ret and not ret['errorCode'] in ['N00-000', 'W00-000']:
logger.error('{}'.format(ret))
#logger.debug(ws.ichiba.item.search(keyword='4562373379528'))
logger.debug(ret)
return ret
def getOrderRPay(self, ws, input_dict, conf):
logger.info('getOrderRPay start')
logger.debug('getOrderRPay: {}'.format(input_dict))
wait_sec = int(conf['call_per_sec'])
args = input_dict[GET_ORDER_ROOT_KEY]
if 'startDate' in args[GET_ORDER_SEARCH_ROOT_KEY]:
args['startDatetime'] = args[GET_ORDER_SEARCH_ROOT_KEY]['startDate']
if 'endDate' in args[GET_ORDER_SEARCH_ROOT_KEY]:
args['endDatetime'] = args[GET_ORDER_SEARCH_ROOT_KEY]['endDate']
del args[GET_ORDER_SEARCH_ROOT_KEY]
self.prev_apicall = self.waitSec(self.prev_apicall, wait_sec)
ret = ws.rms.rpay.search_order(**args)
logger.debug('search_order result: {}'.format(vars(ret)))
if 'errorCode' in ret and not ret['errorCode'] in ['N00-000', 'W00-000']:
logger.error('{}'.format(ret))
#logger.debug(ws.ichiba.item.search(keyword='4562373379528'))
logger.debug(vars(ret))
logger.debug(ret.get('orderNumberList'))
result_array = []
if 'orderNumberList' in ret and len(ret['orderNumberList']) > 0:
orderNumberList = ret['orderNumberList']
index = 0
while True:
targetList = orderNumberList[index:index+GET_ORDER_COUNT_LIMIT]
logger.info('get_order: {} - {}'.format(index, index + len(targetList) - 1))
if len(targetList) == 0:
break
index += len(targetList)
args = {"orderNumberList": targetList}
if GENERAL_GET_ORDER_VERSION_KEY in conf:
args["version"] = conf[GENERAL_GET_ORDER_VERSION_KEY]
self.prev_apicall = self.waitSec(self.prev_apicall, wait_sec)
ret2 = ws.rms.rpay.get_order(**args)
logger.debug('get_order result: {}'.format(vars(ret2)))
messages = ret2["MessageModelList"]
result_array.extend(ret2["OrderModelList"])
logger.info('get_order: {}'.format(len(targetList)))
if len(targetList) < GET_ORDER_COUNT_LIMIT:
break
return {'orderModel': result_array, 'errorCode': 'N00-000', 'message': 'Found'}
else:
ret['errorCode'] = 'W00-000'
ret['message'] = 'Not Found'
ret['orderModel'] = []
return ret
def to_bool(self, s):
return False if s.lower() in ['false', '0'] else True
def datetimeJST(self, year, month, day, hour=0, minute=0, second=0):
return datetime(year, month, day, hour, minute, second, tzinfo=JST)
def add_months(self, sourcedate, months):
month = sourcedate.month - 1 + months
year = int(sourcedate.year + month / 12 )
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year, month)[1])
return datetime(year,month,day)
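    # Illustrative example: add_months(datetime(2020, 1, 31), 1) returns
    # datetime(2020, 2, 29); the day is clamped to the length of the target
    # month, and the result is a naive datetime (no tzinfo attached).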
def readOutputColumn(self, condition):
logger.debug("readOutputColumn")
new_dict = OrderedDict()
for section in condition.sections():
if section.startswith(OUTPUT_KEY):
keys = section.split('.')
if len(keys) >= 2 and (keys[1] == 'orderModel' or keys[1] == 'OrderModelList'):
prefix = ".".join(keys[2:])
if len(prefix): prefix += "."
for key in condition[section]:
val = condition[section][key]
new_dict[prefix+key] = val
logger.debug(new_dict)
return new_dict
def readCondition(self, config, condition):
list_keys = config['api']['list_keys'].split(',')
list_number_keys = config['api']['list_number_keys'].split(',')
date_keys = config['api']['date_keys'].split(',')
datetime_keys = config['api']['datetime_keys'].split(',')
bool_keys = config['api']['bool_keys'].split(',')
number_keys = config['api']['number_keys'].split(',')
new_hash = {}
for section in condition.sections():
if section.startswith(GET_ORDER_ROOT_KEY):
new_dict = {}
for key in condition[section]:
val = condition[section][key]
if not len(val) == 0:
if key in list_keys:
new_dict[key] = val.split(',')
elif key in list_number_keys:
s = val.split(',')
new_dict[key] = [int(i) for i in s]
elif key in date_keys:
parse_format = config['api']['parse_format']
new_dict[key] = JST.localize(datetime.strptime(val, parse_format))
elif key in datetime_keys:
parse_format = config['api']['parse_format_datetime']
new_dict[key] = datetime.strptime(val, parse_format)
elif key in bool_keys:
new_dict[key] = self.to_bool(val)
elif key in number_keys:
new_dict[key] = int(val)
else:
new_dict[key] = val
if len(new_dict):
keys = section.split('.')
tmp_hash = new_hash
for k in keys:
if not k in tmp_hash:
tmp_hash[k] = {}
tmp_hash = tmp_hash[k]
tmp_hash.update(new_dict)
general_conf = {}
if GENERAL_SECTION_KEY in condition.sections():
if not GET_ORDER_ROOT_KEY in new_hash:
new_hash[GET_ORDER_ROOT_KEY] = {}
if not GET_ORDER_SEARCH_ROOT_KEY in new_hash[GET_ORDER_ROOT_KEY]:
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY] = {}
if GENERAL_DURATION_1CALL_KEY in condition[GENERAL_SECTION_KEY]:
general_conf[GENERAL_DURATION_1CALL_KEY] = condition[GENERAL_SECTION_KEY][GENERAL_DURATION_1CALL_KEY]
if GENERAL_PERIOD_KEY in condition[GENERAL_SECTION_KEY]:
period = condition[GENERAL_SECTION_KEY][GENERAL_PERIOD_KEY]
if len(period):
toDate = JST.localize(datetime.now())
fd = toDate - timedelta(days=int(period))
fromDate = JST.localize(datetime(fd.year, fd.month, fd.day))
logger.debug('{} - {}'.format(fromDate, toDate))
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_START_DATE_KEY] = fromDate
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_END_DATE_KEY] = toDate
prevMonth = condition[GENERAL_SECTION_KEY][GENERAL_PREV_MONTH_KEY]
if prevMonth == "1":
now = JST.localize(datetime.now())
fd = self.add_months(now, -1)
fromDate = JST.localize(datetime(fd.year, fd.month, 1))
toDateTmp = JST.localize(datetime(now.year, now.month, 1, 23, 59, 59))
toDate = toDateTmp - timedelta(days=1)
logger.debug('{} - {}'.format(fromDate, toDate))
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_START_DATE_KEY] = fromDate
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_END_DATE_KEY] = toDate
thisMonth = condition[GENERAL_SECTION_KEY][GENERAL_THIS_MONTH_KEY]
if thisMonth == "1":
toDate = JST.localize(datetime.now())
fromDate = JST.localize(datetime(toDate.year, toDate.month, 1))
logger.debug('{} - {}'.format(fromDate, toDate))
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_START_DATE_KEY] = fromDate
new_hash[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_END_DATE_KEY] = toDate
return (new_hash, general_conf)
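    # Illustrative input-file layout assumed by readCondition (hypothetical
    # values only):
    #   [general]
    #   period = 7
    #   thisMonth = 0
    #   prevMonth = 0
    #   duration =
    #
    #   [getOrderRequestModel.orderSearchModel]
    #   dateType = 1
    #   startDate = 2017/01/01 00:00:00
    #   endDate = 2017/01/31 23:59:59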
def genFileName(self, key, outPath='.', ext='csv'):
now = datetime.now()
name = "{3}/{0:%Y%m%d_%H%M%S}_{1}.{2}".format(now, key, ext, outPath)
return name
def datetimeSplit(self, start, end, duration):
if duration < 0:
return [{'start': start, 'end': end}]
result = []
s = start
while True:
e = s + timedelta(seconds=(duration - 1))
if e > end: e = end
result.append({'start': s, 'end': e})
if e == end: break
s = e + timedelta(seconds=1)
logger.debug('datetimeSplit: {}'.format(result))
return result
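    # Illustrative example (hypothetical range): splitting one minute into
    # 30-second windows,
    #   datetimeSplit(datetime(2020, 1, 1, 0, 0, 0),
    #                 datetime(2020, 1, 1, 0, 0, 59), 30)
    # yields two inclusive windows, 00:00:00-00:00:29 and 00:00:30-00:00:59;
    # a negative duration returns the whole range as a single window.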
def readInput(self, config, input_file):
condition = configparser.ConfigParser()
condition.optionxform = str
condition.read(input_file, encoding='cp932')
(hash, general_conf) = self.readCondition(config, condition)
outputColumns = self.readOutputColumn(condition)
return (hash, outputColumns, general_conf)
def quotedValue(self, data, qc='"'):
return qc+data+qc
def quotedAppendList(self, _list, qc, data):
_list.append(data)
# _list.append(qc + data + qc)
def waitSec(self, prev, maxWait = 3):
now = time.time()
if prev != None:
sec = now - prev
ssec = maxWait - sec
if ssec > 0:
logger.debug('sleep: {}, sleep={}'.format(sec, ssec))
time.sleep(ssec)
return now
def findObj(self, obj, path):
keys = path.split('.')
d = obj
for k in keys:
if k in d:
d = d[k]
return d if d != obj else None
def grabChildren(self, father, prefix = ""):
local_list = {}
if not isinstance(father, dict): return local_list
for key, value in father.items():
#local_list.append(key)
if isinstance(value, dict):
local_list.update(self.grabChildren(value, prefix+key+"."))
elif isinstance(value, list):
local_list[prefix+key] = value
else:
local_list[prefix+key] = value
return local_list
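    # Illustrative example: grabChildren({'a': {'b': 1}, 'c': 2}) returns
    # {'a.b': 1, 'c': 2}; nested dicts are flattened into dot-separated keys,
    # while list values are stored unchanged under their dotted key.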
def extendOrder(self, orderModel):
'''
orderModel[]
packageModel[]
itemModel[]
'''
extendedOrderModel = []
order_dict = self.grabChildren(orderModel)
key1 = 'packageModel' if 'packageModel' in orderModel else 'PackageModelList'
for packageModel in orderModel[key1]:
prefix = key1+'.'
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = 'itemModel' if 'itemModel' in packageModel else 'ItemModelList'
for itemModel in packageModel[key2]:
prefix = key1+'.'+key2+'.'
item_dict = self.grabChildren(itemModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedOrderModel.append(new_dict)
return extendedOrderModel
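    # Each dict returned above is one (order, package, item) combination with
    # dot-separated keys such as 'packageModel.itemModel.itemName' (or
    # 'PackageModelList.ItemModelList.*' for R-Pay responses); these keys are
    # what writeOutput matches against the output.* column mapping.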
def writeOutput(self, conf, output_file, output_columns, result, writeHeader):
logger.debug("writeOutput: rows={}".format(len(result)))
csv_writer = csv.writer(
output_file,
#sys.stdout,
dialect='excel',
lineterminator='\n',
delimiter=conf['output_delimiter'],
quotechar=conf['output_quotechar'],
quoting=csv.QUOTE_ALL,
)
datetime_format = conf['datetime_format']
headers = []
listOrderModel = result['orderModel']
linum = 0
for (index, orderModelObj) in enumerate(listOrderModel):
logger.debug('{}: {}'.format(index, orderModelObj))
if conf['RPay'] == '0':
orderModel = zeep.helpers.serialize_object(orderModelObj)
else:
orderModel = orderModelObj
if isinstance(orderModel, dict):
extendedOrderModel = self.extendOrder(orderModel)
for eo in extendedOrderModel:
cols = []
for (oc, col) in output_columns.items():
if oc.find('couponModel') < 0 and oc.find('childOrderModel') < 0 and oc.find('CouponModelList') < 0:
if linum == 0: headers.append(col)
v = ""
if oc in eo:
if isinstance(eo[oc], datetime):
v = datetime_format.format(eo[oc])
else:
v = eo[oc]
cols.append(v)
if linum == 0 and writeHeader: csv_writer.writerow(headers)
csv_writer.writerow(cols)
linum += 1
return linum
def extendCouponDetail(self, orderModel):
'''
orderModel[]
couponModel[]
'''
extendedCouponModel = []
order_dict = self.grabChildren(orderModel)
key1 = 'couponModel' if 'couponModel' in orderModel else 'CouponModelList'
if orderModel[key1] is not None:
for packageModel in orderModel[key1]:
prefix = key1+'.'
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
extendedCouponModel.append(new_dict)
return extendedCouponModel
def writeCouponDetail(self, conf, output_file, output_columns, result, writeHeader):
if not output_file: return 0
logger.debug("writeCouponDetail: rows={}".format(len(result)))
csv_writer = csv.writer(
output_file,
#sys.stdout,
dialect='excel',
lineterminator='\n',
delimiter=conf['output_delimiter'],
quotechar=conf['output_quotechar'],
quoting=csv.QUOTE_ALL,
)
datetime_format = conf['datetime_format']
headers = []
listOrderModel = result['orderModel']
linum = 0
for (index, orderModelObj) in enumerate(listOrderModel):
logger.debug('{}: {}'.format(index, orderModelObj))
orderModel = zeep.helpers.serialize_object(orderModelObj)
if isinstance(orderModel, dict):
extendedCouponModel = self.extendCouponDetail(orderModel)
for eo in extendedCouponModel:
cols = []
for (oc, col) in output_columns.items():
if oc.find('coupon') >= 0 or oc.find('orderNumber') >= 0:
if linum == 0: headers.append(col)
v = ""
if oc in eo:
if isinstance(eo[oc], datetime):
v = datetime_format.format(eo[oc])
else:
v = eo[oc]
cols.append(v)
else:
continue
if linum == 0 and writeHeader: csv_writer.writerow(headers)
csv_writer.writerow(cols)
linum += 1
return linum
def extendShippingDetail(self, orderModel):
'''
        orderModel[]
        packageModel[]
        shippingModel[]
'''
extendedShippingModel = []
order_dict = self.grabChildren(orderModel)
key1 = 'packageModel' if 'packageModel' in orderModel else 'PackageModelList'
for packageModel in orderModel[key1]:
prefix = key1+'.'
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = 'ShippingModelList'
if packageModel.get(key2) is not None:
for packageModel in packageModel[key2]:
prefix = key1+'.'+key2+'.'
logger.debug('{}'.format(packageModel))
item_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedShippingModel.append(new_dict)
return extendedShippingModel
def writeShippingDetail(self, conf, output_file, output_columns, result, writeHeader):
if not output_file: return 0
logger.debug("writeShippingDetail: rows={}".format(len(result)))
csv_writer = csv.writer(
output_file,
#sys.stdout,
dialect='excel',
lineterminator='\n',
delimiter=conf['output_delimiter'],
quotechar=conf['output_quotechar'],
quoting=csv.QUOTE_ALL,
)
datetime_format = conf['datetime_format']
headers = []
listOrderModel = result['orderModel']
linum = 0
for (index, orderModelObj) in enumerate(listOrderModel):
logger.debug('{}: {}'.format(index, orderModelObj))
orderModel = zeep.helpers.serialize_object(orderModelObj)
if isinstance(orderModel, dict):
extendedShippingModel = self.extendShippingDetail(orderModel)
for eo in extendedShippingModel:
cols = []
for (oc, col) in output_columns.items():
if oc.find('ShippingModelList') >= 0 or oc.find('orderNumber') >= 0 or oc.find('basketId') >= 0:
if linum == 0: headers.append(col)
v = ""
if oc in eo:
if isinstance(eo[oc], datetime):
v = datetime_format.format(eo[oc])
else:
v = eo[oc]
cols.append(v)
else:
continue
if linum == 0 and writeHeader: csv_writer.writerow(headers)
csv_writer.writerow(cols)
linum += 1
return linum
def main(self):
ol = OrderList()
try:
args = self.parser()
config = configparser.ConfigParser()
config.read(args.config_file, encoding='cp932')
self.defaultConfig(config)
logDir = config['global']['logDir']
logFile = config['global']['logFile']
outDir = config['global']['outDir']
continueErrorCode = config['api']['continue_errorcode'].split(',')
nothingErrorCode = config['api']['nothing_errorcode'].split(',')
warningErrorCode = config['api']['warning_errorcode'].split(',')
rpay = True if config['api']['RPay'] == '1' else False
os.makedirs(logDir, exist_ok=True)
os.makedirs(outDir, exist_ok=True)
self.initLog('{}/{}'.format(logDir, logFile))
logger.info('start')
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.debug(args)
# read input
(input_dict, output_columns, general_conf) = self.readInput(config, args.input_file)
if ORDER_SEARCH_START_DATE_KEY in input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY]:
start = input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_START_DATE_KEY]
else:
start = input_dict[GET_ORDER_ROOT_KEY]['startDatetime']
if ORDER_SEARCH_END_DATE_KEY in input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY]:
end = input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_END_DATE_KEY]
else:
end = input_dict[GET_ORDER_ROOT_KEY]['endDatetime']
duration_1call = -1
if GENERAL_DURATION_1CALL_KEY in general_conf:
val = general_conf[GENERAL_DURATION_1CALL_KEY]
if val: duration_1call = int(val)
datetimeList = self.datetimeSplit(start, end, duration_1call)
ws = self.getRmsService(config['api'])
total_output = 0
index = 0
outfile = self.genFileName('order', outDir)
couponfile = self.genFileName('coupon', outDir)
shippingfile = self.genFileName('shipping', outDir)
coupon = args.coupon
coupon_file = None
shipping = args.shipping_detail
shipping_file = None
writeCouponHeader = True
writeShippingHeader = True
with io.open(outfile, "w", encoding=config['api']['output_encoding'], errors='replace') as output_file:
if coupon:
coupon_file = io.open(couponfile, "w", encoding=config['api']['output_encoding'], errors='replace')
if shipping:
shipping_file = io.open(shippingfile, "w", encoding=config['api']['output_encoding'], errors='replace')
for dt in datetimeList:
if not GET_ORDER_SEARCH_ROOT_KEY in input_dict[GET_ORDER_ROOT_KEY]:
input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY] = {}
input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_START_DATE_KEY] = dt['start']
input_dict[GET_ORDER_ROOT_KEY][GET_ORDER_SEARCH_ROOT_KEY][ORDER_SEARCH_END_DATE_KEY] = dt['end']
logger.debug(input_dict)
result = None
if not args.dry_run:
if rpay:
result = self.getOrderRPay(ws, input_dict, config['api'])
else:
result = self.getOrder(ws, input_dict, config['api'])
else:
ss = "{0:%Y/%m/%d %H:%M:%S}".format(dt['start'])
es = "{0:%Y/%m/%d %H:%M:%S}".format(dt['end'])
print('getOrder: {} - {}'.format(ss, es))
result = self.getOrderTest()
if 'errorCode' in result and not result['errorCode'] in continueErrorCode:
err = '{}: {}'.format(result['errorCode'], result['message'])
print(' {}'.format(err))
logger.error('{}'.format(err))
#logger.error('unitError: {}'.format(result['unitError']))
raise Exception(err)
elif 'errorCode' in result and result['errorCode'] in nothingErrorCode:
warn = '{}: {}'.format(result['errorCode'], result['message'])
logger.warn(warn)
continue
elif 'errorCode' in result and result['errorCode'] in warningErrorCode:
warn = '{}: {}'.format(result['errorCode'], result['message'])
print(' {}'.format(warn))
logger.warn('{}'.format(warn))
#logger.warn('unitError: {}'.format(result['unitError']))
if not len(result['orderModel']):
continue
cnt = self.writeOutput(config['api'], output_file,
output_columns, result, index == 0)
total_output += cnt
print(' Write Success: line={}'.format(cnt))
cwnum = self.writeCouponDetail(config['api'], coupon_file,
output_columns, result, writeCouponHeader)
if cwnum > 0:
writeCouponHeader = False
cwnum = self.writeShippingDetail(config['api'], shipping_file,
output_columns, result, writeShippingHeader)
if cwnum > 0:
writeShippingHeader = False
index += 1
except Exception as e:
print(' {}'.format(e))
logger.error(e)
logger.error(traceback.format_exc())
logger.info('end')
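# Illustrative entry point (assumes the module is executed directly as a script):
#   if __name__ == '__main__':
#       OrderList().main()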
| mit | -3,231,433,813,791,966,000 | 37.228188 | 289 | 0.608216 | false |