repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
kuri65536/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/integration/POP3_TLS.py | 271 | 5466 | """TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
"""This class extends L{poplib.POP3} with TLS support."""
def __init__(self, host, port = POP3_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new POP3_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
### New code below (all else copied from poplib)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
###
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
| apache-2.0 | 909,638,754,900,657,800 | 37.492958 | 83 | 0.636114 | false |
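The POP3_TLS docstring above enumerates the supported authentication argument combinations; the sketch below is a minimal, hypothetical usage example based on that description (the host name and SRP credentials are placeholders, not values taken from the source):

from gdata.tlslite.integration.POP3_TLS import POP3_TLS

# SRP client authentication, i.e. the "username, password (SRP)" combination
# listed in the docstring; host and credentials are made up for illustration.
pop = POP3_TLS("mail.example.com", username="alice", password="secret")
welcome = pop.welcome  # server greeting captured at the end of __init__
pop.quit()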
TeMPO-Consulting/mediadrop | mediacore/model/settings.py | 1 | 4239 | # This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2013 MediaCore Inc., Felix Schwarz and other contributors.
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Settings Model
A very rudimentary settings implementation which is intended to store our
non-mission-critical options that can be edited via the admin UI.
.. todo:
Rather than fetch one option at a time, load all settings into an object
with attribute-style access.
"""
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.types import Unicode, UnicodeText, Integer, Boolean, Float
from sqlalchemy.orm import mapper, relation, backref, synonym, interfaces, validates
from urlparse import urlparse
from mediacore.model.meta import DBSession, metadata
from mediacore.plugin import events
settings = Table('settings', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False, unique=True),
Column('value', UnicodeText),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
multisettings = Table('settings_multi', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False),
Column('value', UnicodeText, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
class Setting(object):
"""
A Single Setting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<Setting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
class MultiSetting(object):
"""
A MultiSetting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<MultiSetting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
mapper(Setting, settings, extension=events.MapperObserver(events.Setting))
mapper(MultiSetting, multisettings, extension=events.MapperObserver(events.MultiSetting))
def insert_settings(defaults):
"""Insert the given setting if they don't exist yet.
XXX: Does not include any support for MultiSetting. This approach
won't work for that. We'll need to use sqlalchemy-migrate.
:type defaults: list
:param defaults: Key and value pairs
:rtype: list
:returns: Any settings that have just been created.
"""
inserted = []
try:
settings_query = DBSession.query(Setting.key)\
.filter(Setting.key.in_([key for key, value in defaults]))
existing_settings = set(x[0] for x in settings_query)
except ProgrammingError:
# If we are running paster setup-app on a fresh database with a
# plugin which tries to use this function every time the
# Environment.loaded event fires, the settings table will not
# exist and this exception will be thrown, but it's safe to ignore.
# The settings will be created the next time the event fires,
# which will likely be the first time the app server starts up.
return inserted
for key, value in defaults:
if key in existing_settings:
continue
transaction = DBSession.begin_nested()
try:
s = Setting(key, value)
DBSession.add(s)
transaction.commit()
inserted.append(s)
except IntegrityError:
transaction.rollback()
if inserted:
DBSession.commit()
return inserted
def fetch_and_create_multi_setting(key, value):
multisettings = MultiSetting.query\
.filter(MultiSetting.key==key)\
.all()
for ms in multisettings:
if ms.value == value:
return ms
ms = MultiSetting(key, value)
DBSession.add(ms)
return ms
| gpl-3.0 | 1,688,854,665,998,850,800 | 32.117188 | 89 | 0.673272 | false |
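The insert_settings helper above accepts a list of (key, value) pairs and only creates the settings that are missing; a minimal sketch of calling it, assuming hypothetical plugin keys (these are not keys defined by MediaDrop):

from mediacore.model.settings import insert_settings

# Hypothetical defaults; each tuple is a (key, value) pair as the docstring describes.
created = insert_settings([
    (u'my_plugin_enabled', u'true'),
    (u'my_plugin_api_url', u'http://example.com/api'),
])
# 'created' contains only the Setting objects that did not already exist.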
JeremyRubin/bitcoin | test/functional/p2p_segwit.py | 1 | 95731 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_BLOCK,
MSG_TX,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
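# Illustrative note (not part of the original test): for a hypothetical block with
# base_size = 1000 and total_size = 1400 (i.e. 400 witness bytes), the formula above
# gives int((3*1000 + 1400 + 3) / 4) = 1100, which is base + witness/4; the "+3"
# makes any fractional result round up.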
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block))
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
# Avoid sending out msg_getdata in the mininode thread as a reply to invs.
# They are not needed and would only lead to races because we send msg_getdata out in the test thread
def on_inv(self, message):
pass
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)]))
if success:
self.wait_for_getdata([tx.sha256], timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata([block.sha256])
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "[email protected]"],
["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest # type: ignore
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())
self.test_node.send_and_ping(msg_tx(tx)) # make sure the block was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest # type: ignore
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest # type: ignore
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest # type: ignore
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest # type: ignore
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest # type: ignore
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest # type: ignore
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest # type: ignore
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest # type: ignore
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older versions of bitcoind that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest # type: ignore
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
assert_equal('bad-witness-nonce-size', self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest # type: ignore
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest # type: ignore
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
assert_equal('bad-witness-merkle-match', self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
assert_equal('bad-txnmrklroot', self.nodes[0].submitblock(block_2.serialize().hex()))
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest # type: ignore
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert len(witness_program) == MAX_PROGRAM_LENGTH
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest # type: ignore
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest # type: ignore
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest # type: ignore
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
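# 'pad' is a single 0x01 byte; it is repeated below to build stack
# elements and witnessScripts of precise sizes around the standardness limits.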
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest # type: ignore
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
self.restart_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
# We reconnect more than 100 blocks, give it plenty of time
self.sync_blocks(timeout=240)
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest # type: ignore
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
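# Sigop accounting for the witness program above: each OP_CHECKMULTISIG
# counts as 20 (the maximum, since it is not preceded by a key-count push)
# and each OP_CHECKSIG as 1, giving 20 * 5 + 193 = 293 sigops per spend.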
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of a tx that always sets the witness flag byte to 3
def serialize_with_bogus_witness(tx):
flags = 3
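# In the BIP144 serialization the marker/flag byte should be 1 when
# witness data follows; forcing it to 3 sets an extra, unknown flag bit,
# so the node reports either a superfluous (empty) witness record or
# unknown optional data, as exercised by the assertions below.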
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
if __name__ == '__main__':
SegWitTest().main()
| mit | 9,174,906,642,596,410,000 | 45.381298 | 187 | 0.630705 | false |
DMOJ/judge | dmoj/tests/test_problem.py | 1 | 4108 | import os
import unittest
from unittest import mock
from dmoj.config import InvalidInitException
from dmoj.problem import Problem, ProblemDataManager
class ProblemTest(unittest.TestCase):
def setUp(self):
self.data_patch = mock.patch('dmoj.problem.ProblemDataManager')
data_mock = self.data_patch.start()
data_mock.side_effect = lambda problem: self.problem_data
def test_test_case_matching(self):
class MockProblem(Problem):
def _resolve_archive_files(self):
return None
def _problem_file_list(self):
# fmt: off
return [
's2.1-1.in', 's2.1-1.out',
's2.1.2.in', 's2.1.2.out',
's3.4.in', 's3.4.out',
'5.in', '5.OUT',
'6-1.in', '6-1.OUT',
'6.2.in', '6.2.OUT',
'foo/a.b.c.6.3.in', 'foo/a.b.c.6.3.OUT',
'bar.in.7', 'bar.out.7',
'INPUT8.txt', 'OUTPUT8.txt',
'.DS_Store',
]
# fmt: on
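# The expected structure asserted below: cases sharing a numeric prefix
# (e.g. 6-1, 6.2, ...6.3) are batched together, input/output files are
# paired across the .in/.out/.OUT and INPUT*/OUTPUT* spellings, and
# unrelated files such as .DS_Store are ignored.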
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
self.problem_data = ProblemDataManager(None)
self.problem_data.update({'init.yml': 'archive: foo.zip'})
problem = MockProblem('test', 2, 16384, {})
self.assertEqual(
problem.config.test_cases.unwrap(),
[
{
'batched': [{'in': 's2.1-1.in', 'out': 's2.1-1.out'}, {'in': 's2.1.2.in', 'out': 's2.1.2.out'}],
'points': 1,
},
{'in': 's3.4.in', 'out': 's3.4.out', 'points': 1},
{'in': '5.in', 'out': '5.OUT', 'points': 1},
{
'batched': [
{'in': '6-1.in', 'out': '6-1.OUT'},
{'in': '6.2.in', 'out': '6.2.OUT'},
{'in': 'foo/a.b.c.6.3.in', 'out': 'foo/a.b.c.6.3.OUT'},
],
'points': 1,
},
{'in': 'bar.in.7', 'out': 'bar.out.7', 'points': 1},
{'in': 'INPUT8.txt', 'out': 'OUTPUT8.txt', 'points': 1},
],
)
def test_no_init(self):
self.problem_data = {}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaises(InvalidInitException):
Problem('test', 2, 16384, {})
def test_empty_init(self):
self.problem_data = {'init.yml': ''}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'lack of content'):
Problem('test', 2, 16384, {})
def test_bad_init(self):
self.problem_data = {'init.yml': '"'}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'while scanning a quoted scalar'):
Problem('test', 2, 16384, {})
def test_blank_init(self):
self.problem_data = {'init.yml': 'archive: does_not_exist.txt'}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'archive file'):
Problem('test', 2, 16384, {})
@unittest.skipIf(os.devnull != '/dev/null', 'os.path.exists("nul") is False on Windows')
def test_bad_archive(self):
self.problem_data = {'init.yml': 'archive: %s' % (os.devnull,)}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/'
with self.assertRaisesRegex(InvalidInitException, 'bad archive:'):
Problem('test', 2, 16384, {})
def tearDown(self):
self.data_patch.stop()
| agpl-3.0 | 4,718,125,784,180,389,000 | 40.08 | 120 | 0.478822 | false |
valtech-mooc/edx-platform | common/djangoapps/student/tests/test_roles.py | 61 | 7708 | """
Tests of student.roles
"""
import ddt
from django.test import TestCase
from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory
from student.roles import (
GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class RolesTestCase(TestCase):
"""
Tests of student.roles
"""
def setUp(self):
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
self.course_loc = self.course_key.make_usage_key('course', '2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course_key)
self.course_instructor = InstructorFactory(course_key=self.course_key)
def test_global_staff(self):
self.assertFalse(GlobalStaff().has_user(self.student))
self.assertFalse(GlobalStaff().has_user(self.course_staff))
self.assertFalse(GlobalStaff().has_user(self.course_instructor))
self.assertTrue(GlobalStaff().has_user(self.global_staff))
def test_group_name_case_sensitive(self):
uppercase_course_id = "ORG/COURSE/NAME"
lowercase_course_id = uppercase_course_id.lower()
uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id)
lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id)
role = "role"
lowercase_user = UserFactory()
CourseRole(role, lowercase_course_key).add_users(lowercase_user)
uppercase_user = UserFactory()
CourseRole(role, uppercase_course_key).add_users(uppercase_user)
self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user))
self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user))
def test_course_role(self):
"""
Test that giving a user a course role enables access appropriately
"""
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student has premature access to {}".format(self.course_key)
)
CourseStaffRole(self.course_key).add_users(self.student)
self.assertTrue(
CourseStaffRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
CourseStaffRole(self.course_key).remove_users(self.student)
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student still has access to {}".format(self.course_key)
)
def test_org_role(self):
"""
Test that giving a user an org role enables access appropriately
"""
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student has premature access to {}".format(self.course_key.org)
)
OrgStaffRole(self.course_key.org).add_users(self.student)
self.assertTrue(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
# remove access and confirm
OrgStaffRole(self.course_key.org).remove_users(self.student)
if hasattr(self.student, '_roles'):
del self.student._roles
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
def test_org_and_course_roles(self):
"""
Test that org roles and course roles don't interfere with each other
"""
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).add_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
OrgInstructorRole(self.course_key.org).remove_users(self.student)
self.assertFalse(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# ok now keep org role and get rid of course one
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).remove_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student lost has access to {}".format(self.course_key.org)
)
self.assertFalse(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
def test_get_user_for_role(self):
"""
test users_for_role
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertGreater(len(role.users_with_role()), 0)
def test_add_users_doesnt_add_duplicate_entry(self):
"""
Tests that calling add_users multiple times before a single call
to remove_users does not result in the user remaining in the group.
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertTrue(role.has_user(self.student))
# Call add_users a second time, then remove just once.
role.add_users(self.student)
role.remove_users(self.student)
self.assertFalse(role.has_user(self.student))
@ddt.ddt
class RoleCacheTestCase(TestCase):
IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall')
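# Each ROLES entry pairs a role with the (role name, course key, org)
# triple that RoleCache.has_role should report for users holding it.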
ROLES = (
(CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')),
(CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')),
(OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')),
(OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')),
(CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')),
)
def setUp(self):
self.user = UserFactory()
@ddt.data(*ROLES)
@ddt.unpack
def test_only_in_role(self, role, target):
role.add_users(self.user)
cache = RoleCache(self.user)
self.assertTrue(cache.has_role(*target))
for other_role, other_target in self.ROLES:
if other_role == role:
continue
self.assertFalse(cache.has_role(*other_target))
@ddt.data(*ROLES)
@ddt.unpack
def test_empty_cache(self, role, target):
cache = RoleCache(self.user)
self.assertFalse(cache.has_role(*target))
| agpl-3.0 | -1,431,941,181,013,096,000 | 38.937824 | 98 | 0.649196 | false |
webmedic/booker | src/plugins/tags/__init__.py | 1 | 4970 | from PyQt4 import QtGui, QtCore
import sys, os
import models
from pluginmgr import ShelfView
# This plugin lists the books by tag
EBOOK_EXTENSIONS=['epub','mobi','pdf']
class Catalog(ShelfView):
title = "Books By Tag"
itemText = "Tags"
items = {}
def showList(self, search = None):
"""Get all books from the DB and show them"""
if not self.widget:
print("Call setWidget first")
return
self.operate = self.showList
self.items = {}
css = '''
::item {
padding: 0;
margin: 0;
height: 48;
}
'''
self.widget.title.setText(self.title)
# Setup widgetry
self.widget.stack.setCurrentIndex(0)
self.shelf = QtGui.QListWidget()
# Make it look right
self.shelf.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.shelf.setFrameShape(self.shelf.NoFrame)
self.shelf.setDragEnabled(False)
self.shelf.setSelectionMode(self.shelf.NoSelection)
self.shelf.setStyleSheet(css)
self.shelf.setIconSize(QtCore.QSize(48,48))
# Hook the shelf context menu
self.shelf.customContextMenuRequested.connect(self.shelfContextMenu)
# Hook book editor
self.shelf.itemActivated.connect(self.widget.on_books_itemActivated)
# Fill the shelf
if search:
tags = models.Tag.query.order_by("name").filter(models.Tag.name.like("%%%s%%"%search))
else:
tags = models.Tag.query.order_by("name").all()
for a in tags:
a_item = QtGui.QListWidgetItem(a.name, self.shelf)
for b in a.books:
icon = QtGui.QIcon(QtGui.QPixmap(b.cover()).scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item = QtGui.QListWidgetItem(icon, b.title, self.shelf)
item.book = b
self.items[b.id] = item
self.widget.shelfStack.setWidget(self.shelf)
def showGrid(self, search = None):
"""Get all books from the DB and show them"""
if not self.widget:
print("Call setWidget first")
return
self.operate = self.showGrid
self.items = {}
self.widget.title.setText(self.title)
css = '''
::item {
padding: 0;
margin: 0;
width: 150px;
height: 150px;
}
'''
# Setup widgetry
self.widget.stack.setCurrentIndex(0)
self.shelves = QtGui.QWidget()
self.shelvesLayout = QtGui.QVBoxLayout()
self.shelves.setLayout(self.shelvesLayout)
if search:
tags = models.Tag.query.order_by("name").filter(models.Tag.name.like("%%%s%%"%search))
else:
tags = models.Tag.query.order_by("name").all()
for a in tags:
# Make a shelf
shelf_label = QtGui.QLabel(a.name)
shelf = QtGui.QListWidget()
self.shelvesLayout.addWidget(shelf_label)
self.shelvesLayout.addWidget(shelf)
# Make it look right
shelf.setStyleSheet(css)
shelf.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
shelf.setFrameShape(shelf.NoFrame)
shelf.setIconSize(QtCore.QSize(128,128))
shelf.setViewMode(shelf.IconMode)
shelf.setMinimumHeight(153)
shelf.setMaximumHeight(153)
shelf.setMinimumWidth(153*len(a.books))
shelf.setFlow(shelf.LeftToRight)
shelf.setWrapping(False)
shelf.setDragEnabled(False)
shelf.setSelectionMode(shelf.NoSelection)
# Hook the shelf context menu
shelf.customContextMenuRequested.connect(self.shelfContextMenu)
# Hook book editor
shelf.itemActivated.connect(self.widget.on_books_itemActivated)
# Fill the shelf
for b in a.books:
pixmap = QtGui.QPixmap(b.cover())
if pixmap.isNull():
pixmap = QtGui.QPixmap(b.default_cover())
icon = QtGui.QIcon(pixmap.scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item = QtGui.QListWidgetItem(icon, b.title, shelf)
item.book = b
self.items[b.id] = item
self.shelvesLayout.addStretch(1)
self.widget.shelfStack.setWidget(self.shelves)
def updateBook(self, book):
# This may get called when no books
# have been loaded in this view, so make it cheap
if self.items and book.id in self.items:
item = self.items[book.id]
icon = QtGui.QIcon(QtGui.QPixmap(book.cover()).scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item.setText(book.title)
item.setIcon(icon)
item.book = book
| mit | -8,237,720,128,516,522,000 | 34.248227 | 112 | 0.574044 | false |
duncanwp/iris | lib/iris/tests/unit/plot/test_points.py | 11 | 3049 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.points(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.points(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.points(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.points(self.cube, coords=('bar', 'str_coord'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.points,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = None
self.dataT = None
self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
self.draw_func = iplt.points
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 9,134,854,747,925,333,000 | 33.647727 | 74 | 0.66776 | false |
zhangjunlei26/servo | python/mozlog/mozlog/structured/formatters/machformatter.py | 45 | 12405 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from collections import defaultdict
try:
import blessings
except ImportError:
blessings = None
import base
def format_seconds(total):
"""Format number of seconds to MM:SS.DD form."""
minutes, seconds = divmod(total, 60)
return '%2d:%05.2f' % (minutes, seconds)
class NullTerminal(object):
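# Stand-in for blessings.Terminal when colors are disabled or blessings is
# unavailable: every attribute lookup returns an identity function, so
# color calls become no-ops.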
def __getattr__(self, name):
return self._id
def _id(self, value):
return value
class MachFormatter(base.BaseFormatter):
def __init__(self, start_time=None, write_interval=False, write_times=True,
terminal=None, disable_colors=False):
if disable_colors:
terminal = None
elif terminal is None and blessings is not None:
terminal = blessings.Terminal()
if start_time is None:
start_time = time.time()
start_time = int(start_time * 1000)
self.start_time = start_time
self.write_interval = write_interval
self.write_times = write_times
self.status_buffer = {}
self.has_unexpected = {}
self.last_time = None
self.terminal = terminal
self.verbose = False
self._known_pids = set()
self.summary_values = {"tests": 0,
"subtests": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
self.summary_unexpected = []
def __call__(self, data):
s = base.BaseFormatter.__call__(self, data)
if s is None:
return
time = format_seconds(self._time(data))
action = data["action"].upper()
thread = data["thread"]
# Not using the NullTerminal here is a small optimisation to cut the number of
# function calls
if self.terminal is not None:
test = self._get_test_id(data)
time = self.terminal.blue(time)
color = None
if data["action"] == "test_end":
if "expected" not in data and not self.has_unexpected[test]:
color = self.terminal.green
else:
color = self.terminal.red
elif data["action"] in ("suite_start", "suite_end",
"test_start", "test_status"):
color = self.terminal.yellow
elif data["action"] == "crash":
color = self.terminal.red
if color is not None:
action = color(action)
return "%s %s: %s %s\n" % (time, action, thread, s)
def _get_test_id(self, data):
test_id = data.get("test")
if isinstance(test_id, list):
test_id = tuple(test_id)
return test_id
def _get_file_name(self, test_id):
if isinstance(test_id, (str, unicode)):
return test_id
if isinstance(test_id, tuple):
return "".join(test_id)
assert False, "unexpected test_id"
def suite_start(self, data):
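# Reset the per-suite counters so totals from a previous run are not
# carried over into this summary.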
self.summary_values = {"tests": 0,
"subtests": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
self.summary_unexpected = []
return "%i" % len(data["tests"])
def suite_end(self, data):
term = self.terminal if self.terminal is not None else NullTerminal()
heading = "Summary"
rv = ["", heading, "=" * len(heading), ""]
has_subtests = self.summary_values["subtests"] > 0
if has_subtests:
rv.append("Ran %i tests (%i parents, %i subtests)" %
(self.summary_values["tests"] + self.summary_values["subtests"],
self.summary_values["tests"],
self.summary_values["subtests"]))
else:
rv.append("Ran %i tests" % self.summary_values["tests"])
rv.append("Expected results: %i" % self.summary_values["expected"])
unexpected_count = sum(self.summary_values["unexpected"].values())
if unexpected_count > 0:
unexpected_str = " (%s)" % ", ".join("%s: %i" % (key, value) for key, value in
sorted(self.summary_values["unexpected"].items()))
else:
unexpected_str = ""
rv.append("Unexpected results: %i%s" % (unexpected_count, unexpected_str))
if self.summary_values["skipped"] > 0:
rv.append("Skipped: %i" % self.summary_values["skipped"])
rv.append("")
if not self.summary_values["unexpected"]:
rv.append(term.green("OK"))
else:
heading = "Unexpected Results"
rv.extend([heading, "=" * len(heading), ""])
if has_subtests:
for test_id, results in self.summary_unexpected:
test = self._get_file_name(test_id)
rv.extend([test, "-" * len(test)])
for name, status, expected, message in results:
if name is None:
name = "[Parent]"
rv.append("%s %s" % (self.format_expected(status, expected), name))
else:
for test_id, results in self.summary_unexpected:
test = self._get_file_name(test_id)
assert len(results) == 1
name, status, expected, message = results[0]
assert name is None
rv.append("%s %s" % (self.format_expected(status, expected), test))
return "\n".join(rv)
def format_expected(self, status, expected):
term = self.terminal if self.terminal is not None else NullTerminal()
if status == "ERROR":
color = term.red
else:
color = term.yellow
if expected in ("PASS", "OK"):
return color(status)
return color("%s expected %s" % (status, expected))
def test_start(self, data):
self.summary_values["tests"] += 1
return "%s" % (self._get_test_id(data),)
def test_end(self, data):
subtests = self._get_subtest_data(data)
unexpected = subtests["unexpected"]
message = data.get("message", "")
if "stack" in data:
stack = data["stack"]
if stack and stack[-1] != "\n":
stack += "\n"
message = stack + message
if "expected" in data:
parent_unexpected = True
expected_str = ", expected %s" % data["expected"]
unexpected.append((None, data["status"], data["expected"],
message))
else:
parent_unexpected = False
expected_str = ""
test = self._get_test_id(data)
if unexpected:
self.summary_unexpected.append((test, unexpected))
self._update_summary(data)
# Reset the counts to 0
self.status_buffer[test] = {"count": 0, "unexpected": [], "pass": 0}
self.has_unexpected[test] = bool(unexpected)
if subtests["count"] != 0:
rv = "Harness %s%s. Subtests passed %i/%i. Unexpected %s" % (
data["status"], expected_str, subtests["pass"], subtests["count"],
len(unexpected))
else:
rv = "%s%s" % (data["status"], expected_str)
if unexpected:
rv += "\n"
if len(unexpected) == 1 and parent_unexpected:
rv += "%s" % unexpected[0][-1]
else:
for name, status, expected, message in unexpected:
if name is None:
name = "[Parent]"
expected_str = "Expected %s, got %s" % (expected, status)
rv += "%s\n" % ("\n".join([name, "-" * len(name), expected_str, message]))
rv = rv[:-1]
return rv
def test_status(self, data):
self.summary_values["subtests"] += 1
test = self._get_test_id(data)
if test not in self.status_buffer:
self.status_buffer[test] = {"count": 0, "unexpected": [], "pass": 0}
self.status_buffer[test]["count"] += 1
message = data.get("message", "")
if "stack" in data:
if message:
message += "\n"
message += data["stack"]
if data["status"] == "PASS":
self.status_buffer[test]["pass"] += 1
self._update_summary(data)
rv = None
status, subtest = data["status"], data["subtest"]
unexpected = "expected" in data
if self.verbose:
if self.terminal is not None:
status = (self.terminal.red if unexpected else self.terminal.green)(status)
rv = " ".join([subtest, status, message])
elif unexpected:
# We only append an unexpected summary if it was not logged
# directly by verbose mode.
self.status_buffer[test]["unexpected"].append((subtest,
status,
data["expected"],
message))
return rv
def _update_summary(self, data):
if "expected" in data:
self.summary_values["unexpected"][data["status"]] += 1
elif data["status"] == "SKIP":
self.summary_values["skipped"] += 1
else:
self.summary_values["expected"] += 1
def process_output(self, data):
rv = []
if "command" in data and data["process"] not in self._known_pids:
self._known_pids.add(data["process"])
rv.append('(pid:%s) Full command: %s' % (data["process"], data["command"]))
rv.append('(pid:%s) "%s"' % (data["process"], data["data"]))
return "\n".join(rv)
def crash(self, data):
test = self._get_test_id(data)
if data.get("stackwalk_returncode", 0) != 0 and not data.get("stackwalk_stderr"):
success = True
else:
success = False
rv = ["pid:%s. Test:%s. Minidump anaylsed:%s. Signature:[%s]" %
(data.get("pid", None), test, success, data["signature"])]
if data.get("minidump_path"):
rv.append("Crash dump filename: %s" % data["minidump_path"])
if data.get("stackwalk_returncode", 0) != 0:
rv.append("minidump_stackwalk exited with return code %d" %
data["stackwalk_returncode"])
if data.get("stackwalk_stderr"):
rv.append("stderr from minidump_stackwalk:")
rv.append(data["stackwalk_stderr"])
elif data.get("stackwalk_stdout"):
rv.append(data["stackwalk_stdout"])
if data.get("stackwalk_errors"):
rv.extend(data.get("stackwalk_errors"))
rv = "\n".join(rv)
if not rv[-1] == "\n":
rv += "\n"
return rv
def log(self, data):
level = data.get("level").upper()
if self.terminal is not None:
if level in ("CRITICAL", "ERROR"):
level = self.terminal.red(level)
elif level == "WARNING":
level = self.terminal.yellow(level)
elif level == "INFO":
level = self.terminal.blue(level)
if data.get('component'):
rv = " ".join([data["component"], level, data["message"]])
else:
rv = "%s %s" % (level, data["message"])
if "stack" in data:
rv += "\n%s" % data["stack"]
return rv
def _get_subtest_data(self, data):
test = self._get_test_id(data)
return self.status_buffer.get(test, {"count": 0, "unexpected": [], "pass": 0})
def _time(self, data):
entry_time = data["time"]
if self.write_interval and self.last_time is not None:
t = entry_time - self.last_time
self.last_time = entry_time
else:
t = entry_time - self.start_time
return t / 1000.
| mpl-2.0 | 2,516,867,410,716,997,600 | 33.845506 | 96 | 0.508666 | false |
harisibrahimkv/django | django/db/backends/oracle/compiler.py | 25 | 2437 | from django.db import NotSupportedError
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
if not do_offset:
sql, params = super().as_sql(with_limits=False, with_col_aliases=with_col_aliases)
elif not self.connection.features.supports_select_for_update_with_limit and self.query.select_for_update:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with select_for_update on this '
'database backend.'
)
else:
sql, params = super().as_sql(with_limits=False, with_col_aliases=True)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
if self.query.low_mark:
sql = (
'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
'"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
)
else:
# Simplify the query to support subqueries if there's no offset.
sql = (
'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where)
)
return sql, params
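        # Editor's illustration (not part of the original Django source): with
        # low_mark=20 and high_mark=30, the wrapping above yields SQL roughly
        # like the following (inner query simplified):
        #   SELECT * FROM (
        #       SELECT "_SUB".*, ROWNUM AS "_RN"
        #       FROM (SELECT ... FROM some_table ORDER BY ...) "_SUB"
        #       WHERE ROWNUM <= 30
        #   ) WHERE "_RN" > 20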
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
| bsd-3-clause | 6,315,438,832,730,179,000 | 38.306452 | 113 | 0.608945 | false |
acrsteiner/three.js | utils/exporters/blender/addons/io_three/logger.py | 176 | 1423 | import os
import logging
import tempfile
from . import constants
LOG_FILE = None
LOGGER = None
LEVELS = {
constants.DEBUG: logging.DEBUG,
constants.INFO: logging.INFO,
constants.WARNING: logging.WARNING,
constants.ERROR: logging.ERROR,
constants.CRITICAL: logging.CRITICAL
}
def init(filename, level=constants.DEBUG):
"""Initialize the logger.
:param filename: base name of the log file
:param level: logging level (Default value = DEBUG)
"""
global LOG_FILE
LOG_FILE = os.path.join(tempfile.gettempdir(), filename)
with open(LOG_FILE, 'w'):
pass
global LOGGER
LOGGER = logging.getLogger('Three.Export')
LOGGER.setLevel(LEVELS[level])
if not LOGGER.handlers:
stream = logging.StreamHandler()
stream.setLevel(LEVELS[level])
format_ = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
formatter = logging.Formatter(format_)
stream.setFormatter(formatter)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setLevel(LEVELS[level])
file_handler.setFormatter(formatter)
LOGGER.addHandler(stream)
LOGGER.addHandler(file_handler)
def info(*args):
LOGGER.info(*args)
def debug(*args):
LOGGER.debug(*args)
def warning(*args):
LOGGER.warning(*args)
def error(*args):
LOGGER.error(*args)
def critical(*args):
LOGGER.critical(*args)
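# Usage sketch (editor's note, not part of the original add-on; the log file
# name below is only an example):
#   from io_three import constants, logger
#   logger.init('io_three.export.log', level=constants.DEBUG)
#   logger.info('exporting %d meshes', 3)
#   logger.error('failed to export %s', 'Cube')
# After init(), messages go to both the console and a file in the system
# temporary directory.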
| mit | -8,727,811,980,096,798,000 | 20.892308 | 71 | 0.66409 | false |
timlinux/QGIS | tests/src/python/test_qgspointcloudattributebyramprenderer.py | 1 | 17855 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPointCloudAttributeByRampRenderer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '09/11/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (
QgsProviderRegistry,
QgsPointCloudLayer,
QgsPointCloudAttributeByRampRenderer,
QgsReadWriteContext,
QgsRenderContext,
QgsPointCloudRenderContext,
QgsVector3D,
QgsMultiRenderChecker,
QgsMapSettings,
QgsRectangle,
QgsUnitTypes,
QgsMapUnitScale,
QgsCoordinateReferenceSystem,
QgsDoubleRange,
QgsColorRampShader,
QgsStyle,
QgsLayerTreeLayer
)
from qgis.PyQt.QtCore import QDir, QSize, Qt
from qgis.PyQt.QtGui import QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
class TestQgsPointCloudAttributeByRampRenderer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.report = "<h1>Python QgsPointCloudAttributeByRampRenderer Tests</h1>\n"
@classmethod
def tearDownClass(cls):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(cls.report)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testSetLayer(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/norgb/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
# test that a point cloud with no RGB attributes is automatically assigned the ramp renderer
self.assertIsInstance(layer.renderer(), QgsPointCloudAttributeByRampRenderer)
# check default range
self.assertAlmostEqual(layer.renderer().minimum(), -1.98, 6)
self.assertAlmostEqual(layer.renderer().maximum(), -1.92, 6)
def testBasic(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('attr')
self.assertEqual(renderer.attribute(), 'attr')
renderer.setMinimum(5)
self.assertEqual(renderer.minimum(), 5)
renderer.setMaximum(15)
self.assertEqual(renderer.maximum(), 15)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(20, 30, ramp)
renderer.setColorRampShader(shader)
self.assertEqual(renderer.colorRampShader().minimumValue(), 20)
self.assertEqual(renderer.colorRampShader().maximumValue(), 30)
renderer.setMaximumScreenError(18)
renderer.setMaximumScreenErrorUnit(QgsUnitTypes.RenderInches)
renderer.setPointSize(13)
renderer.setPointSizeUnit(QgsUnitTypes.RenderPoints)
renderer.setPointSizeMapUnitScale(QgsMapUnitScale(1000, 2000))
rr = renderer.clone()
self.assertEqual(rr.maximumScreenError(), 18)
self.assertEqual(rr.maximumScreenErrorUnit(), QgsUnitTypes.RenderInches)
self.assertEqual(rr.pointSize(), 13)
self.assertEqual(rr.pointSizeUnit(), QgsUnitTypes.RenderPoints)
self.assertEqual(rr.pointSizeMapUnitScale().minScale, 1000)
self.assertEqual(rr.pointSizeMapUnitScale().maxScale, 2000)
self.assertEqual(rr.attribute(), 'attr')
self.assertEqual(rr.minimum(), 5)
self.assertEqual(rr.maximum(), 15)
self.assertEqual(rr.colorRampShader().minimumValue(), 20)
self.assertEqual(rr.colorRampShader().maximumValue(), 30)
self.assertEqual(rr.colorRampShader().sourceColorRamp().color1().name(),
renderer.colorRampShader().sourceColorRamp().color1().name())
self.assertEqual(rr.colorRampShader().sourceColorRamp().color2().name(),
renderer.colorRampShader().sourceColorRamp().color2().name())
doc = QDomDocument("testdoc")
elem = renderer.save(doc, QgsReadWriteContext())
r2 = QgsPointCloudAttributeByRampRenderer.create(elem, QgsReadWriteContext())
self.assertEqual(r2.maximumScreenError(), 18)
self.assertEqual(r2.maximumScreenErrorUnit(), QgsUnitTypes.RenderInches)
self.assertEqual(r2.pointSize(), 13)
self.assertEqual(r2.pointSizeUnit(), QgsUnitTypes.RenderPoints)
self.assertEqual(r2.pointSizeMapUnitScale().minScale, 1000)
self.assertEqual(r2.pointSizeMapUnitScale().maxScale, 2000)
self.assertEqual(r2.attribute(), 'attr')
self.assertEqual(r2.minimum(), 5)
self.assertEqual(r2.maximum(), 15)
self.assertEqual(r2.colorRampShader().minimumValue(), 20)
self.assertEqual(r2.colorRampShader().maximumValue(), 30)
self.assertEqual(r2.colorRampShader().sourceColorRamp().color1().name(),
renderer.colorRampShader().sourceColorRamp().color1().name())
self.assertEqual(r2.colorRampShader().sourceColorRamp().color2().name(),
renderer.colorRampShader().sourceColorRamp().color2().name())
def testUsedAttributes(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('attr')
rc = QgsRenderContext()
prc = QgsPointCloudRenderContext(rc, QgsVector3D(), QgsVector3D())
self.assertEqual(renderer.usedAttributes(prc), {'attr'})
def testLegend(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(800)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 800, ramp.clone())
shader.setClassificationMode(QgsColorRampShader.EqualInterval)
shader.classifyColorRamp(classes=4)
renderer.setColorRampShader(shader)
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
layer_tree_layer = QgsLayerTreeLayer(layer)
nodes = renderer.createLegendNodes(layer_tree_layer)
self.assertEqual(len(nodes), 4)
self.assertEqual(nodes[0].data(Qt.DisplayRole), '200')
self.assertEqual(nodes[1].data(Qt.DisplayRole), '400')
self.assertEqual(nodes[2].data(Qt.DisplayRole), '600')
self.assertEqual(nodes[3].data(Qt.DisplayRole), '800')
shader = QgsColorRampShader(200, 600, ramp.clone())
shader.setClassificationMode(QgsColorRampShader.EqualInterval)
shader.classifyColorRamp(classes=2)
renderer.setColorRampShader(shader)
nodes = renderer.createLegendNodes(layer_tree_layer)
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].data(Qt.DisplayRole), '200')
self.assertEqual(nodes[1].data(Qt.DisplayRole), '600')
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRender(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_render')
result = renderchecker.runTest('expected_ramp_render')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderX(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('X')
renderer.setMinimum(498062.00000)
renderer.setMaximum(498067.39000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(498062.00000, 498067.39000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_xrender')
result = renderchecker.runTest('expected_ramp_xrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderY(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Y')
renderer.setMinimum(7050992.84000)
renderer.setMaximum(7050997.04000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(7050992.84000, 7050997.04000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_yrender')
result = renderchecker.runTest('expected_ramp_yrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderZ(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Z')
renderer.setMinimum(74.34000)
renderer.setMaximum(75)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(74.34000, 75, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_zrender')
result = renderchecker.runTest('expected_ramp_zrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderCrsTransform(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
mapsettings.setExtent(QgsRectangle(152.980508492, -26.662023491, 152.980586020, -26.662071137))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_render_crs_transform')
result = renderchecker.runTest('expected_ramp_render_crs_transform')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderPointSize(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(.15)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMapUnits)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_pointsize')
result = renderchecker.runTest('expected_ramp_pointsize')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderZRange(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
mapsettings.setZRange(QgsDoubleRange(74.7, 75))
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_zfilter')
result = renderchecker.runTest('expected_ramp_zfilter')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,611,403,781,188,709,000 | 43.415423 | 115 | 0.7021 | false |
maartenq/ansible | lib/ansible/modules/network/eos/eos_vlan.py | 25 | 11418 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vlan
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VLANs on Arista EOS network devices
description:
- This module provides declarative management of VLANs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VLAN.
vlan_id:
description:
- ID of the VLAN.
required: true
interfaces:
description:
      - List of interfaces that should be associated with the VLAN. Interface names are
        case sensitive and should be in expanded format, not abbreviated.
associated_interfaces:
description:
      - This is an intent option that checks the operational state of the given vlan C(name)
        for the associated interfaces. Interface names are case sensitive and should be in
        expanded format, not abbreviated. If the value of C(associated_interfaces) does not
        match the operational state of the vlan interfaces on the device, the task fails.
version_added: "2.5"
delay:
description:
      - Delay, in seconds, that the play should wait before checking the declarative intent params values.
default: 10
aggregate:
    description: List of VLAN definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent', 'active', 'suspend']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Create vlan
eos_vlan:
vlan_id: 4000
name: vlan-4000
state: present
- name: Add interfaces to vlan
eos_vlan:
vlan_id: 4000
state: present
interfaces:
- Ethernet1
- Ethernet2
- name: Check if interfaces is assigned to vlan
eos_vlan:
vlan_id: 4000
associated_interfaces:
- Ethernet1
- Ethernet2
- name: Suspend vlan
eos_vlan:
vlan_id: 4000
state: suspend
- name: Unsuspend vlan
eos_vlan:
vlan_id: 4000
state: active
- name: Create aggregate of vlans
eos_vlan:
aggregate:
- vlan_id: 4000
- {vlan_id: 4001, name: vlan-4001}
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vlan 20
- name test-vlan
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def search_obj_in_list(vlan_id, lst):
for o in lst:
if o['vlan_id'] == vlan_id:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
vlan_id = w['vlan_id']
name = w['name']
state = w['state']
interfaces = w['interfaces']
obj_in_have = search_obj_in_list(vlan_id, have)
if state == 'absent':
if obj_in_have:
commands.append('no vlan %s' % w['vlan_id'])
elif state == 'present':
if not obj_in_have:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
commands.append('name %s' % w['name'])
if w['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
else:
if w['name'] and w['name'] != obj_in_have['name']:
commands.append('vlan %s' % w['vlan_id'])
commands.append('name %s' % w['name'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(w['interfaces']))
for i in superfluous_interfaces:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('no switchport access vlan %s' % w['vlan_id'])
else:
if not obj_in_have:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
commands.append('name %s' % w['name'])
commands.append('state %s' % w['state'])
elif (w['name'] and obj_in_have['name'] != w['name']) or obj_in_have['state'] != w['state']:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
if obj_in_have['name'] != w['name']:
commands.append('name %s' % w['name'])
if obj_in_have['state'] != w['state']:
commands.append('state %s' % w['state'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['vlan_id'], want)
if not obj_in_want and h['vlan_id'] != '1':
commands.append('no vlan %s' % h['vlan_id'])
return commands
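    # Editor's illustration (not part of the original module): with
    #   want = [{'vlan_id': '4000', 'name': 'vlan-4000', 'state': 'present',
    #            'interfaces': ['Ethernet1'], 'associated_interfaces': []}]
    # and an empty have, the function returns
    #   ['vlan 4000', 'name vlan-4000', 'interface Ethernet1',
    #    'switchport access vlan 4000']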
def map_config_to_obj(module):
objs = []
vlans = run_commands(module, ['show vlan configured-ports | json'])
for vlan in vlans[0]['vlans']:
obj = {}
obj['vlan_id'] = vlan
obj['name'] = vlans[0]['vlans'][vlan]['name']
obj['state'] = vlans[0]['vlans'][vlan]['status']
obj['interfaces'] = []
interfaces = vlans[0]['vlans'][vlan]
for interface in interfaces['interfaces']:
obj['interfaces'].append(interface)
if obj['state'] == 'suspended':
obj['state'] = 'suspend'
objs.append(obj)
return objs
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
if item.get('interfaces'):
item['interfaces'] = [intf.replace(" ", "") for intf in item.get('interfaces') if intf]
if item.get('associated_interfaces'):
item['associated_interfaces'] = [intf.replace(" ", "") for intf in item.get('associated_interfaces') if intf]
d = item.copy()
d['vlan_id'] = str(d['vlan_id'])
obj.append(d)
else:
obj.append({
'vlan_id': str(module.params['vlan_id']),
'name': module.params['name'],
'state': module.params['state'],
'interfaces': [intf.replace(" ", "") for intf in module.params['interfaces']] if module.params['interfaces'] else [],
'associated_interfaces': [intf.replace(" ", "") for intf in
module.params['associated_interfaces']] if module.params['associated_interfaces'] else []
})
return obj
def check_declarative_intent_params(want, module, result):
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['vlan_id'], have)
if obj_in_have and 'interfaces' in obj_in_have and i not in obj_in_have['interfaces']:
module.fail_json(msg="Interface %s not configured on vlan %s" % (i, w['vlan_id']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
vlan_id=dict(type='int'),
name=dict(),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
state=dict(default='present',
choices=['present', 'absent', 'active', 'suspend'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['vlan_id'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['vlan_id', 'aggregate']]
mutually_exclusive = [['vlan_id', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
check_declarative_intent_params(want, module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,958,455,012,172,262,000 | 31.16338 | 129 | 0.572429 | false |
mattcongy/itshop | docker-images/taigav2/taiga-back/taiga/mdrender/templatetags/functions.py | 2 | 1172 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django_jinja import library
from jinja2 import Markup
from taiga.mdrender.service import render
@library.global_function
def mdrender(project, text) -> str:
if text:
return Markup(render(project, text))
return ""
| mit | 6,781,456,459,550,168,000 | 40.785714 | 74 | 0.752137 | false |
bop/hybrid | lib/python2.6/site-packages/django/contrib/admin/util.py | 101 | 15254 | from __future__ import unicode_literals
import datetime
import decimal
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
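    # For example (editor's illustration):
    #   flatten_fieldsets([(None, {'fields': ('name', ('city', 'zip'))})])
    # returns ['name', 'city', 'zip'] -- tuples used to group several fields
    # on one admin form line are expanded in place.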
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
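        # Shape of the result (editor's illustration with hypothetical models):
        # deleting a Blog with two Posts, the first of which has one Comment,
        # yields
        #   [blog, [post1, [comment1], post2]]
        # i.e. each object is followed by a nested list of its dependents.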
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
    Returns a sensible label for a field name. The name can be a callable, the
    name of an object attribute, or a genuine field name. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
try:
help_text = model._meta.get_field_by_name(name)[0].help_text
except models.FieldDoesNotExist:
help_text = ""
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.related.RelatedObject):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces)-1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| gpl-2.0 | 5,461,938,227,624,950,000 | 32.525275 | 94 | 0.597876 | false |
gechr/ansible-modules-extras | network/dnsimple.py | 49 | 11750 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: dnsimple
version_added: "1.6"
short_description: Interface with dnsimple.com (a DNS hosting service).
description:
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
options:
account_email:
description:
- "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
required: false
default: null
account_api_token:
description:
- Account API token. See I(account_email) for info.
required: false
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
- If domain is present but the domain doesn't exist, it will be created.
required: false
default: null
record:
description:
- Record to add, if blank a record for the domain will be created, supports the wildcard (*)
required: false
default: null
record_ids:
description:
- List of records to ensure they either exist or don't exist
required: false
default: null
type:
description:
- The type of DNS record to create
required: false
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
default: null
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
value:
description:
- Record value
- "Must be specified when trying to ensure a record exists"
required: false
default: null
priority:
description:
- Record priority
required: false
default: null
state:
description:
- whether the record should exist or not
required: false
choices: [ 'present', 'absent' ]
default: null
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with state=present on a record
required: false
default: null
requirements: [ dnsimple ]
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''
# authenticate using email and API token
- local_action: dnsimple [email protected] account_api_token=dummyapitoken
# fetch all domains
- local_action: dnsimple
register: domains
# fetch my.com domain records
- local_action: dnsimple domain=my.com state=present
register: records
# delete a domain
- local_action: dnsimple domain=my.com state=absent
# create a test.my.com A record to point to 127.0.0.1
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
register: record
# and then delete it
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent
# create a my.com CNAME record to example.com
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
# change its ttl
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
# and delete the record
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
'''
import os
try:
from dnsimple import DNSimple
from dnsimple.dnsimple import DNSimpleException
HAS_DNSIMPLE = True
except ImportError:
HAS_DNSIMPLE = False
def main():
module = AnsibleModule(
argument_spec = dict(
account_email = dict(required=False),
account_api_token = dict(required=False, no_log=True),
domain = dict(required=False),
record = dict(required=False),
record_ids = dict(required=False, type='list'),
type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
ttl = dict(required=False, default=3600, type='int'),
value = dict(required=False),
priority = dict(required=False, type='int'),
state = dict(required=False, choices=['present', 'absent']),
solo = dict(required=False, type='bool'),
),
required_together = (
['record', 'value']
),
supports_check_mode = True,
)
if not HAS_DNSIMPLE:
module.fail_json(msg="dnsimple required for this module")
account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
domain = module.params.get('domain')
record = module.params.get('record')
record_ids = module.params.get('record_ids')
record_type = module.params.get('type')
ttl = module.params.get('ttl')
value = module.params.get('value')
priority = module.params.get('priority')
state = module.params.get('state')
is_solo = module.params.get('solo')
if account_email and account_api_token:
client = DNSimple(email=account_email, api_token=account_api_token)
elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
else:
client = DNSimple()
try:
# Let's figure out what operation we want to do
# No domain, return a list
if not domain:
domains = client.domains()
module.exit_json(changed=False, result=[d['domain'] for d in domains])
# Domain & No record
if domain and record is None and not record_ids:
domains = [d['domain'] for d in client.domains()]
if domain.isdigit():
dr = next((d for d in domains if d['id'] == int(domain)), None)
else:
dr = next((d for d in domains if d['name'] == domain), None)
if state == 'present':
if dr:
module.exit_json(changed=False, result=dr)
else:
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
elif state == 'absent':
if dr:
if not module.check_mode:
client.delete(domain)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# need the not none check since record could be an empty string
if domain and record is not None:
records = [r['record'] for r in client.records(str(domain))]
if not record_type:
module.fail_json(msg="Missing the record type")
if not value:
module.fail_json(msg="Missing the record value")
rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)
if state == 'present':
changed = False
if is_solo:
# delete any records that have the same name and record type
same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
if rr:
same_type = [rid for rid in same_type if rid != rr['id']]
if same_type:
if not module.check_mode:
for rid in same_type:
client.delete_record(str(domain), rid)
changed = True
if rr:
# check if we need to update
if rr['ttl'] != ttl or rr['prio'] != priority:
data = {}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
else:
module.exit_json(changed=changed, result=rr)
else:
# create it
data = {
'name': record,
'record_type': record_type,
'content': value,
}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
elif state == 'absent':
if rr:
if not module.check_mode:
client.delete_record(str(domain), rr['id'])
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# Make sure these record_ids either all exist or none
if domain and record_ids:
current_records = [str(r['record']['id']) for r in client.records(str(domain))]
wanted_records = [str(r) for r in record_ids]
if state == 'present':
difference = list(set(wanted_records) - set(current_records))
if difference:
module.fail_json(msg="Missing the following records: %s" % difference)
else:
module.exit_json(changed=False)
elif state == 'absent':
difference = list(set(wanted_records) & set(current_records))
if difference:
if not module.check_mode:
for rid in difference:
client.delete_record(str(domain), rid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
except DNSimpleException, e:
module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
module.fail_json(msg="Unknown what you wanted me to do")
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -3,614,467,743,824,539,000 | 37.52459 | 246 | 0.567574 | false |
rotofly/odoo | addons/crm_helpdesk/__init__.py | 442 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,506,989,740,764,477,400 | 40.576923 | 78 | 0.617946 | false |
fernandezcuesta/ansible | lib/ansible/modules/cloud/cloudstack/cs_region.py | 18 | 5397 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_region
short_description: Manages regions on Apache CloudStack based clouds.
description:
- Add, update and remove regions.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
id:
description:
- ID of the region.
- Must be an number (int).
required: true
name:
description:
- Name of the region.
- Required if C(state=present)
required: false
default: null
endpoint:
description:
- Endpoint URL of the region.
- Required if C(state=present)
required: false
default: null
state:
description:
- State of the region.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a region
local_action:
module: cs_region
id: 2
name: geneva
endpoint: https://cloud.gva.example.com
# remove a region with ID 2
local_action:
module: cs_region
id: 2
state: absent
'''
RETURN = '''
---
id:
description: ID of the region.
returned: success
type: int
sample: 1
name:
description: Name of the region.
returned: success
type: string
sample: local
endpoint:
description: Endpoint of the region.
returned: success
type: string
sample: http://cloud.example.com
gslb_service_enabled:
description: Whether the GSLB service is enabled or not.
returned: success
type: bool
sample: true
portable_ip_service_enabled:
description: Whether the portable IP service is enabled or not.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackRegion(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRegion, self).__init__(module)
self.returns = {
'endpoint': 'endpoint',
'gslbserviceenabled': 'gslb_service_enabled',
'portableipserviceenabled': 'portable_ip_service_enabled',
}
def get_region(self):
id = self.module.params.get('id')
regions = self.query_api('listRegions', id=id)
if regions:
return regions['region'][0]
return None
def present_region(self):
region = self.get_region()
if not region:
region = self._create_region(region=region)
else:
region = self._update_region(region=region)
return region
def _create_region(self, region):
self.result['changed'] = True
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if not self.module.check_mode:
res = self.query_api('addRegion', **args)
region = res['region']
return region
def _update_region(self, region):
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if self.has_changed(args, region):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateRegion', **args)
region = res['region']
return region
def absent_region(self):
region = self.get_region()
if region:
self.result['changed'] = True
if not self.module.check_mode:
self.query_api('removeRegion', id=region['id'])
return region
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
id=dict(required=True, type='int'),
name=dict(),
endpoint=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
required_if=[
('state', 'present', ['name', 'endpoint']),
],
supports_check_mode=True
)
acs_region = AnsibleCloudStackRegion(module)
state = module.params.get('state')
if state == 'absent':
region = acs_region.absent_region()
else:
region = acs_region.present_region()
result = acs_region.get_result(region)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,224,494,481,638,739,500 | 25.446078 | 70 | 0.620204 | false |
edum1978/eduengage | boilerplate/external/pytz/tzfile.py | 118 | 4340 | #!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from cStringIO import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == 'TZif'
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i+2]
if tzname_offset not in tznames:
nul = tznames_raw.find('\0', tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = tznames_raw[tzname_offset:nul]
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i+1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i-1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
if dst <= 0: # Bad dst? Look further.
for j in range(i+1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) / 60) * 60
dst = int((dst + 30) / 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base,'Australia','Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base,'US','Eastern'), 'rb'))
pprint(tz._utc_transition_times)
#print tz.asPython(4)
#print tz.transitions_mapping
| lgpl-3.0 | -3,298,285,419,007,529,000 | 34.57377 | 77 | 0.548157 | false |
Technorip/Myntra | Django Backend/myntra/env/lib/python2.7/site-packages/django/core/checks/compatibility/django_1_8_0.py | 286 | 1052 | from __future__ import unicode_literals
from django.conf import global_settings, settings
from .. import Tags, Warning, register
@register(Tags.compatibility)
def check_duplicate_template_settings(app_configs, **kwargs):
if settings.TEMPLATES:
values = [
'TEMPLATE_DIRS',
'ALLOWED_INCLUDE_ROOTS',
'TEMPLATE_CONTEXT_PROCESSORS',
'TEMPLATE_DEBUG',
'TEMPLATE_LOADERS',
'TEMPLATE_STRING_IF_INVALID',
]
duplicates = [
value for value in values
if getattr(settings, value) != getattr(global_settings, value)
]
if duplicates:
return [Warning(
"The standalone TEMPLATE_* settings were deprecated in Django "
"1.8 and the TEMPLATES dictionary takes precedence. You must "
"put the values of the following settings into your default "
"TEMPLATES dict: %s." % ", ".join(duplicates),
id='1_8.W001',
)]
return []
| gpl-2.0 | -6,169,457,255,836,149,000 | 32.935484 | 79 | 0.573194 | false |
gigglesninja/senior-design | MissionPlanner/Lib/email/charset.py | 180 | 16043 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10':'iso-8859-16',
'latin-10':'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
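# Illustrative sketch only (not part of the email package): how client code
# might extend the registries above. The 'x-mac-cyrillic' name and the QP
# choices are made up for this example; 'mac_cyrillic' is a standard Python codec.
def _register_charset_sketch():
    add_charset('x-mac-cyrillic', QP, QP, 'koi8-r')   # QP headers/bodies, convert output to koi8-r
    add_alias('mac-cyrillic', 'x-mac-cyrillic')       # map a common alias to the canonical name
    add_codec('x-mac-cyrillic', 'mac_cyrillic')       # codec used for the Unicode round trip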
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
output_charset: Some character sets must be converted before the can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
# Set the input charset after filtering through the aliases and/or codecs
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding != SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
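# Illustrative usage sketch only (not part of the email package): utf-8 headers
# use the shorter of base64/QP, and utf-8 bodies default to base64. The sample
# subject below is a UTF-8 encoded byte string ('Café').
def _charset_usage_sketch():
    utf8 = Charset('utf-8')
    body_cte = utf8.get_body_encoding()          # 'base64'
    encoded = utf8.header_encode('Caf\xc3\xa9')
    # encoded is an RFC 2047 encoded-word, e.g. '=?utf-8?b?Q2Fmw6k=?='
    return body_cte, encoded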
| gpl-2.0 | 4,743,380,534,371,180,000 | 39.007481 | 81 | 0.620645 | false |
erkanay/django | django/http/request.py | 11 | 21075 | from __future__ import unicode_literals
import copy
import os
import re
import sys
from io import BytesIO
from itertools import chain
from pprint import pformat
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import force_bytes, force_text, force_str, iri_to_uri
from django.utils.six.moves.urllib.parse import parse_qsl, urlencode, quote, urljoin, urlsplit
RAISE_ERROR = object()
absolute_http_url_re = re.compile(r"^https?://", re.I)
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
FILES, etc..
"""
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self._post_parse_error = False
def __repr__(self):
return build_request_repr(self)
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
# There is no hostname validation when DEBUG=True
if settings.DEBUG:
return host
domain, port = split_domain_port(host)
if domain and validate_host(domain, settings.ALLOWED_HOSTS):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (
self.path,
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, the absolute URI is
built on ``request.get_full_path()``. If the location is absolute, it is
simply converted to an RFC 3987 compliant URI and returned; if the
location is relative or is scheme-relative (i.e.,
``//example.com/``), it is urljoined to a base URL constructed from the
request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
host=self.get_host(),
path=self.path)
# Join the constructed URL with the provided location, which will
# allow the provided ``location`` to apply query strings to the
# base path as well as override the host, if it begins with //
location = urljoin(current_uri, location)
return iri_to_uri(location)
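# For instance, with Host "example.com" and request path "/music/bands/":
#   request.build_absolute_uri()                      -> 'http://example.com/music/bands/'
#   request.build_absolute_uri('/tickets/?ref=home')  -> 'http://example.com/tickets/?ref=home'
#   request.build_absolute_uri('https://other.org/x') -> 'https://other.org/x' (already absolute)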
def _get_scheme(self):
return 'https' if os.environ.get("HTTPS") == "on" else 'http'
@property
def scheme(self):
# First, check the SECURE_PROXY_SSL_HEADER setting.
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header, None) == value:
return 'https'
# Failing that, fall back to _get_scheme(), which is a hook for
# subclasses to implement.
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
try:
self._body = self.read()
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
# Mark that an error occurred. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
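# Illustrative sketch only (not part of Django): round-trip a value through the
# same signer that get_signed_cookie() verifies against. Assumes settings
# (notably SECRET_KEY) are already configured; 'hint' is an arbitrary cookie name.
def _signed_cookie_sketch():
    request = HttpRequest()
    request.COOKIES['hint'] = signing.get_cookie_signer(salt='hint').sign('abc123')
    assert request.get_signed_cookie('hint') == 'abc123'
    assert request.get_signed_cookie('missing', default=None) is None
    return request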
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
# These are both reset in __init__, but is specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super(QueryDict, self).__init__()
if not encoding:
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
if six.PY3:
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in parse_qsl(query_string or '',
keep_blank_values=True,
encoding=encoding):
self.appendlist(key, value)
else:
for key, value in parse_qsl(query_string or '',
keep_blank_values=True):
try:
value = value.decode(encoding)
except UnicodeDecodeError:
value = value.decode('iso-8859-1')
self.appendlist(force_text(key, encoding, errors='replace'),
value)
self._mutable = mutable
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in six.iterlists(self):
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in six.iterlists(self):
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super(QueryDict, self).setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super(QueryDict, self).setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super(QueryDict, self).pop(key, *args)
def popitem(self):
self._assert_mutable()
return super(QueryDict, self).popitem()
def clear(self):
self._assert_mutable()
super(QueryDict, self).clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super(QueryDict, self).setdefault(key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = force_bytes(safe, self.encoding)
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = force_bytes(k, self.encoding)
output.extend([encode(k, force_bytes(v, self.encoding))
for v in list_])
return '&'.join(output)
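# Illustrative sketch only (not part of Django): basic QueryDict behaviour.
# Assumes settings are configured so DEFAULT_CHARSET is available.
def _querydict_sketch():
    q = QueryDict('a=1&a=2&b=3')
    assert q['a'] == '2'                     # item access returns the last value
    assert q.getlist('a') == ['1', '2']      # all values are kept
    mutable = q.copy()                       # copies are always mutable
    mutable['next'] = '/a&b/'
    assert 'next=/a%26b/' in mutable.urlencode(safe='/')
    return mutable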
def build_request_repr(request, path_override=None, GET_override=None,
POST_override=None, COOKIES_override=None,
META_override=None):
"""
Builds and returns the request's representation string. The request's
attributes may be overridden by pre-processed values.
"""
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = (pformat(GET_override)
if GET_override is not None
else pformat(request.GET))
except Exception:
get = '<could not parse>'
if request._post_parse_error:
post = '<could not parse>'
else:
try:
post = (pformat(POST_override)
if POST_override is not None
else pformat(request.POST))
except Exception:
post = '<could not parse>'
try:
cookies = (pformat(COOKIES_override)
if COOKIES_override is not None
else pformat(request.COOKIES))
except Exception:
cookies = '<could not parse>'
try:
meta = (pformat(META_override)
if META_override is not None
else pformat(request.META))
except Exception:
meta = '<could not parse>'
path = path_override if path_override is not None else request.path
return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(request.__class__.__name__,
path,
six.text_type(get),
six.text_type(post),
six.text_type(cookies),
six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, bytes):
return six.text_type(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lower-cased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
if len(bits) == 2:
return tuple(bits)
return bits[0], ''
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lower-cased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
host = host[:-1] if host.endswith('.') else host
for pattern in allowed_hosts:
pattern = pattern.lower()
match = (
pattern == '*' or
pattern.startswith('.') and (
host.endswith(pattern) or host == pattern[1:]
) or
pattern == host
)
if match:
return True
return False
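# Illustrative checks for the two helpers above (not part of Django); they run
# only when this module is executed directly.
if __name__ == '__main__':
    assert split_domain_port('www.example.com:8000') == ('www.example.com', '8000')
    assert validate_host('sub.example.com', ['.example.com'])      # any subdomain matches
    assert validate_host('example.com', ['.example.com'])          # the bare domain matches too
    assert not validate_host('attacker.com', ['example.com'])      # exact match required
    assert validate_host('anything.org', ['*'])                    # wildcard allows any host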
| bsd-3-clause | 3,606,347,989,027,084,000 | 35.652174 | 110 | 0.585338 | false |
freakboy3742/django | tests/forms_tests/widget_tests/test_splitdatetimewidget.py | 84 | 2669 | from datetime import date, datetime, time
from django.forms import SplitDateTimeWidget
from .base import WidgetTest
class SplitDateTimeWidgetTest(WidgetTest):
widget = SplitDateTimeWidget()
def test_render_empty(self):
self.check_html(self.widget, 'date', '', html=(
'<input type="text" name="date_0"><input type="text" name="date_1">'
))
def test_render_none(self):
self.check_html(self.widget, 'date', None, html=(
'<input type="text" name="date_0"><input type="text" name="date_1">'
))
def test_render_datetime(self):
self.check_html(self.widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="2006-01-10">'
'<input type="text" name="date_1" value="07:30:00">'
))
def test_render_date_and_time(self):
self.check_html(self.widget, 'date', [date(2006, 1, 10), time(7, 30)], html=(
'<input type="text" name="date_0" value="2006-01-10">'
'<input type="text" name="date_1" value="07:30:00">'
))
def test_constructor_attrs(self):
widget = SplitDateTimeWidget(attrs={'class': 'pretty'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" class="pretty" value="2006-01-10" name="date_0">'
'<input type="text" class="pretty" value="07:30:00" name="date_1">'
))
def test_constructor_different_attrs(self):
html = (
'<input type="text" class="foo" value="2006-01-10" name="date_0">'
'<input type="text" class="bar" value="07:30:00" name="date_1">'
)
widget = SplitDateTimeWidget(date_attrs={'class': 'foo'}, time_attrs={'class': 'bar'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
widget = SplitDateTimeWidget(date_attrs={'class': 'foo'}, attrs={'class': 'bar'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
widget = SplitDateTimeWidget(time_attrs={'class': 'bar'}, attrs={'class': 'foo'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
def test_formatting(self):
"""
Use 'date_format' and 'time_format' to change the way a value is
displayed.
"""
widget = SplitDateTimeWidget(
date_format='%d/%m/%Y', time_format='%H:%M',
)
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="10/01/2006">'
'<input type="text" name="date_1" value="07:30">'
))
| bsd-3-clause | -7,669,931,759,818,791,000 | 41.365079 | 94 | 0.572124 | false |
ulif/pulp | server/pulp/server/db/migrations/0027_importer_schema_change.py | 1 | 1091 | import datetime
import isodate
from pulp.server.db.connection import get_collection
def migrate(*args, **kwargs):
"""
Add last_updated and last_override_config to the importer collection.
"""
updated_key = 'last_updated'
config_key = 'last_override_config'
collection = get_collection('repo_importers')
for importer in collection.find():
modified = False
if config_key not in importer:
importer[config_key] = {}
modified = True
# If the key doesn't exist, or does exist but has no value, set it based on the
# last sync time, if possible. Otherwise, set it to now.
if not importer.get(updated_key, None):
try:
importer[updated_key] = isodate.parse_datetime(importer['last_sync'])
# The attribute doesn't exist, or parsing failed. It's safe to set a newer timestamp.
except:
importer[updated_key] = datetime.datetime.now(tz=isodate.UTC)
modified = True
if modified:
collection.save(importer)
| gpl-2.0 | 961,681,694,132,968,600 | 31.088235 | 97 | 0.620532 | false |
jinnykoo/wuyisj | tests/integration/offer/manager_tests.py | 53 | 1741 | import datetime
from django.test import TestCase
from django.utils import timezone
from oscar.test import factories
from oscar.apps.offer import models
class TestActiveOfferManager(TestCase):
def test_includes_offers_in_date_range(self):
# Create offer whose date range includes the current time
now = timezone.now()
start = now - datetime.timedelta(days=1)
end = now + datetime.timedelta(days=1)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(1, len(filtered_offers))
def test_filters_out_expired_offers(self):
# Create offer that has already ended
now = timezone.now()
start = now - datetime.timedelta(days=3)
end = now - datetime.timedelta(days=1)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
def test_filters_out_offers_yet_to_start(self):
# Create offer that hasn't started yet
now = timezone.now()
start = now + datetime.timedelta(days=1)
end = now + datetime.timedelta(days=3)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
def test_filters_out_suspended_offers(self):
# Create offer that is available but with the wrong status
factories.create_offer(
status=models.ConditionalOffer.SUSPENDED)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
| bsd-3-clause | -8,440,224,175,311,983,000 | 36.042553 | 66 | 0.682366 | false |
bitifirefly/edx-platform | lms/djangoapps/verify_student/views.py | 5 | 58018 | """
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View, RedirectView
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce import ecommerce_api_client
from commerce.utils import audit_log
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from ecommerce_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from microsite_configuration import microsite
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from verify_student.ssencrypt import has_valid_signature
from verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
)
from verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from xmodule.modulestore.django import modulestore
from staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
# Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
# If the course mode is not verified (i.e. only paid) then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
requirements = self._requirements(display_steps, request.user.is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
# This parameter should only work for known skip-able steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(request.user.is_active),
'message_key': message,
'platform_name': settings.PLATFORM_NAME,
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': (
get_default_time_display(verification_deadline)
if verification_deadline else ""
),
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def _redirect_if_necessary(
self,
message,
already_verified,
already_paid,
is_enrolled,
course_key
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first unexpired, paid mode, if there is one
for mode in unexpired_modes[course_key]:
if mode.min_price > 0:
return mode
# Otherwise, find the first expired mode
for mode in all_modes[course_key]:
if mode.min_price > 0:
return mode
        # Otherwise, return None so the view knows to respond with a 404.
return None
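    # A minimal usage sketch (assumption about the call site, which lives in the
    # GET handler above this excerpt):
    #
    #     relevant_course_mode = self._get_paid_mode(course_key)
    #     if relevant_course_mode is None:
    #         raise Http404  # no paid mode exists for this course
    #
    # Preferring unexpired modes means an expired paid mode is still returned
    # when it is the only one, rather than hiding the course entirely.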
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
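    # Shape of the returned mapping, for illustration only (the *_REQ constants
    # are class attributes defined outside this excerpt):
    #
    #     {
    #         self.ACCOUNT_ACTIVATION_REQ: True,  # only when the account is inactive
    #         self.PHOTO_ID_REQ: True,            # implied by a verification step
    #         self.WEBCAM_REQ: True,
    #     }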
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
            user (User): The user whose verification status to check.
date_format: optional parameter for formatting datetime
object to string in response
Returns:
            Expiration datetime formatted as a string, or '' if there is no
            valid or pending verification.
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
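    # Usage sketch (assumption about the caller): the GET handler unpacks this as
    #
    #     already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
    #
    # so a free, active enrollment yields (False, True) and an active enrollment
    # in a paid mode (min_price > 0) yields (True, True).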
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': (
get_default_time_display(deadline_datetime)
if deadline_datetime else ""
)
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor): # pylint: disable=invalid-name
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
paid_modes = CourseMode.paid_modes_for_course(course_id)
    # If more than one paid mode (min_price > 0, e.g. verified/professional/no-id-professional)
    # exists for the course, choose the first one
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(login_required)
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
            tuple of (params, response), where `params` is a dict of the validated
            POST parameters (or None on error) and `response` is an error
            HttpResponse (or None if validation succeeded).
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
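    # The caller in post() above unpacks the result as
    #
    #     params, response = self._validate_parameters(request, bool(initial_verification))
    #
    # and returns `response` immediately whenever it is not None.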
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
"""
Associate the verification attempt with a checkpoint within a course.
Arguments:
user (User): The user making the attempt.
attempt (SoftwareSecurePhotoVerification): The verification attempt.
course_key (CourseKey): The identifier for the course.
usage_key (UsageKey): The location of the checkpoint within the course.
Returns:
VerificationCheckpoint
"""
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
checkpoint.add_verification_attempt(attempt)
VerificationStatus.add_verification_status(checkpoint, user, "submitted")
return checkpoint
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': microsite.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = microsite.get_value('default_from_email', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
context = {
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
def _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
): # pylint: disable=invalid-name
"""
Compose subject and message for photo reverification email.
Args:
course_key(CourseKey): CourseKey object
user_id(str): User Id
related_assessment_location(str): Location of reverification XBlock
        status(str): Approval status
        request(HttpRequest): Request object, used to build absolute links in the email
Returns:
        Tuple of (subject, message) strings, or None if an error occurred
"""
try:
usage_key = UsageKey.from_string(related_assessment_location)
reverification_block = modulestore().get_item(usage_key)
course = modulestore().get_course(course_key)
redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))
subject = "Re-verification Status"
context = {
"status": status,
"course_name": course.display_name_with_default,
"assessment": reverification_block.related_assessment
}
# Allowed attempts is 1 if not set on verification block
allowed_attempts = reverification_block.attempts + 1
used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
left_attempts = allowed_attempts - used_attempts
is_attempt_allowed = left_attempts > 0
verification_open = True
if reverification_block.due:
verification_open = timezone.now() <= reverification_block.due
context["left_attempts"] = left_attempts
context["is_attempt_allowed"] = is_attempt_allowed
context["verification_open"] = verification_open
context["due_date"] = get_default_time_display(reverification_block.due)
context['platform_name'] = settings.PLATFORM_NAME
context["used_attempts"] = used_attempts
context["allowed_attempts"] = allowed_attempts
context["support_link"] = microsite.get_value('email_from_address', settings.CONTACT_EMAIL)
re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(course_key),
related_assessment_location
)
)
context["course_link"] = request.build_absolute_uri(redirect_url)
context["reverify_link"] = request.build_absolute_uri(re_verification_link)
message = render_to_string('emails/reverification_processed.txt', context)
log.info(
"Sending email to User_Id=%s. Attempts left for this user are %s. "
"Allowed attempts %s. "
"Due Date %s",
str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
)
return subject, message
# Catch all exception to avoid raising back to view
except: # pylint: disable=bare-except
log.exception("The email for re-verification sending failed for user_id %s", user_id)
def _send_email(user_id, subject, message):
""" Send email to given user
Args:
user_id(str): User Id
subject(str): Subject lines of emails
message(str): Email message body
Returns:
None
"""
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
user = User.objects.get(id=user_id)
user.email_user(subject, message, from_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
"""Sets the status of a credit requirement for the user,
based on a verification checkpoint.
"""
checkpoint = None
try:
checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
except VerificationCheckpoint.DoesNotExist:
log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
if checkpoint is not None:
try:
set_credit_requirement_status(
attempt.user.username,
checkpoint.course_id,
namespace,
checkpoint.checkpoint_location,
status=status,
reason=reason,
)
except Exception: # pylint: disable=broad-except
# Catch exception if unable to add credit requirement
# status for user
log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
_set_user_requirement_status(attempt, 'reverification', 'satisfied')
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
_set_user_requirement_status(
attempt, 'reverification', 'failed', json.dumps(reason)
)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)
    # If this is a re-verification, send the status update email
if checkpoints:
user_id = attempt.user.id
course_key = checkpoints[0].course_id
related_assessment_location = checkpoints[0].checkpoint_location
subject, message = _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
)
_send_email(user_id, subject, message)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, _ = SoftwareSecurePhotoVerification.user_status(request.user)
if status in ["must_reverify", "expired"]:
context = {
"user_full_name": request.user.profile.name,
"platform_name": settings.PLATFORM_NAME,
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
class InCourseReverifyView(View):
"""
The in-course reverification view.
In-course reverification occurs while a student is taking a course.
At points in the course, students are prompted to submit face photos,
which are matched against the ID photos the user submitted during their
initial verification.
Students are prompted to enter this flow from an "In Course Reverification"
XBlock (courseware component) that course authors add to the course.
See https://github.com/edx/edx-reverification-block for more details.
"""
@method_decorator(login_required)
def get(self, request, course_id, usage_id):
"""Display the view for face photo submission.
Args:
request(HttpRequest): HttpRequest object
course_id(str): A string of course id
usage_id(str): Location of Reverification XBlock in courseware
Returns:
HttpResponse
"""
user = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if course is None:
log.error(u"Could not find course '%s' for in-course reverification.", course_key)
raise Http404
try:
checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
except VerificationCheckpoint.DoesNotExist:
log.error(
u"No verification checkpoint exists for the "
u"course '%s' and checkpoint location '%s'.",
course_key, usage_id
)
raise Http404
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
if not initial_verification:
return self._redirect_to_initial_verification(user, course_key, usage_id)
# emit the reverification event
self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)
context = {
'course_key': unicode(course_key),
'course_name': course.display_name_with_default,
'checkpoint_name': checkpoint.checkpoint_name,
'platform_name': settings.PLATFORM_NAME,
'usage_id': usage_id,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/incourse_reverify.html", context)
def _track_reverification_events(self, event_name, user_id, course_id, checkpoint): # pylint: disable=invalid-name
"""Track re-verification events for a user against a reverification
checkpoint of a course.
Arguments:
event_name (str): Name of event being tracked
user_id (str): The ID of the user
course_id (unicode): ID associated with the course
checkpoint (str): Checkpoint name
Returns:
None
"""
log.info(
u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
event_name, user_id, course_id, checkpoint
)
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': "verification",
'label': unicode(course_id),
'checkpoint': checkpoint
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _redirect_to_initial_verification(self, user, course_key, checkpoint):
"""
Redirect because the user does not have an initial verification.
We will redirect the user to the initial verification flow,
passing the identifier for this checkpoint. When the user
submits a verification attempt, it will count for *both*
the initial and checkpoint verification.
Arguments:
user (User): The user who made the request.
course_key (CourseKey): The identifier for the course for which
the user is attempting to re-verify.
checkpoint (string): Location of the checkpoint in the courseware.
Returns:
HttpResponse
"""
log.info(
u"User %s does not have an initial verification, so "
u"he/she will be redirected to the \"verify later\" flow "
u"for the course %s.",
user.id, course_key
)
base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
params = urllib.urlencode({"checkpoint": checkpoint})
full_url = u"{base}?{params}".format(base=base_url, params=params)
return redirect(full_url)
| agpl-3.0 | 6,672,905,549,594,937,000 | 38.334237 | 119 | 0.630494 | false |
cobalys/django | django/contrib/webdesign/lorem_ipsum.py | 230 | 4908 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
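# Illustration: paragraphs(3) returns a three-element list whose first element
# is the fixed COMMON_P text and whose remaining elements are random Latin
# paragraphs; paragraphs(3, common=False) randomizes all three.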
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
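# Illustration (deterministic because the first 19 words are fixed):
#
#     words(7) == 'lorem ipsum dolor sit amet consectetur adipisicing'
#
# With common=False, all `count` words are sampled at random from WORDS.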
| bsd-3-clause | 5,613,577,768,618,588,000 | 46.650485 | 459 | 0.610228 | false |
phalax4/CarnotKE | jyhton/lib-python/2.7/tarfile.py | 8 | 89024 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision: 85213 $"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel ([email protected])"
__date__ = "$Date$"
__cvsid__ = "$Id$"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
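# Round-trip sketch for the two helpers above (illustrative only):
#
#     stn("foo", 6) == "foo\0\0\0"
#     nts("foo\0\0\0") == "foo"
#
# Note that stn() silently truncates strings longer than `length`, which is why
# over-long names need the GNU longname/longlink or pax mechanisms instead.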
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
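# Examples of the two number encodings (digits=8, as used for most header
# fields; illustrative only):
#
#     itn(1000) == "0001750\x00"          # octal digits, NUL-terminated
#     nti("0001750\x00") == 1000
#     itn(8 ** 7, format=GNU_FORMAT)[0] == chr(0200)   # base-256 fallback
#
# The base-256 form is accepted only for GNU_FORMAT; other formats raise
# ValueError("overflow in number field") for values that do not fit in octal.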
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
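# Usage sketch: a 512-byte header is accepted if its stored chksum field matches
# either sum, e.g.
#
#     unsigned_chksum, signed_chksum = calc_chksums(buf)
#     if stored_chksum not in (unsigned_chksum, signed_chksum):
#         ...  # treat the header as corrupt
#
# (`stored_chksum` here is hypothetical; the real check lives in
# TarInfo.frombuf(), outside this excerpt.)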
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
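# Minimal usage sketch (illustrative): copy exactly one member's worth of bytes
# between two file objects, raising IOError if the source runs short:
#
#     copyfileobj(src_fileobj, dst_fileobj, length=tarinfo.size)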
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ""
self.pos = 0L
self.closed = False
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32("") & 0xffffffffL
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = ""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", long(time.time()))
self.__write("\037\213\010\010%s\002\377" % timestamp)
if type(self.name) is unicode:
self.name = self.name.encode("iso-8859-1", "replace")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
self.__write(self.name + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = ""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = ""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != "\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != "\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = "".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = "".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
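# _Stream backs the non-seekable "streaming" modes of TarFile.open(), e.g.
# (call-site sketch; the socket-backed file object is an assumption):
#
#     tar = tarfile.open(fileobj=some_socket_file, mode="r|gz")
#
# i.e. blockwise, forward-only access with optional gzip/bzip2 (de)compression.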
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith("\037\213\010"):
return "gz"
if self.buf[0:3] == "BZh" and self.buf[4:10] == "1AY&SY":
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = ""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
b = [self.buf]
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
b.append(data)
x += len(data)
self.buf = "".join(b)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
getattr(tarinfo, "sparse", None))
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = ""
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = ""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if "\n" in self.buffer:
pos = self.buffer.find("\n") + 1
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if not buf or "\n" in buf:
self.buffer = "".join(buffers)
pos = self.buffer.find("\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = ""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
# Test number fields for values that exceed the field limit or values
        # that are stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", ""), 32),
stn(info.get("gname", ""), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
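        # Splice the checksum into bytes 148-155 of the 512-byte block
        # (512 - 364 = 148 and 512 - 357 = 155): six octal digits, a NUL,
        # and one of the original padding spaces.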
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
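            # The length prefix counts its own digits, so the record length
            # is the fixed point of n = l + len(str(n)).  For a hypothetical
            # keyword "path" and value "foo", l is 10 and the record becomes
            # "12 path=foo\n" (12 bytes).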
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
buf = self.buf
sp = _ringbuffer()
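        # Old GNU sparse layout: up to four (offset, numbytes) pairs start
        # at byte 386 of the header, the isextended flag sits at byte 482
        # and the original (unsparsed) file size at bytes 483-494.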
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
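        # For example, a hypothetical record "12 path=foo\n" is 12 bytes
        # long counting the length digits themselves.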
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
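# Minimal usage sketch for the class below (the archive name "sample.tar.gz"
# and destination directory "dest/" are hypothetical):
#     with TarFile.open("sample.tar.gz", "r:gz") as tar:
#         tar.extractall(path="dest/")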
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors=None, pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
if errors is not None:
self.errors = errors
elif mode == "r":
self.errors = "utf-8"
else:
self.errors = "strict"
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError, e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def _getposix(self):
return self.format == USTAR_FORMAT
def _setposix(self, value):
import warnings
warnings.warn("use the format attribute instead", DeprecationWarning,
2)
if value:
self.format = USTAR_FORMAT
else:
self.format = GNU_FORMAT
posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError), e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
_Stream(name, filemode, comptype, fileobj, bufsize),
**kwargs)
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
if fileobj is None:
fileobj = bltn_open(name, mode + "b")
try:
t = cls.taropen(name, mode,
gzip.GzipFile(name, mode, compresslevel, fileobj),
**kwargs)
except IOError:
raise ReadError("not a gzip file")
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                    # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0L
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print filemode(tarinfo.mode),
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid),
if tarinfo.ischr() or tarinfo.isblk():
print "%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)),
else:
print "%10d" % tarinfo.size,
print "%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6],
print tarinfo.name + ("/" if tarinfo.isdir() else ""),
if verbose:
if tarinfo.issym():
print "->", tarinfo.linkname,
if tarinfo.islnk():
print "link to", tarinfo.linkname,
print
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'.
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
except EnvironmentError, e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0700)
except EnvironmentError, e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
try:
with bltn_open(targetpath, "wb") as target:
copyfileobj(source, target)
finally:
source.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
if hasattr(os, "symlink") and hasattr(os, "link"):
# For systems that support symbolic and hard links.
if tarinfo.issym():
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo), targetpath)
else:
try:
self._extract_member(self._find_link_target(tarinfo), targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError, e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError, e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError, e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError, e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print >> sys.stderr, msg
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
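        # Resume scanning at the previous hit: sparse-file reads are mostly
        # sequential, so the next section is usually found without
        # rescanning the whole list.  Wraps around and returns None after
        # one full pass without a match.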
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
"""TarFile class compatible with standard module zipfile's
ZipFile class.
"""
def __init__(self, file, mode="r", compression=TAR_PLAIN):
from warnings import warnpy3k
warnpy3k("the TarFileCompat class has been removed in Python 3.0",
stacklevel=2)
if compression == TAR_PLAIN:
self.tarfile = TarFile.taropen(file, mode)
elif compression == TAR_GZIPPED:
self.tarfile = TarFile.gzopen(file, mode)
else:
raise ValueError("unknown compression constant")
if mode[0:1] == "r":
members = self.tarfile.getmembers()
for m in members:
m.filename = m.name
m.file_size = m.size
m.date_time = time.gmtime(m.mtime)[:6]
def namelist(self):
return map(lambda m: m.name, self.infolist())
def infolist(self):
return filter(lambda m: m.type in REGULAR_TYPES,
self.tarfile.getmembers())
def printdir(self):
self.tarfile.list()
def testzip(self):
return
def getinfo(self, name):
return self.tarfile.getmember(name)
def read(self, name):
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
def write(self, filename, arcname=None, compress_type=None):
self.tarfile.add(filename, arcname)
def writestr(self, zinfo, bytes):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import calendar
tinfo = TarInfo(zinfo.filename)
tinfo.size = len(bytes)
tinfo.mtime = calendar.timegm(zinfo.date_time)
self.tarfile.addfile(tinfo, StringIO(bytes))
def close(self):
self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
| apache-2.0 | -6,498,345,370,909,645,000 | 33.372201 | 103 | 0.535878 | false |
akkana/scripts | astro/marsoppy.py | 1 | 14537 | #!/usr/bin/env python3
# Plot all the planets' orbits, as viewed from a point that
# floats above the Earth's north ecliptic pole and moves with
# the Earth, to demonstrate phenomena like epicycles and the
# Venus pentagram. Idea from Galen Gisler's planetarium show.
#
# Copyright 2020 by Akkana Peck: Share and enjoy under the GPLv2 or later.
# Weird GLib bug: GLib.timeout_add takes an integer number
# of milliseconds, but if you pass it a float, it sets the timeout
# but then doesn't call draw() automatically after configure(),
# resulting in a transparent window since the black background
# doesn't get drawn. I guess it's some mysterious GTK/Cairo bug.
# Of course I could require an integer timeout when parsing arguments,
# but it's such an amusingly weird bug that I've left it as a float.
import ephem
from tkinter import Tk, Canvas, PhotoImage, mainloop, LEFT
import math
import argparse
import sys, os
from datetime import datetime, timezone
ICONDIR = os.path.expanduser("~/Docs/Preso/mars/pix/")
earth = { "name": "Earth", "obj": ephem.Sun(), "color": "#08f",
"path": [], "xypath": [],
"line": None, "disk": None,
"imgname": os.path.join(ICONDIR, "tinyearth.png") }
mars = { "name": "Mars", "obj": ephem.Mars(), "color": "#f80",
"path": [], "xypath": [], "oppositions": [],
"line": None, "disk": None,
"imgname": os.path.join(ICONDIR, "tinymars.png") }
# oppositions will include date, earth hlon, earth dist, mars hlon, mars dist
oppositions = []
table_header = "%-20s %10s %10s" % ("Date", "Distance", "Size")
table_format = "%-20s %10.3f %10.2f"
def find_next_opposition(start_time):
"""Find oppsition and time of closest approach for the given time range.
Input is the start time, either in ephem.Date or float julian.
Output is two ephem.Dates: opposition, closest approach
"""
t = start_time
timedelta = ephem.hour * 6
mars = ephem.Mars()
sun = ephem.Sun()
min_dist = 20
oppy_date = None
closest_date = None
last_dlon = None
# Loop til we've found opposition, plus 15 days.
# Opposition is when dlon changes sign and is very small.
while not oppy_date or t - oppy_date < 15:
mars.compute(t)
sun.compute(t)
dlon = mars.hlon - sun.hlon
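        # ephem.Sun().hlon is the Earth's heliocentric longitude, so dlon
        # is the Mars-Earth longitude difference; it passes through zero
        # at opposition.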
# Does dlon have the opposite sign from last_dlon?
if last_dlon and abs(dlon) < .1 and \
(dlon == 0 or (dlon < 0) != (last_dlon < 0)):
oppy_date = t
if mars.earth_distance < min_dist:
closest_date = t
min_dist = mars.earth_distance
if oppy_date and closest_date:
return ephem.Date(oppy_date), ephem.Date(closest_date)
last_dlon = dlon
t += timedelta
class OrbitViewWindow():
def __init__(self, auscale, timestep, time_increment=1,
start_time=None, stopped=False):
"""time_increment is in days.
start_time is anything that can be turned into a ephem.Date object.
"""
self.auscale = auscale
self.timestep = timestep
if stopped:
self.stepping = False
else:
self.stepping = True
if start_time:
self.time = ephem.Date(start_time)
else:
# Default to starting 30 days before present
self.time = ephem.Date(datetime.now(tz=timezone.utc)) \
- ephem.hour * 24 * 30
print("Start time:", ephem.Date(self.time))
self.opp_date, self.closest_date = find_next_opposition(self.time)
# print("Next opposition:", self.opp_date)
# print("Next closest:", self.closest_date)
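        # ephem date arithmetic is in days and ephem.hour is 1/24 day,
        # so each step advances time_increment days.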
self.time_increment = ephem.hour * time_increment * 24
self.linewidth = 3
self.width = 1024
self.height = 768
self.halfwidth = self.width/2.
self.halfheight = self.height/2.
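        # Pixels per astronomical unit: half the window height spans
        # auscale AU.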
self.dist_scale = self.halfheight / self.auscale
tkmaster = Tk()
tkmaster.title("Mars Oppositions")
self.canvas = Canvas(tkmaster, bg="black",
width=self.width, height=self.height)
# Start with just the Sun
try:
self.sunimg = PhotoImage(file=os.path.join(ICONDIR, "tinysun.png"))
self.canvas.create_image(self.width/2, self.height/2,
image=self.sunimg)
except:
sunrad = 20
self.canvas.create_oval(self.width/2 - sunrad,
self.height/2 - sunrad,
self.width/2 + sunrad,
self.height/2 + sunrad,
fill="yellow")
self.canvas.pack()
# Canvas requires that the app save a reference to PhotoImages:
# the canvas doesn't keep the references.
try:
earth["tinyimg"] = PhotoImage(file=earth["imgname"])
except:
earth["tinyimg"] = None
try:
mars["tinyimg"] = PhotoImage(file=mars["imgname"])
except:
mars["tinyimg"] = None
tkmaster.bind("<KeyPress-q>", sys.exit)
tkmaster.bind("<KeyPress-space>", self.toggle_stepping)
print(table_header)
# Schedule the first draw
self.step_draw()
def toggle_stepping(self, key):
self.stepping = not self.stepping
def step_draw(self):
"""Calculate and draw the next position of each planet.
"""
# If we don't call step_draw at all, we'll never get further key events
# that could restart the animation. So just step at a much slower pace.
if not self.stepping:
self.canvas.after(500, self.step_draw)
return
# Adding a float to ephem.Date turns it into a float.
# You can get back an ephem.Date with: ephem.Date(self.time).
self.time += self.time_increment
for p in (earth, mars):
p["obj"].compute(self.time)
# ephem treats Earth specially, what a hassle!
# There is no ephem.Earth body; ephem.Sun gives the Earth's
# hlon as hlon, but I guess we need to use earth_distance.
oppy = False
if p["name"] == "Earth":
hlon = p["obj"].hlon
sundist = p["obj"].earth_distance
earthdist = 0
size = 0
else:
hlon = p["obj"].hlon
sundist = p["obj"].sun_distance
earthdist = p["obj"].earth_distance
size = p["obj"].size
if abs(self.time - self.opp_date) <= .5:
oppy = True
if self.opp_date < self.closest_date:
print(table_format % (self.opp_date, earthdist, size),
"Opposition")
print(table_format % (self.closest_date,
earthdist, size),
"Closest approach")
else:
print(table_format % (self.closest_date,
earthdist, size),
"Closest approach")
print(table_format % (self.opp_date, earthdist, size),
"Opposition")
xn, yn = self.planet_x_y(hlon, sundist)
radius = 10
if oppy:
# Create outline circles for Mars and Earth at opposition.
# xn, yn should be Mars since Earth was done first.
# Create the open circle at the bottom of the stacking order.
# There may be a way to do this by passing in tags,
# but I can't find any documentation on tags.
self.canvas.tag_lower(
self.canvas.create_oval(xn-radius, yn-radius,
xn+radius, yn+radius,
outline=p["color"], width=3)
)
earthx = earth["xypath"][-2]
earthy = earth["xypath"][-1]
self.canvas.tag_lower(
self.canvas.create_oval(earthx-radius, earthy-radius,
earthx+radius, earthy+radius,
outline=earth["color"], width=3)
)
localtz = datetime.now().astimezone().tzinfo
oppdate = ephem.to_timezone(self.opp_date, localtz)
opp_str = oppdate.strftime("%Y-%m-%d") + \
'\n%.3f AU\n%.1f"' % (earthdist, size)
if xn < self.width/2:
if yn < self.height / 2:
anchor = "se"
else:
anchor = "ne"
xtxt = xn - radius
else:
if yn < self.height / 2:
anchor = "sw"
else:
anchor = "nw"
xtxt = xn + radius
ytxt = yn
txtobj = self.canvas.create_text(xtxt, ytxt,
fill="white", justify=LEFT,
font=('sans', 14, 'bold'),
anchor=anchor,
text=opp_str)
# Make sure it's not offscreen
xt1, yt1, xt2, yt2 = self.canvas.bbox(txtobj)
if xt1 < 0:
xtxt -= xt1
elif xt2 > self.width:
xtxt -= (xt2 - self.width)
if yt1 < 0:
ytxt -= yt1
elif yt2 > self.height:
ytxt -= yt2 - self.height
self.canvas.coords(txtobj, xtxt, ytxt)
# Done with this opposition: find the next one.
self.opp_date, self.closest_date \
= find_next_opposition(self.time + 500)
p["xypath"].append(int(xn))
p["xypath"].append(int(yn))
if p["line"]:
self.canvas.coords(p["line"], p["xypath"])
if p["tinyimg"]:
self.canvas.coords(p["disk"], xn, yn)
else:
self.canvas.coords(p["disk"], xn-radius, yn-radius,
xn+radius, yn+radius)
else:
p["line"] = self.canvas.create_line(xn, yn, xn, yn,
width=self.linewidth,
fill=p["color"])
# Use images if there are any, else circles
if p["tinyimg"]:
p["disk"] = self.canvas.create_image(xn-radius, yn-radius,
image=p["tinyimg"])
else:
p["disk"] = self.canvas.create_oval(xn-radius, yn-radius,
xn+radius, yn+radius,
fill=p["color"])
p["path"].append((hlon, sundist, earthdist, size))
if self.stepping:
self.canvas.after(self.timestep, self.step_draw)
def planet_x_y(self, hlon, dist):
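        # Project heliocentric polar coordinates (longitude in radians,
        # distance in AU) onto canvas pixels, with the Sun at the window
        # center.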
return (dist * self.dist_scale * math.cos(hlon) + self.halfwidth,
dist * self.dist_scale * math.sin(hlon) + self.halfheight)
def print_table():
"""Super quickie hack to print out a table for the current opposition.
Unrelated to any of the other code in this script.
Ought to be generalized to take start and stop times, etc.
"""
start_date = ephem.Date(datetime.now()) - ephem.hour*24*10
end_date = start_date + ephem.hour*24*40
opp_date, closest_date = find_next_opposition(start_date)
print("Opposition:", opp_date)
print("Closest approach:", closest_date)
# Define "opposition season"
d = opp_date - ephem.hour * 24 * 15
end_date = opp_date + ephem.hour * 24 * 20
mars = ephem.Mars()
print(table_header)
while d < start_date + 60:
        mars.compute(d)
        if abs(d - opp_date) <= .5:
            print(table_format % (ephem.Date(d), mars.earth_distance,
                                  mars.size), "** OPPOSITION")
        elif abs(d - closest_date) <= .5:
            print(table_format % (ephem.Date(d), mars.earth_distance,
                                  mars.size), "** CLOSEST APPROACH")
        else:
            print(table_format % (ephem.Date(d), mars.earth_distance,
                                  mars.size))
        d += ephem.hour * 24
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Draw planet orbits from the north ecliptic pole.
Key bindings:
space Start/stop animation
q quit""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', "--au", dest="auscale", type=float, default=1.7,
action="store",
help="""Scale of the window in astronomical units.""")
parser.add_argument("-s", "--start", dest="start", default=None,
                        help="Start date, YYYY-MM-DD, "
                             "default: 30 days before today")
parser.add_argument('-t', "--timestep", dest="timestep",
type=int, default=30,
help="""Time step in milliseconds (default 30).
Controls how fast the orbits are drawn.""")
parser.add_argument('-S', "--stopped", dest="stopped", action="store_true",
help="Bring up the window but don't immediately "
"start animating: wait for the spacebar.")
parser.add_argument('-T', "--table", dest="table", action="store_true",
help="Forget all that graphic stuff and just "
"print a table of sizes around opposition")
args = parser.parse_args(sys.argv[1:])
if args.start:
args.start = datetime.strptime(args.start, "%Y-%m-%d")
if args.table:
print_table()
sys.exit(0)
win = OrbitViewWindow(auscale=args.auscale, start_time=args.start,
timestep=args.timestep, stopped=args.stopped)
mainloop()
| gpl-2.0 | -640,194,651,697,535,100 | 38.077957 | 79 | 0.508564 | false |
bunnyinc/django-oidc-provider | oidc_provider/tests/test_utils.py | 1 | 2459 | import time
from datetime import datetime
from django.test import TestCase
from django.utils import timezone
from oidc_provider.lib.utils.common import get_issuer
from oidc_provider.lib.utils.token import create_id_token
from oidc_provider.tests.app.utils import create_fake_user
class Request(object):
"""
Mock request object.
"""
scheme = 'http'
def get_host(self):
return 'host-from-request:8888'
class CommonTest(TestCase):
"""
Test cases for common utils.
"""
def test_get_issuer(self):
request = Request()
# from default settings
self.assertEqual(get_issuer(),
'http://localhost:8000/openid')
# from custom settings
with self.settings(SITE_URL='http://otherhost:8000'):
self.assertEqual(get_issuer(),
'http://otherhost:8000/openid')
# `SITE_URL` not set, from `request`
with self.settings(SITE_URL=''):
self.assertEqual(get_issuer(request=request),
'http://host-from-request:8888/openid')
# use settings first if both are provided
self.assertEqual(get_issuer(request=request),
'http://localhost:8000/openid')
# `site_url` can even be overridden manually
self.assertEqual(get_issuer(site_url='http://127.0.0.1:9000',
request=request),
'http://127.0.0.1:9000/openid')
def timestamp_to_datetime(timestamp):
tz = timezone.get_current_timezone()
return datetime.fromtimestamp(timestamp, tz=tz)
class TokenTest(TestCase):
def setUp(self):
self.user = create_fake_user()
def test_create_id_token(self):
start_time = int(time.time())
login_timestamp = start_time - 1234
self.user.last_login = timestamp_to_datetime(login_timestamp)
id_token_data = create_id_token(self.user, aud='test-aud')
iat = id_token_data['iat']
self.assertEqual(type(iat), int)
self.assertGreaterEqual(iat, start_time)
self.assertLessEqual(iat - start_time, 5) # Can't take more than 5 s
self.assertEqual(id_token_data, {
'aud': 'test-aud',
'auth_time': login_timestamp,
'exp': iat + 600,
'iat': iat,
'iss': 'http://localhost:8000/openid',
'sub': str(self.user.id),
})
| mit | 8,580,068,148,021,425,000 | 30.525641 | 77 | 0.589671 | false |
mrry/tensorflow | tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py | 3 | 14433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints given tensors every N iteration.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensors, every_n_iter=100):
"""Initializes a LoggingHook monitor.
Args:
tensors: `dict` of tag to tensors/names or
`iterable` of tensors/names.
      every_n_iter: `int`, print the tensors once every N iterations.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
if every_n_iter <= 0:
raise ValueError("Invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
tensors = {item: item for item in tensors}
self._tensors = tensors
self._every_n_iter = every_n_iter
def begin(self):
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
if self._iter_count % self._every_n_iter == 0:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._iter_count % self._every_n_iter == 0:
stats = []
for tag in sorted(self._current_tensors.keys()):
stats.append("%s = %s" % (tag, run_values.results[tag]))
logging.info("%s", ", ".join(stats))
self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep Hook.
    This hook requests stop after either a number of steps have been
    executed or a last step has been reached. Only one of the two options
    can be specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_step is None:
self._last_step = global_step + self._num_steps - 1
if global_step >= self._last_step:
run_context.request_stop()
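# Minimal usage sketch for StopAtStepHook and LoggingTensorHook above (the
# monitored training loop that consumes the list is assumed and not shown;
# the "loss:0" tensor name is illustrative):
#   hooks = [StopAtStepHook(num_steps=10000),
#            LoggingTensorHook({"loss": "loss:0"}, every_n_iter=100)]
# Each hook's before_run()/after_run() is then invoked around every
# session.run() call made by that loop.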
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaverHook monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
"""
logging.info("Create CheckpointSaverHook.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self):
self._last_saved_time = None
self._last_saved_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
if self._last_saved_time is None:
# Write graph in the first call.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
self._summary_writer.add_graph(ops.get_default_graph())
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_saved_time is None:
self._save(global_step, run_context.session)
if self._save_steps is not None:
if global_step >= self._last_saved_step + self._save_steps:
self._save(global_step, run_context.session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(contrib_variables.get_global_step())
self._save(last_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
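  # For example (illustrative values; `saver` and the directory are assumed to
  # exist, and exactly one of save_steps/save_secs must be given):
  #   CheckpointSaverHook("/tmp/model", save_steps=500, saver=saver)
  # writes /tmp/model/model.ckpt-<global_step> roughly every 500 steps and
  # once more in end() when training finishes.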
class StepCounterHook(session_run_hook.SessionRunHook):
"""Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
self._summary_tag = "global_step/sec"
self._every_n_steps = every_n_steps
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def begin(self):
self._last_reported_time = None
self._last_reported_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results
current_time = time.time()
if self._last_reported_time is None:
self._last_reported_step = global_step
self._last_reported_time = current_time
else:
if global_step >= self._every_n_steps + self._last_reported_step:
added_steps = global_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
self._last_reported_step = global_step
self._last_reported_time = current_time
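  # Worked example: if 100 steps elapse in 2.5 seconds since the last report,
  # steps_per_sec = 100 / 2.5 = 40.0 and a scalar summary tagged
  # "global_step/sec" with value 40.0 is written at the current global step
  # (assuming a summary writer was configured).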
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
"""NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaver` monitor.
Args:
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
"""
# TODO(ipolosukhin): Implement every N seconds.
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
self._scaffold = scaffold
self._save_steps = save_steps
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
self._last_saved_step = None
self._request_summary = True
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._summary_op is not None:
requests["summary"] = self._summary_op
elif self._scaffold.summary_op is not None:
requests["summary"] = self._scaffold.summary_op
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results["global_step"]
if self._last_saved_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._last_saved_step = global_step
if "summary" in run_values.results:
self._summary_writer.add_summary(run_values.results["summary"],
global_step)
self._request_summary = (
global_step >= self._last_saved_step + self._save_steps - 1)
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
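# For reference, _as_graph_element("loss") resolves the tensor "loss:0" in the
# default graph and raises ValueError if "loss:1" also exists (an ambiguous
# multi-output op); names that already contain ":" are looked up verbatim.
# ("loss" is just an illustrative tensor name.)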
| apache-2.0 | -902,728,234,952,753,400 | 35.0825 | 89 | 0.663064 | false |
coderbone/SickRage | lib/tvdb_api/tvdb_api.py | 12 | 35715 | # !/usr/bin/env python2
# encoding:utf-8
# author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
from functools import wraps
import traceback
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import re
import time
import getpass
import StringIO
import tempfile
import warnings
import logging
import zipfile
import datetime as dt
import requests
import xmltodict
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from dateutil.parser import parse
from cachecontrol import CacheControl, caches
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
def log():
return logging.getLogger("tvdb_api")
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
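# Illustrative use of the retry decorator above (the fetch() helper below is
# hypothetical, not part of this module):
#   @retry(tvdb_error, tries=3, delay=1, backoff=2, logger=log())
#   def fetch(url):
#       ...  # any call that may raise tvdb_error
# A call that keeps raising tvdb_error is retried after waits of 1s and then
# 2s before the final attempt is allowed to propagate the exception.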
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
def __init__(self):
self._stack = []
self._lastgc = time.time()
def __setitem__(self, key, value):
self._stack.append(key)
        # keep only the 100 most recent results
if time.time() - self._lastgc > 20:
for o in self._stack[:-100]:
del self[o]
self._stack = self._stack[-100:]
self._lastgc = time.time()
super(ShowContainer, self).__setitem__(key, value)
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getattr__(self, key):
if key in self:
            # Key is a season number, return the Season
return self[key]
if key in self.data:
# Non-numeric request is for show-data
return self.data[key]
raise AttributeError
def __getitem__(self, key):
if key in self:
            # Key is a season number, return the Season
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
            # Season number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term=None, key=None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term=term, key=key)
if len(searchresult) != 0:
results.extend(searchresult)
return results
class Season(dict):
def __init__(self, show=None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getattr__(self, episode_number):
if episode_number in self:
return self[episode_number]
raise AttributeError
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term=None, key=None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term=term, key=key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season=None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getattr__(self, key):
if key in self:
return self[key]
raise AttributeError
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term=None, key=None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
This primarily for use use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
        if term is None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find(unicode(term).lower()) > -1:
return self
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive=False,
select_first=False,
debug=False,
cache=True,
banners=False,
actors=False,
custom_ui=None,
language=None,
search_all_languages=False,
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False,
proxy=None):
"""interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
Retrieved XML are persisted to to disc. If true, stores in
tvdb_api folder under your systems TEMP_DIR, if set to
str/unicode instance it will use this as the cache
location. If False, disables caching. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
            show in any language.
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
tvdb_api in a larger application)
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled, and only the
            main-language xml is used; the actor and banner xml are lost.
"""
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
self.config['useZip'] = useZip
self.config['dvdorder'] = dvdorder
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['session'] = requests.Session()
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
        # List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
"ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": "all"}
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": self.config['language']}
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
self.config['url_updates_all'] = u"%(base_url)s/api/%(apikey)s/updates_all.zip" % self.config
self.config['url_updates_month'] = u"%(base_url)s/api/%(apikey)s/updates_month.zip" % self.config
self.config['url_updates_week'] = u"%(base_url)s/api/%(apikey)s/updates_week.zip" % self.config
self.config['url_updates_day'] = u"%(base_url)s/api/%(apikey)s/updates_day.zip" % self.config
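        # For reference, the %%s placeholders above are filled in later with
        # (series id, language), e.g. with the default API key
        # self.config['url_epInfo'] % (76156, 'en') becomes
        # "http://thetvdb.com/api/0629B785CE550C8D/series/76156/all/en.xml"
        # (76156 is just an illustrative series id).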
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api-u501 (or
tvdb_api-myuser)
"""
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return os.path.join(tempfile.gettempdir(), "tvdb_api")
return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid))
@retry(tvdb_error)
def _loadUrl(self, url, params=None, language=None):
try:
log().debug("Retrieving URL %s" % url)
# get response from TVDB
if self.config['cache_enabled']:
session = CacheControl(sess=self.config['session'], cache=caches.FileCache(self.config['cache_location'], use_dir_lock=True), cache_etags=False)
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
session.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = session.get(url.strip(), params=params)
else:
resp = requests.get(url.strip(), params=params)
resp.raise_for_status()
except requests.exceptions.HTTPError, e:
raise tvdb_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.exceptions.ConnectionError, e:
raise tvdb_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.exceptions.Timeout, e:
raise tvdb_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception as e:
raise tvdb_error("Unknown exception while loading URL " + url + ": " + repr(e))
def process(path, key, value):
key = key.lower()
# clean up value and do type changes
if value:
try:
if key == 'firstaired' and value in "0000-00-00":
new_value = str(dt.date.fromordinal(1))
new_value = re.sub("([-]0{2}){1,}", "", new_value)
fixDate = parse(new_value, fuzzy=True).date()
value = fixDate.strftime("%Y-%m-%d")
elif key == 'firstaired':
value = parse(value, fuzzy=True).date()
value = value.strftime("%Y-%m-%d")
#if key == 'airs_time':
# value = parse(value).time()
# value = value.strftime("%I:%M %p")
except:
pass
return key, value
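        # Note: process() above lower-cases every key and, for 'firstaired',
        # tries to normalise the value (including the '0000-00-00' placeholder)
        # to a 'YYYY-MM-DD' string; any parsing failure is silently ignored.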
if 'application/zip' in resp.headers.get("Content-Type", ''):
try:
log().debug("We recived a zip file unpacking now ...")
zipdata = StringIO.StringIO()
zipdata.write(resp.content)
myzipfile = zipfile.ZipFile(zipdata)
return xmltodict.parse(myzipfile.read('%s.xml' % language), postprocessor=process)
except zipfile.BadZipfile:
raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")
else:
try:
return xmltodict.parse(resp.content.decode('utf-8'), postprocessor=process)
except:
return dict([(u'data', None)])
def _getetsrc(self, url, params=None, language=None):
"""Loads a URL using caching, returns an ElementTree of the source
"""
try:
return self._loadUrl(url, params=params, language=language).values()[0]
except Exception, e:
raise tvdb_error(e)
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
Since the nice-to-use tvdb[1][24]['name] interface
makes it impossible to do tvdb[1][24]['name] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show=self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
- Replaces & with &
- Trailing whitespace
"""
data = unicode(data).replace(u"&", u"&")
data = data.strip()
return data
def search(self, series):
"""This searches TheTVDB.com for the series name
and returns the result list
"""
series = series.encode("utf-8")
log().debug("Searching for show %s" % series)
self.config['params_getSeries']['seriesname'] = series
results = self._getetsrc(self.config['url_getSeries'], self.config['params_getSeries'])
if not results:
return
return results.values()[0]
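        # e.g. Tvdb().search("scrubs") returns the parsed series data: a single
        # dict or a list of dicts with keys such as 'seriesname' and 'id' (as
        # consumed by _nameToSid), or None when thetvdb.com reports no matches.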
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
allSeries = self.search(series)
if not allSeries:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show search returned zero results (cannot find show on TVDB)")
if not isinstance(allSeries, list):
allSeries = [allSeries]
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config=self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config=self.config)
return ui.selectSeries(allSeries)
def _parseBanners(self, sid):
"""Parses banners XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
Banners are retrieved using t['show name]['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc(self.config['url_seriesBanner'] % (sid))
if not bannersEt:
log().debug('Banners result returned zero')
return
banners = {}
for cur_banner in bannersEt['banner'] if isinstance(bannersEt['banner'], list) else [bannersEt['banner']]:
bid = cur_banner['id']
btype = cur_banner['bannertype']
btype2 = cur_banner['bannertype2']
if btype is None or btype2 is None:
continue
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for k, v in cur_banner.items():
if k is None or v is None:
continue
k, v = k.lower(), v.lower()
banners[btype][btype2][bid][k] = v
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
Actors are retrieved using t['show name]['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
if not actorsEt:
log().debug('Actors result returned zero')
return
cur_actors = Actors()
for cur_actor in actorsEt['actor'] if isinstance(actorsEt['actor'], list) else [actorsEt['actor']]:
curActor = Actor()
for k, v in cur_actor.items():
if k is None or v is None:
continue
k = k.lower()
if k == "image":
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
curActor[k] = v
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language, getEpInfo=False):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
'Configured language %s override show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
if not seriesInfoEt:
log().debug('Series result returned zero')
raise tvdb_error("Series result returned zero")
# get series data
for k, v in seriesInfoEt['series'].items():
if v is not None:
if k in ['banner', 'fanart', 'poster']:
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setShowData(sid, k, v)
# get episode data
if getEpInfo:
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
if self.config['useZip']:
url = self.config['url_epInfo_zip'] % (sid, language)
else:
url = self.config['url_epInfo'] % (sid, language)
epsEt = self._getetsrc(url, language=language)
if not epsEt:
log().debug('Series results incomplete')
raise tvdb_showincomplete("Show search returned incomplete results (cannot find complete show on TVDB)")
if 'episode' not in epsEt:
return False
episodes = epsEt["episode"]
if not isinstance(episodes, list):
episodes = [episodes]
for cur_ep in episodes:
if self.config['dvdorder']:
log().debug('Using DVD ordering.')
                    use_dvd = cur_ep['dvd_season'] is not None and cur_ep['dvd_episodenumber'] is not None
else:
use_dvd = False
if use_dvd:
seasnum, epno = cur_ep['dvd_season'], cur_ep['dvd_episodenumber']
else:
seasnum, epno = cur_ep['seasonnumber'], cur_ep['episodenumber']
if seasnum is None or epno is None:
log().warning("An episode has incomplete season/episode number (season: %r, episode: %r)" % (
seasnum, epno))
continue # Skip to next episode
# float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data
seas_no = int(float(seasnum))
ep_no = int(float(epno))
for k, v in cur_ep.items():
k = k.lower()
if v is not None:
if k == 'filename':
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setItem(sid, seas_no, ep_no, k, v)
return True
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]))
return self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries(name)
if isinstance(selected_series, dict):
selected_series = [selected_series]
sids = list(int(x['id']) for x in selected_series if
self._getShowData(int(x['id']), self.config['language']))
self.corrections.update(dict((x['seriesname'], int(x['id'])) for x in selected_series))
return sids
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'], True)
return self.shows[key]
key = str(key).lower()
self.config['searchterm'] = key
selected_series = self._getSeries(key)
if isinstance(selected_series, dict):
selected_series = [selected_series]
[[self._setShowData(show['id'], k, v) for k, v in show.items()] for show in selected_series]
return selected_series
def __repr__(self):
return str(self.shows)
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
| gpl-3.0 | -4,359,985,044,943,779,300 | 35.443878 | 160 | 0.552989 | false |
wldtyp/flask | tests/test_helpers.py | 1 | 27016 | # -*- coding: utf-8 -*-
"""
tests.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import datetime
import flask
from logging import StreamHandler
from werkzeug.exceptions import BadRequest
from werkzeug.http import parse_cache_control_header, parse_options_header
from werkzeug.http import http_date
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
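# has_encoding() is used below to disable test_modified_url_encoding when the
# euc-kr codec is not available on the running interpreter.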
class TestJSON(object):
def test_post_empty_json_adds_exception_to_response_content_in_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = True
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' in rv.data
def test_post_empty_json_wont_add_exception_to_response_if_no_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = False
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' not in rv.data
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
assert rv.status_code == 400
def test_json_custom_mimetypes(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.request.get_json()
c = app.test_client()
rv = c.post('/json', data='"foo"', content_type='application/x+json')
assert rv.data == b'foo'
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
content_type='application/json; charset=iso-8859-15')
assert resp.data == u'Hällo Wörld'.encode('utf-8')
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == '"\\u2603"'
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == u'"\u2603"'
def test_jsonify_basic_types(self):
"""Test jsonify with basic types."""
# Should be able to use pytest parametrize on this, but I couldn't
# figure out the correct syntax
# https://pytest.org/latest/parametrize.html#pytest-mark-parametrize-parametrizing-test-functions
test_data = (0, 1, 23, 3.14, 's', "longer string", True, False,)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_data):
url = '/jsonify_basic_types{0}'.format(i)
app.add_url_rule(url, str(i), lambda x=d: flask.jsonify(x))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_dicts(self):
"""Test jsonify with dicts and kwargs unpacking."""
d = dict(
a=0, b=23, c=3.14, d='t', e='Hi', f=True, g=False,
h=['test list', 10, False],
i={'test':'dict'}
)
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_arrays(self):
"""Test jsonify of lists and args unpacking."""
l = [
0, 42, 3.14, 't', 'hello', True, False,
['test list', 2, False],
{'test':'dict'}
]
app = flask.Flask(__name__)
@app.route('/args_unpack')
def return_args_unpack():
return flask.jsonify(*l)
@app.route('/array')
def return_array():
return flask.jsonify(l)
c = app.test_client()
for url in '/args_unpack', '/array':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == l
def test_jsonify_date_types(self):
"""Test jsonify with datetime.date and datetime.datetime types."""
test_dates = (
datetime.datetime(1973, 3, 11, 6, 30, 45),
datetime.date(1975, 1, 5)
)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_dates):
url = '/datetest{0}'.format(i)
app.add_url_rule(url, str(i), lambda val=d: flask.jsonify(x=val))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data)['x'] == http_date(d.timetuple())
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
assert rv.data == b'3'
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
assert rv == u'"\\u003c/script\\u003e"'
assert type(rv) == text_type
rv = render('{{ "</script>"|tojson }}')
assert rv == '"\\u003c/script\\u003e"'
rv = render('{{ "<\0/script>"|tojson }}')
assert rv == '"\\u003c\\u0000/script\\u003e"'
rv = render('{{ "<!--<script>"|tojson }}')
assert rv == '"\\u003c!--\\u003cscript\\u003e"'
rv = render('{{ "&"|tojson }}')
assert rv == '"\\u0026"'
rv = render('{{ "\'"|tojson }}')
assert rv == '"\\u0027"'
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
assert rv == '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>'
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
assert rv.data == b'"<42>"'
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
assert rv.status_code == 200
assert rv.data == u'정상처리'.encode('utf-8')
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
assert app.config['JSON_SORT_KEYS'] == True
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
sorted_by_str = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo"',
'}',
'}'
]
sorted_by_int = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
]
try:
assert lines == sorted_by_int
except AssertionError:
assert lines == sorted_by_str
class TestSendfile(object):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert rv.mimetype == 'text/html'
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
assert rv.data == f.read()
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
assert rv.mimetype == 'text/html'
rv.close()
def test_send_file_object(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'), mode='rb')
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
assert rv.data == f.read()
assert rv.mimetype == 'text/html'
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
assert rv.mimetype == 'text/html'
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = False
with app.test_request_context():
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'application/octet-stream'
rv.close()
# etags
assert len(captured) == 1
with catch_deprecation_warnings() as captured:
class PyStringIO(object):
def __init__(self, *args, **kwargs):
self._io = StringIO(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._io, name)
f = PyStringIO('Test')
f.name = 'test.txt'
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# attachment_filename and etags
assert len(captured) == 3
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# etags
assert len(captured) == 1
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
assert 'x-sendfile' not in rv.headers
rv.close()
# etags
assert len(captured) == 1
def test_attachment(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
rv.close()
# mimetypes + etag
assert len(captured) == 2
with app.test_request_context():
assert options['filename'] == 'index.html'
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.html'
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
assert rv.mimetype == 'text/plain'
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.txt'
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
def test_send_from_directory(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
rv = flask.send_from_directory('static', 'hello.txt')
rv.direct_passthrough = False
assert rv.data.strip() == b'Hello Subdomain'
rv.close()
def test_send_from_directory_bad_request(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
with pytest.raises(BadRequest):
flask.send_from_directory('static', 'bad\x00')
class TestLogging(object):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
assert app.logger is logger1
assert logger1.name == __name__
app.logger_name = __name__ + '/test_logger_cache'
assert app.logger is not logger1
def test_debug_log(self, capsys):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
c.get('/')
out, err = capsys.readouterr()
assert 'WARNING in test_helpers [' in err
assert os.path.basename(__file__.rsplit('.', 1)[0] + '.py') in err
assert 'the standard library is dead' in err
assert 'this is a debug statement' in err
with pytest.raises(ZeroDivisionError):
c.get('/exc')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
assert app.logger.level == 10
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
err = out.getvalue()
assert 'Exception on / [GET]' in err
assert 'Traceback (most recent call last):' in err
assert '1 // 0' in err
assert 'ZeroDivisionError:' in err
def test_processor_exceptions(self):
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
assert rv.status_code == 500
assert rv.data == b'Hello Server Error'
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _anchor='x y') == '/#x%20y'
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
pytest.raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_for_with_alternating_schemes(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _external=True) == 'http://localhost/'
assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'
assert flask.url_for('index', _external=True) == 'http://localhost/'
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
assert flask.url_for('myview', _method='GET') == '/myview/'
assert flask.url_for('myview', id=42, _method='GET') == '/myview/42'
assert flask.url_for('myview', _method='POST') == '/myview/create'
class TestNoImports(object):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self, modules_tmpdir):
modules_tmpdir.join('importerror.py').write('raise NotImplementedError()')
try:
flask.Flask('importerror')
except NotImplementedError:
assert False, 'Flask(import_name) is importing import_name.'
class TestStreaming(object):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate(hello):
yield hello
yield flask.request.args['name']
yield '!'
return flask.Response(generate('Hello '))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
assert called == [42]
| bsd-3-clause | -7,277,954,857,394,219,000 | 35.530447 | 105 | 0.509335 | false |
zstackio/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/zstack_test/vcenter_checker/zstack_vcenter_snapshot_checker.py | 2 | 9805 | import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.header.volume as vl_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.header.snapshot as sp_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as cfg_ops
import apibinding.inventory as inventory
class zstack_vcenter_backuped_snapshot_checker(checker_header.TestChecker):
def check(self):
'''
Will check that backed-up snapshots exist on their backup storage.
'''
super(zstack_vcenter_backuped_snapshot_checker, self).check()
backuped_snapshots = self.test_obj.get_backuped_snapshots()
for sp in backuped_snapshots:
sp_bs_refs = sp.get_snapshot().backupStorageRefs
for sp_bs_ref in sp_bs_refs:
bs_uuid = sp_bs_ref.backupStorageUuid
sp_path = sp_bs_ref.installPath
sp_uuid = sp_bs_ref.volumeSnapshotUuid
bs_host = test_lib.lib_get_backup_storage_host(bs_uuid)
bs = test_lib.lib_get_backup_storage_by_uuid(sp_bs_ref.backupStorageUuid)
if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
sp_info = sp_path.split('://')[1].split('/')
sp_path = '%s/registry/v1/repos/public/%s/manifests/revisions/%s' % (bs.url, sp_info[0], sp_info[1])
if test_lib.lib_check_file_exist(bs_host, sp_path):
test_util.test_logger('Checker result: backuped snapshot:%s is found in backup storage:%s in path: %s' % (sp_uuid, bs_uuid, sp_path))
if self.exp_result == False:
return self.judge(True)
else:
test_util.test_logger('Checker result: backuped snapshot:%s is NOT found in backup storage:%s in path: %s' % (sp_uuid, bs_uuid, sp_path))
if self.exp_result == True:
return self.judge(False)
test_util.test_logger('Checker result: Finish backuped snapshot checking')
return self.judge(self.exp_result)
class zstack_vcenter_snapshot_checker(checker_header.TestChecker):
def check(self):
'''
Will use the snapshot createDataVolumeFromSnapshot function to do the checking.
'''
super(zstack_vcenter_snapshot_checker, self).check()
target_volume = self.test_obj.get_target_volume()
if target_volume.get_volume().type == 'Root':
test_util.test_logger('Checking Result: skip snapshot checking, since target volume: %s is a Root volume' % target_volume.get_volume().uuid)
return self.judge(self.exp_result)
#snapshots = self.test_obj.get_snapshot_list()
sp = self.test_obj.get_current_snapshot()
if not sp:
test_util.test_logger('Checker result: no available current snapshot to be checked')
return self.judge(self.exp_result)
utility_vm = self.test_obj.get_utility_vm()
vm_inv = utility_vm.get_vm()
result = True
# Only need to test the latest current snapshot, since previous snapshot
# operations should already have been checked and are assumed not to change.
# If that assumption does not hold, change the following 2 lines to the next line:
#for sp in snapshots.get_snapshot_list():
if sp.get_state() == sp_header.DELETED:
#continue
test_util.test_logger('Checking Result: snapshot status is Deleted, it should not be tested')
return self.judge(self.exp_result)
#calculate checking point
checking_points_list = self.test_obj.get_checking_points(sp)
volume_obj = sp.create_data_volume()
volume_obj.attach(utility_vm)
import tempfile
with tempfile.NamedTemporaryFile() as script:
script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s >/dev/null
mount $device %s >/dev/null
mkdir -p %s >/dev/null
checking_result=''
ls %s
umount %s >/dev/null
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
test_lib.WOODPECKER_MOUNT_POINT, \
zstack_sp_header.checking_point_folder, \
zstack_sp_header.checking_point_folder, \
test_lib.WOODPECKER_MOUNT_POINT))
script.flush()
rsp = test_lib.lib_execute_shell_script_in_vm(vm_inv, \
script.name)
volume_obj.detach()
volume_obj.delete()
if rsp:
result_list = rsp.result.split()
temp_checking_list = list(result_list)
temp_exp_list = list(checking_points_list)
for item in result_list:
if item in checking_points_list:
temp_checking_list.remove(item)
temp_exp_list.remove(item)
if len(temp_exp_list) == 0:
if len(temp_checking_list) == 0:
test_util.test_logger('Checker result: snapshot: %s integrity checking pass' % sp.get_snapshot().uuid)
else:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s' % (sp.get_snapshot().uuid, temp_checking_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
if len(temp_checking_list) == 0:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something less than expected: %s' % (sp.get_snapshot().uuid, temp_exp_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s and there are something less than expected: %s ' % (sp.get_snapshot().uuid, temp_checking_list, temp_exp_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
test_util.test_logger('Checker result: check snapshot: %s failed with checking script.' % sp.get_snapshot().uuid)
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
return self.judge(result)
class zstack_vcenter_snapshot_tree_checker(checker_header.TestChecker):
def check(self):
'''
Will check snapshot tree correctness
Note: changing the allowed tree depth affects snapshots that already exist.
If snapshots were created before incrementalSnapshot.maxNum was changed,
the checker results will be unreliable.
'''
import json
import zstacklib.utils.jsonobject as jsonobject
super(zstack_vcenter_snapshot_tree_checker, self).check()
snapshots = self.test_obj.get_snapshot_list()
if not self.test_obj.get_snapshot_head():
test_util.test_logger('Snapshot is not created, skipped checking')
return self.judge(self.exp_result)
utility_vm = self.test_obj.get_utility_vm()
vm_inv = utility_vm.get_vm()
volume_obj = self.test_obj.get_target_volume()
volume_uuid = volume_obj.get_volume().uuid
if volume_obj.get_state() == vl_header.DELETED or \
(volume_obj.get_volume().type == 'Root' and \
volume_obj.get_target_vm().get_state() == vm_header.DESTROYED):
test_util.test_logger('Checker result: target volume is deleted, cannot get and check snapshot tree status')
return self.judge(self.exp_result)
vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \
'incrementalSnapshot.maxNum')
for vol_tree in vol_trees:
tree = json.loads(jsonobject.dumps(vol_tree))['tree']
tree_max_depth = find_tree_max_depth(tree)
if tree_max_depth > (int(tree_allowed_depth) + 1):
test_util.test_logger(\
'Checker result: volume: %s snapshot tree: %s depth checking failure. The max \
allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, \
tree_allowed_depth, str(tree_max_depth - 1)))
return self.judge(False)
test_util.test_logger(\
'Checker result: volume: %s snapshot tree depth checking pass. The max allowed \
depth is : %s. The real snapshot max depth is: %s' % \
(volume_uuid, tree_allowed_depth, str(tree_max_depth - 1)))
return self.judge(True)
def find_tree_max_depth(tree):
'''
tree is a dictionary. Its children were put under keyword of 'children'.
'''
if not tree:
return 0
child_depth = 0
if not tree.has_key('children'):
test_util.test_fail('Snapshot tree has invalid format, it does not have a key for children.')
if tree['children']:
child_depth = 1
for child in tree['children']:
current_child_depth = find_tree_max_depth(child)
child_depth = max(child_depth, current_child_depth)
return child_depth + 1
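# Example (illustrative): a chain of root -> child -> grandchild, expressed in
# the minimal {'children': [...]} form this helper needs, has depth 3:
#
#   find_tree_max_depth({'children': [{'children': [{'children': []}]}]}) == 3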
| apache-2.0 | 7,924,296,997,090,319,000 | 48.520202 | 252 | 0.624069 | false |
tuxnani/open-telugu | tests/tamil_regexp.py | 3 | 4571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
import codecs
import re
from opentamiltests import *
from tamil.utils.santhirules import joinWords
from tamil.regexp import make_pattern
class SantheeRules(unittest.TestCase):
def test_filebased_conjugation_tests( self ):
# write to file
with codecs.open('out.txt','r','utf-8') as FP:
data = FP.readlines()
for no,combo in enumerate(data):
print("testing #",no)
word = combo.split('=')
parts = word[0].split('+')
joind = word[1].strip()
a = parts[0].strip()
b = parts[1].strip()
jword = joinWords(a,b)
if ( LINUX ):
print(a + u' + ' + b + u' = ' + jword+u'\n')
print(jword,u'|',joind)
self.assertEqual( joind, jword )
return
def test_grammar_conjugation( self ):
a = u'என்ன'
b = u'என்ன'
w = joinWords(a, b).encode('utf8')
print( w )
self.assertTrue( w.decode('utf-8') == u'என்னென்ன')
class TamilRegex(unittest.TestCase):
def test_basic_A2Z( self ):
pattern = u"[அ-ஔ]+"
expected = u"[அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no2_A2Z( self ):
pattern = u"^[அ-உ]+"
expected = u"^[அ,ஆ,இ,ஈ,உ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no3_A2Z( self ):
pattern = u"^[அ-ஔ][0-9]+"
expected = u"^[அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ][0-9]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no4_A2Z( self ):
pattern = u"^[அ-ஔ][0-9]+"
expected = u"^[அ,ஆ,இ,ஈ,உ,ஊ,எ,ஏ,ஐ,ஒ,ஓ,ஔ][0-9]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no5_A2Z( self ):
pattern = u"^[க்-ம்]+"
expected = u"^[க்,ச்,ட்,த்,ப்,ற்,ஞ்,ங்,ண்,ந்,ம்]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_uyirmei_no6_A2Z( self ):
pattern = u"[ப-பௌ]+"
expected = u"[ப,பா,பி,பீ,பு,பூ,பெ,பே,பை,பொ,போ,பௌ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
class GrepTests(unittest.TestCase):
def setUp(self):
self.data = codecs.open('data/richmond.txt','r','utf-8').readlines()
print("\ndata size = %d L"%len(self.data))
def search_test(self,pattern,expected):
return self.match_test(pattern,expected,fcn=re.search)
def match_test(self,pattern,expected,data=None,fcn=re.match):
[repatt,ymp] = make_pattern( pattern )
word_matches = []
if not data:
data = self.data
for idx,line in enumerate(data):
q = fcn(repatt,line.strip())
if q:
print("matched @ %d"%idx)
word_matches.append( idx )
self.assertEqual( word_matches, expected )
return
def test_exprs(self):
pattern = u"^ரிச்.*[க்-ழ்]$"
expected = [0,1,2,3,7,8,10]
self.match_test( pattern, expected )
def test_match_letterend_exprs(self):
pattern = u"டு$"
expected = [5,6]
self.search_test(pattern,expected)
return
def test_match_exprs(self):
pattern = u".*[^க்-ழ்]$"
expected = [4,5,6,9]
self.match_test(pattern,expected)
return
def test_demo_regex(self):
pattern = u"^[க-ள].+[க்-ள்]$"
data = [u"இந்த",u"தமிழ்",u"ரெகேஸ்புல்",u"\"^[க-ள].+[க்-ள்]$\"",u"இத்தொடரில்", u"எதை", u"பொருந்தும்"]
expected = [1,2,6] # i.e.தமிழ்
self.match_test(pattern,expected,data)
return
if __name__ == '__main__':
unittest.main()
| mit | -2,627,890,649,315,196,000 | 31.742188 | 108 | 0.518492 | false |
orcasgit/python-healthvault | healthvaultlib/status_codes.py | 1 | 10202 | class HealthVaultStatus(object):
"""Status codes that HealthVault can return.
`See also <http://msdn.microsoft.com/en-us/library/hh567902.aspx>`_
"""
OK = 0 # The request was successful.
FAILED = 1 # Generic failure due to unknown causes or internal error.
BAD_HTTP = 2 # Http protocol problem.
INVALID_XML = 3 # Request xml cannot be parsed or nonconformant.
BAD_SIG = 4 # Signature validation failed
BAD_METHOD = 5 # No such method.
INVALID_APP = 6 # App does not exist, app is invalid, app is not active, or calling IP is invalid.
CREDENTIAL_TOKEN_EXPIRED = 7 # Credential token has expired need a new one.
INVALID_TOKEN = 8 # Auth token malformed or otherwise busted.
INVALID_PERSON = 9 # Person does not exist or is not active.
INVALID_RECORD = 10 # Given record id does not exist.
ACCESS_DENIED = 11 # Person or app does not have sufficient rights.
NYI = 12 # The functionality being accessed is not yet implemented.
INVALID_THING = 13 # invalid thing identifier.
CANT_CONVERT_UNITS = 14 # Data table already exists with incompatible units.
INVALID_FILTER = 15 # Missing or invalid GetThingsFilter.
INVALID_FORMAT = 16 # Missing or invalid GetThings format specifier.
MISSING_SHARED_SECRET = 17 # A credential was supplied without a shared secret.
INVALID_APPAUTH = 18 # authorized_applications entry missing.
INVALID_THING_TYPE = 19 # Thing type doesn't exist.
THING_TYPE_IMMUTABLE = 20 # Can't update things of this type.
THING_TYPE_UNCREATABLE = 21 # Can't create things of this type.
DUPLICATE_CREDENTIAL_FOUND = 22 # Duplicate Credential found.
INVALID_RECORD_NAME = 23 # Invalid Record name.
DRUG_NOT_FOUND = 24 # Cannot find the drug specified.
INVALID_PERSON_STATE = 25 # Invalid person state.
INVALID_CODESET = 26 # Requested code set was not found.
INVALID_VALIDATION_TOKEN = 28 # Invalid validation token for contact email validation.
INVALID_CONTACT_EMAIL = 30 # Invalid contact email
INVALID_LOGIN_NAME = 31 # Invalid login name.
INVALID_PASSWORD = 32 # Invalid password.
INVALID_OPENQUERY = 33 # Open query id not found.
INVALID_TRANSFORM = 34 # Transform cannot be loaded.
INVALID_RELATIONSHIP_TYPE = 35 # Invalid relationship type.
INVALID_CREDENTIAL_TYPE = 36 # Invalid credential type.
INVALID_RECORD_STATE = 37 # Invalid record state.
APP_AUTH_NOT_REQUIRED = 38 # Application authorization is not required for this app.
REQUEST_TOO_LONG = 39 # The request provided has exceeded maximum allowed request length.
DUPLICATE_AUTHORIZED_RECORD_FOUND = 40 # Duplicate authorized record found.
EMAIL_NOT_VALIDATED = 41 # Person email must be validated but it's not
MAIL_ADDRESS_MALFORMED = 45 # The email address specified to SendInsecureMessage is malformed.
PASSWORD_NOT_STRONG = 46 # The password does not meet the complexity requirements.
CANNOT_REMOVE_LAST_CUSTODIAN = 47 # The last custodian for a record cannot be removed.
INVALID_EMAIL_ADDRESS = 48 # The email address is invalid.
REQUEST_TIMED_OUT = 49 # The request sent to HealthVault reached its time to live and is now too old to be processed.
INVALID_SPONSOR_EMAIL = 50 # The sponsor email address is invalid.
INVALID_PROMOTION_TOKEN = 51 # Promotion token is invalid.
INVALID_RECORD_AUTHORIZATION_TOKEN = 52 # Record authorization token is invalid.
TOO_MANY_GROUPS_IN_QUERY = 53 # GetThings Query has too many request groups.
GRANT_AUTHZ_EXCEEDS_DEFAULT = 54 # The permissions to be granted exceed the default permissions available to be granted, e.g. an attempt to grant all access when only read access is available.
INVALID_VOCABULARY = 55 # Requested vocabulary was not found
DUPLICATE_APPLICATION_FOUND = 56 # An application with the same ID already exists
RECORD_AUTHORIZATION_TOKEN_EXPIRED = 57 # Record authorization token has expired.
RECORD_AUTHORIZATION_DOES_NOT_EXIST = 58 # Record authorization does not exist.
THING_TYPE_UNDELETABLE = 59 # Can't delete things of this type.
VERSION_STAMP_MISSING = 60 # Version stamp is missing.
VERSION_STAMP_MISMATCH = 61 # Version stamp mismatch.
EXPIRED_OPENQUERY = 62 # Requested open query has expired.
INVALID_PUBLIC_KEY = 63 # Public key is invalid.
DOMAIN_NAME_NOT_SET = 64 # The application's domain name hasn't been set.
AUTHENTICATED_SESSION_TOKEN_EXPIRED = 65 # Authenticated session token has expired need a new one.
INVALID_CREDENTIAL_KEY = 66 # The credential key was not found.
INVALID_PERSON_ID = 67 # Pseudo id for person not valid
RECORD_QUOTA_EXCEEDED = 68 # The size occupied by the things in the put things request will cause the record to exceed the size quota alloted to it.
INVALID_DATETIME = 69 # The DateTime supplied is invalid (exceeds the bounds for the DateTime)
BAD_CERT = 70 # Certificate validation failed.
RESPONSE_TOO_LONG = 71 # The response has exceeded maximum allowed size.
INVALID_VERIFICATION_QUESTION = 72 # Verification question for connect request invalid.
INVALID_VERIFICATION_ANSWER = 73 # The verification answer for the connect request is invalid.
INVALID_IDENTITY_CODE = 74 # There is no connect request corresponding to the given code.
RETRY_LIMIT_EXCEEDED = 75 # Maximum number of retries has been exceeded.
CULTURE_NOT_SUPPORTED = 76 # Request header culture not supported.
INVALID_FILE_EXTENSION = 77 # The file extension is not supported.
INVALID_VOCABULARY_ITEM = 78 # The vocabulary item does not exist.
DUPLICATE_CONNECT_REQUEST_FOUND = 79 # Duplicate connect request found.
INVALID_SPECIAL_ACCOUNT_TYPE = 80 # The account type specified is invalid.
DUPLICATE_TYPE_FOUND = 81 # A type with the specified identifier already exists.
CREDENTIAL_NOT_FOUND = 82 # Credential not found
CANNOT_REMOVE_LAST_CREDENTIAL = 83 # Attempt to delete last credential associated with an account
CONNECT_REQUEST_ALREADY_AUTHORIZED = 84 # The connect request has been previously authorized.
INVALID_THING_TYPE_VERSION = 85 # The type specified to update an instance of a thing is an older version of the type than the existing instance.
CREDENTIALS_LIMIT_EXCEEDED = 86 # The maximum number of allowed credentials has been exceeded.
INVALID_METHOD = 87 # One or more invalid methods were specified in the method mask.
INVALID_BLOB_REF_URL = 88 # The blob reference url supplied for the blob streaming API is invalid.
CANNOT_GET_STREAMED_OTHER_DATA = 89 # Other data put in to Healthvault via the streaming API cannot be requested as an other data string.
UPDATE_THING_TYPE_VERSION_NO_DATA_XML = 90 # The type version of the thing cannot be changed without a data xml supplied for validation.
UNSUPPORTED_CONTENT_ENCODING = 91 # The content encoding specified for the blob is not supported.
CONTENT_ENCODING_DATA_MISMATCH = 92 # The content encoding specified for the blob does not match the blob data.
APPLICATION_LIMIT_EXCEEDED = 93 # The user exceeded the maximum number of applications allowed.
INVALID_BINARY_CONTENT_ID = 94 # The specified binary content identifier was not found.
CONNECT_REQUEST_INCOMPLETE = 95 # The connect request was found but does not yet have any contents.
CONNECT_PACKAGE_EXISTS = 96 # The connect package has already been fully created and populated.
INVALID_FILE_NAME = 97 # The file name is not supported.
INVALID_SIGNUP_CODE = 98 # The signup code is invalid.
BLOB_SIZE_TOO_LARGE_FOR_INLINE = 99 # The blob is too large and cannot be returned inline.
DUPLICATE_BLOB = 100 # A blob of this name is already present in the request.
BLOB_TOKEN_COMMITTED = 101 # The blob token corresponds to a blob that is already committed.
BLOB_TOKEN_NOT_COMPLETED = 102 # The blob token corresponds to a blob that was not marked completed through the streaming interface.
THING_POTENTIALLY_INCOMPLETE = 104 # The thing being updated has data items that cannot be seen in this version, e.g. signatures with new signature methods or multiple blobs.
INVALID_SIGNATURE_ALGORITHM = 105 # The signature algorithm is not valid.
INVALID_BLOB_HASH_ALGORITHM = 106 # The blob hash algorithm is invalid or not supported.
UNSUPPORTED_BLOB_HASH_BLOCK_SIZE = 107 # The blob hash block size is unsupported.
BLOB_HASH_ALGORITHM_MISMATCH = 108 # The specified blob hash algorithm does not match the blob's hash algorithm.
BLOB_HASH_BLOCK_SIZE_MISMATCH = 109 # The specified blob hash block size does not match the blob's hash block size.
UNSUPPORTED_SIGNATURE_METHOD = 110 # The signature method is not supported in the context it is being used.
INVALID_BLOB_HASH = 111 # The specified blob hash is invalid.
PACKAGE_BLOB_NOT_COMMITTED = 112 # The blob is associated with a connect package that is not yet created.
APPLICATION_STATE_TRANSITION_NOT_SUPPORTED = 113 # Changing the application state from deleted is not supported.
INVALID_PACKAGE_CONTENTS = 120 # The contents of the connect package are not valid xml.
INVALID_CONTENT_TYPE = 121 # The content type is not supported.
CONNECT_PACKAGE_VALIDATION_REQUIRED = 122 # The contents of the connect package must be validated before they are put into a health record.
INVALID_THING_STATE = 123 # Invalid thing state.
TOO_MANY_THINGS_SPECIFIED = 124 # The maximum number of things specified has been exceeded.
INVALID_DIRECTORY_ITEM = 126 # The directory item passed in is invalid.
INVALID_VOCABULARY_AUTHORIZATION = 129 # The vocabulary authorization is invalid.
VOCABULARY_ACCESS_DENIED = 130 # Access to the requested vocabulary is denied.
UNSUPPORTED_PERSONAL_FLAG = 131 # The personal flag is not supported with this type.
SUBSCRIPTION_NOT_FOUND = 132 # The requested subscription was not found.
SUBSCRIPTION_LIMIT_EXCEEDED = 133 # The number of subscriptions for the application was exceeded.
SUBSCRIPTION_INVALID = 134 # The subscription contains invalid data.
| mit | -1,922,435,709,822,506,200 | 78.703125 | 193 | 0.736424 | false |
kkanahin/django-avatar | avatar/views.py | 3 | 8108 | from django.http import Http404
from django.shortcuts import render, redirect
from django.utils import six
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from avatar.forms import PrimaryAvatarForm, DeleteAvatarForm, UploadAvatarForm
from avatar.models import Avatar
from avatar.settings import AVATAR_MAX_AVATARS_PER_USER, AVATAR_DEFAULT_SIZE
from avatar.signals import avatar_updated
from avatar.util import (get_primary_avatar, get_default_avatar_url,
get_user_model, get_user)
def _get_next(request):
"""
The part that's the least straightforward about views in this module is how they
determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the following order:
1. If there is a variable named ``next`` in the *POST* parameters, the view will
redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters, the view will
redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers, the view will
redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next',
request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
def _get_avatars(user):
# Default set. Needs to be sliced, but that's it. Keep the natural order.
avatars = user.avatar_set.all()
# Current avatar
primary_avatar = avatars.order_by('-primary')[:1]
if primary_avatar:
avatar = primary_avatar[0]
else:
avatar = None
if AVATAR_MAX_AVATARS_PER_USER == 1:
avatars = primary_avatar
else:
# Slice the default set now that we used the queryset for the primary avatar
avatars = avatars[:AVATAR_MAX_AVATARS_PER_USER]
return (avatar, avatars)
@login_required
def add(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
upload_avatar_form = upload_form(request.POST or None,
request.FILES or None, user=request.user)
if request.method == "POST" and 'avatar' in request.FILES:
if upload_avatar_form.is_valid():
avatar = Avatar(user=request.user, primary=True)
image_file = request.FILES['avatar']
avatar.avatar.save(image_file.name, image_file)
avatar.save()
messages.success(request, _("Successfully uploaded a new avatar."))
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'upload_avatar_form': upload_avatar_form,
'next': next_override or _get_next(request),
}
context.update(extra_context)
return render(request, 'avatar/add.html', context)
@login_required
def change(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, primary_form=PrimaryAvatarForm,
*args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
if avatar:
kwargs = {'initial': {'choice': avatar.id}}
else:
kwargs = {}
upload_avatar_form = upload_form(user=request.user, **kwargs)
primary_avatar_form = primary_form(request.POST or None,
user=request.user, avatars=avatars, **kwargs)
if request.method == "POST":
updated = False
if 'choice' in request.POST and primary_avatar_form.is_valid():
avatar = Avatar.objects.get(
id=primary_avatar_form.cleaned_data['choice'])
avatar.primary = True
avatar.save()
updated = True
messages.success(request, _("Successfully updated your avatar."))
if updated:
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'upload_avatar_form': upload_avatar_form,
'primary_avatar_form': primary_avatar_form,
'next': next_override or _get_next(request)
}
context.update(extra_context)
return render(request, 'avatar/change.html', context)
@login_required
def delete(request, extra_context=None, next_override=None, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
delete_avatar_form = DeleteAvatarForm(request.POST or None,
user=request.user, avatars=avatars)
if request.method == 'POST':
if delete_avatar_form.is_valid():
ids = delete_avatar_form.cleaned_data['choices']
if six.text_type(avatar.id) in ids and avatars.count() > len(ids):
# Find the next best avatar, and set it as the new primary
for a in avatars:
if six.text_type(a.id) not in ids:
a.primary = True
a.save()
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
break
Avatar.objects.filter(id__in=ids).delete()
messages.success(request, _("Successfully deleted the requested avatars."))
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'delete_avatar_form': delete_avatar_form,
'next': next_override or _get_next(request),
}
context.update(extra_context)
return render(request, 'avatar/confirm_delete.html', context)
def avatar_gallery(request, username, template_name="avatar/gallery.html"):
try:
user = get_user(username)
except get_user_model().DoesNotExist:
raise Http404
return render(request, template_name, {
"other_user": user,
"avatars": user.avatar_set.all(),
})
def avatar(request, username, id, template_name="avatar/avatar.html"):
try:
user = get_user(username)
except get_user_model().DoesNotExist:
raise Http404
avatars = user.avatar_set.order_by("-date_uploaded")
index = None
avatar = None
if avatars:
avatar = avatars.get(pk=id)
if not avatar:
return Http404
index = avatars.filter(date_uploaded__gt=avatar.date_uploaded).count()
count = avatars.count()
if index == 0:
prev = avatars.reverse()[0]
if count <= 1:
next = avatars[0]
else:
next = avatars[1]
else:
prev = avatars[index - 1]
if (index + 1) >= count:
next = avatars[0]
prev_index = index - 1
if prev_index < 0:
prev_index = 0
prev = avatars[prev_index]
else:
next = avatars[index + 1]
return render(request, template_name, {
"other_user": user,
"avatar": avatar,
"index": index + 1,
"avatars": avatars,
"next": next,
"prev": prev,
"count": count,
})
def render_primary(request, extra_context={}, user=None, size=AVATAR_DEFAULT_SIZE, *args, **kwargs):
size = int(size)
avatar = get_primary_avatar(user, size=size)
if avatar:
# FIXME: later, add an option to render the resized avatar dynamically
# instead of redirecting to an already created static file. This could
# be useful in certain situations, particulary if there is a CDN and
# we want to minimize the storage usage on our static server, letting
# the CDN store those files instead
return redirect(avatar.avatar_url(size))
else:
return redirect(get_default_avatar_url())
| bsd-3-clause | -3,761,141,852,235,347,500 | 35.687783 | 100 | 0.625432 | false |
wooga/airflow | tests/www/api/experimental/test_endpoints.py | 1 | 21328 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import unittest
from datetime import timedelta
from unittest import mock
from urllib.parse import quote_plus
from parameterized import parameterized_class
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.models.serialized_dag import SerializedDagModel
from airflow.settings import Session
from airflow.utils.timezone import datetime, parse as parse_datetime, utcnow
from airflow.version import version
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_pools
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, os.pardir)
)
class TestBase(unittest.TestCase):
def setUp(self):
self.app = application.create_app(testing=True)
self.appbuilder = self.app.appbuilder # pylint: disable=no-member
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
@parameterized_class([
{"dag_serialization": "False"},
{"dag_serialization": "True"},
])
class TestApiExperimental(TestBase):
dag_serialization = "False"
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super().tearDown()
def test_info(self):
url = '/api/experimental/info'
resp_raw = self.client.get(url)
resp = json.loads(resp_raw.data.decode('utf-8'))
self.assertEqual(version, resp['version'])
def test_task_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.client.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_get_dag_code(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/code'
response = self.client.get(
url_template.format('example_bash_operator')
)
self.assertIn('BashOperator(', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('xyz')
)
self.assertEqual(404, response.status_code)
def test_dag_paused(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
pause_url_template = '/api/experimental/dags/{}/paused/{}'
paused_url_template = '/api/experimental/dags/{}/paused'
paused_url = paused_url_template.format('example_bash_operator')
response = self.client.get(
pause_url_template.format('example_bash_operator', 'true')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
paused_response = self.client.get(paused_url)
self.assertEqual(200, paused_response.status_code)
self.assertEqual({"is_paused": True}, paused_response.json)
response = self.client.get(
pause_url_template.format('example_bash_operator', 'false')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
paused_response = self.client.get(paused_url)
self.assertEqual(200, paused_response.status_code)
self.assertEqual({"is_paused": False}, paused_response.json)
def test_trigger_dag(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs'
run_id = 'my_run' + utcnow().isoformat()
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': run_id}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response_execution_date = parse_datetime(
json.loads(response.data.decode('utf-8'))['execution_date'])
self.assertEqual(0, response_execution_date.microsecond)
# Check execution_date is correct
response = json.loads(response.data.decode('utf-8'))
dagbag = DagBag()
dag = dagbag.get_dag('example_bash_operator')
dag_run = dag.get_dagrun(response_execution_date)
dag_run_id = dag_run.run_id
self.assertEqual(run_id, dag_run_id)
self.assertEqual(dag_run_id, response['run_id'])
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
execution_date = utcnow() + timedelta(hours=1)
datetime_string = execution_date.isoformat()
# Test correct execution with execution date
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
self.assertEqual(datetime_string, json.loads(response.data.decode('utf-8'))['execution_date'])
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test correct execution with execution date and microseconds replaced
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string, 'replace_microseconds': 'true'}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response_execution_date = parse_datetime(
json.loads(response.data.decode('utf-8'))['execution_date'])
self.assertEqual(0, response_execution_date.microsecond)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(response_execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.client.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
def test_dagrun_status(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
@parameterized_class([
{"dag_serialization": "False"},
{"dag_serialization": "True"},
])
class TestLineageApiExperimental(TestBase):
PAPERMILL_EXAMPLE_DAGS = os.path.join(ROOT_FOLDER, "airflow", "providers", "papermill", "example_dags")
dag_serialization = "False"
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=False, dag_folder=cls.PAPERMILL_EXAMPLE_DAGS)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
@mock.patch("airflow.settings.DAGS_FOLDER", PAPERMILL_EXAMPLE_DAGS)
def test_lineage_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/lineage/{}/{}'
dag_id = 'example_papermill_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_lineage_info_run',
execution_date=execution_date)
# test correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('task_ids', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(TestBase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
super().setUp()
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = 'experimental_%s' % (i + 1)
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[-1]
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
pools = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(pools), self.TOTAL_POOL_COUNT)
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
self.assertDictEqual(pool, self.pools[i].to_json())
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': 'foo',
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
pool = json.loads(response.data.decode('utf-8'))
self.assertEqual(pool['pool'], 'foo')
self.assertEqual(pool['slots'], 1)
self.assertEqual(pool['description'], '')
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT + 1)
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': name,
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.data.decode('utf-8'))['error'],
"Pool name shouldn't be empty",
)
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT)
def test_delete_pool(self):
response = self.client.delete(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT - 1)
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_delete_default_pool(self):
clear_db_pools()
response = self.client.delete(
'/api/experimental/pools/default_pool',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"default_pool cannot be deleted")
| apache-2.0 | 3,584,455,724,658,494,500 | 38.42329 | 107 | 0.578676 | false |
vismantic-ohtuprojekti/image-filtering-suite | qualipy/utils/tesseract.py | 2 | 1421 | import subprocess
import tempfile
import os
def img_to_str(tesseract_path, image):
"""Reads text in an image with tesseract-ocr
:param tesseract_path: path to the tesseract executable
:type tesseract_path: str
:param image: path to the input image
:type image: str
"""
if not os.path.isfile(image):
raise OSError("image not found: %s" % image)
with tempfile.NamedTemporaryFile(prefix="tess_") as outfile:
try:
status, err_str = __run_tesseract(tesseract_path,
image, outfile.name)
if status:
raise OSError(err_str)
with open(outfile.name + ".txt") as out:
return out.read().strip()
finally:
__remove_file(outfile.name + ".txt")
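# Usage sketch (paths are illustrative; point the first argument at the local
# tesseract binary):
#
#   text = img_to_str('/usr/bin/tesseract', 'scanned_page.png')
#
# OSError is raised if the image is missing or tesseract exits with an error.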
def __run_tesseract(tesseract_path, infile, outfile):
"""Run tesseract process for given input file
:param tesseract_path: path to the tesseract executable
:type tesseract_path: str
:param infile: path to the input image
:type infile: str
:param outfile: path to the output file
:type outfile: str
"""
command = [tesseract_path, infile, outfile]
tesseract = subprocess.Popen(command, stderr=subprocess.PIPE)
return tesseract.wait(), tesseract.stderr.read()
def __remove_file(filename):
try:
os.remove(filename)
except OSError:
pass
| mit | -6,089,292,067,045,030,000 | 28.604167 | 66 | 0.619282 | false |
runt18/nupic | src/nupic/data/aggregator.py | 1 | 28990 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import datetime
import os
from pkg_resources import resource_filename
import time
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
"""The aggregator aggregates PF datasets
It supports aggregation of multiple records based on time.
Common use cases:
- Aggregate records by month
- Aggregate records every 3 months starting April 15th
- Aggregate records in 2.5 seconds intervals
Assumption: aggregated slices fit in memory. All the records that are aggregated
per period are stored in memory until the next slice starts and are only
aggregated then. If this assumption is too strong the script will need to write
slices to a temp storage or use incremental aggregation techniques.
"""
def initFilter(input, filterInfo = None):
""" Initializes internal filter variables for further processing.
Returns a tuple (function to call,parameters for the filter call)
The filterInfo is a dict. Here is an example structure:
{fieldName: {'min': x,
'max': y,
'type': 'category', # or 'number'
'acceptValues': ['foo', 'bar'],
}
}
This returns the following:
(filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
...)
Where fieldIdx is the index of the field within each record
fieldFilterFunc returns True if the value is "OK" (within min, max or
part of acceptValues)
filterDict is a dict containing 'type', 'min', 'max', 'acceptValues'
"""
if filterInfo is None:
return None
# Build an array of index/func to call on record[index]
filterList = []
for i, fieldName in enumerate(input.getFieldNames()):
fieldFilter = filterInfo.get(fieldName, None)
if fieldFilter is None:
continue
var = dict()
var['acceptValues'] = None
min = fieldFilter.get('min', None)
max = fieldFilter.get('max', None)
var['min'] = min
var['max'] = max
if fieldFilter['type'] == 'category':
var['acceptValues'] = fieldFilter['acceptValues']
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] in x['acceptValues'])
elif fieldFilter['type'] == 'number':
if min is not None and max is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'] and x['value'] <= x['max'])
elif min is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'])
else:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] <= x['max'])
filterList.append((i, fp, var))
return (_filterRecord, filterList)
def _filterRecord(filterList, record):
""" Takes a record and returns true if record meets filter criteria,
false otherwise
"""
for (fieldIdx, fp, params) in filterList:
x = dict()
x['value'] = record[fieldIdx]
x['acceptValues'] = params['acceptValues']
x['min'] = params['min']
x['max'] = params['max']
if not fp(x):
return False
# None of the field filters triggered, accept the record as a good one
return True
def _aggr_first(inList):
""" Returns first non-None element in the list, or None if all are None
"""
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_last(inList):
""" Returns last non-None element in the list, or None if all are None
"""
for elem in reversed(inList):
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_sum(inList):
""" Returns sum of the elements in the list. Missing items are replaced with
the mean value
"""
aggrMean = _aggr_mean(inList)
if aggrMean is None:
return None
aggrSum = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
else:
aggrSum += aggrMean
return aggrSum
def _aggr_mean(inList):
""" Returns mean of non-None elements of the list
"""
aggrSum = 0
nonNone = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
nonNone += 1
if nonNone != 0:
return aggrSum / nonNone
else:
return None
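# Note: the division above follows Python 2 semantics, so a slice of int values
# yields a truncated integer mean; supply float values if a fractional mean is
# needed.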
def _aggr_mode(inList):
""" Returns most common value seen in the non-None elements of the list
"""
valueCounts = dict()
nonNone = 0
for elem in inList:
if elem == SENTINEL_VALUE_FOR_MISSING_DATA:
continue
nonNone += 1
if elem in valueCounts:
valueCounts[elem] += 1
else:
valueCounts[elem] = 1
# Get the most common one
if nonNone == 0:
return None
# Sort by counts
sortedCounts = valueCounts.items()
sortedCounts.sort(cmp=lambda x,y: x[1] - y[1], reverse=True)
return sortedCounts[0][0]
def _aggr_weighted_mean(inList, params):
""" Weighted mean uses params (must be the same size as inList) and
makes weighed mean of inList"""
assert(len(inList) == len(params))
# If all weights are 0, then the value is not defined, return None (missing)
weightsSum = sum(params)
if weightsSum == 0:
return None
weightedMean = 0
for i, elem in enumerate(inList):
weightedMean += elem * params[i]
return weightedMean / weightsSum
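# Sketch with hypothetical values: _aggr_weighted_mean([2.0, 4.0], [1, 3])
# computes (2.0*1 + 4.0*3) / (1 + 3) = 3.5.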
class Aggregator(object):
"""
This class provides context and methods for aggregating records. The caller
should construct an instance of Aggregator and then call the next() method
repeatedly to get each aggregated record.
This is an example aggregationInfo dict:
{
'hours': 1,
'minutes': 15,
'fields': [
('timestamp', 'first'),
('gym', 'first'),
('consumption', 'sum')
],
}
"""
def __init__(self, aggregationInfo, inputFields, timeFieldName=None,
sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):
""" Construct an aggregator instance
Params:
- aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
- aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] | [weeks days hours minutes seconds milliseconds
microseconds]
NOTE: years and months are mutually-exclusive with the other units. See
getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
- inputFields: The fields from the data source. This is a sequence of
`nupic.data.fieldmeta.FieldMetaInfo` instances.
- timeFieldName: name of the field to use as the time field. If None,
then the time field will be queried from the reader.
      - sequenceIdFieldName: name of the field to use as the sequenceId. If None,
        then the sequence id field will be queried from the reader.
      - resetFieldName: name of the field to use as the reset field. If None,
        then the reset field will be queried from the reader.
- filterInfo: a structure with rules for filtering records out
If the input file contains a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for these will be to pick the first:
lambda x: x[0]
"""
# -----------------------------------------------------------------------
# Save member variables.
# The same aggregationInfo dict may be used by the caller for generating
# more datasets (with slight changes), so it is safer to copy it here and
# all changes made here will not affect the input aggregationInfo
self._filterInfo = filterInfo
self._nullAggregation = False
self._inputFields = inputFields
# See if this is a null aggregation
self._nullAggregation = False
if aggregationInfo is None:
self._nullAggregation = True
else:
aggDef = defaultdict(lambda: 0, aggregationInfo)
if (aggDef['years'] == aggDef['months'] == aggDef['weeks'] ==
aggDef['days'] == aggDef['hours'] == aggDef['minutes'] ==
aggDef['seconds'] == aggDef['milliseconds'] ==
aggDef['microseconds'] == 0):
self._nullAggregation = True
# Prepare the field filtering info. The filter allows us to ignore records
# based on specified min or max values for each field.
self._filter = initFilter(self._inputFields, self._filterInfo)
# ----------------------------------------------------------------------
# Fill in defaults
self._fields = None
self._resetFieldIdx = None
self._timeFieldIdx = None
self._sequenceIdFieldIdx = None
self._aggTimeDelta = datetime.timedelta()
self._aggYears = 0
self._aggMonths = 0
# Init state variables used within next()
self._aggrInputBookmark = None
self._startTime = None
self._endTime = None
self._sequenceId = None
self._firstSequenceStartTime = None
self._inIdx = -1
self._slice = defaultdict(list)
# ========================================================================
# Get aggregation params
# self._fields will be a list of tuples: (fieldIdx, funcPtr, funcParam)
if not self._nullAggregation:
# ---------------------------------------------------------------------
# Verify that all aggregation field names exist in the input
fieldNames = [f[0] for f in aggregationInfo['fields']]
readerFieldNames = [f[0] for f in self._inputFields]
for name in fieldNames:
if not name in readerFieldNames:
raise Exception('No such input field: {0!s}'.format((name)))
# ---------------------------------------------------------------------
# Get the indices of the special fields, if given to our constructor
if timeFieldName is not None:
self._timeFieldIdx = readerFieldNames.index(timeFieldName)
if resetFieldName is not None:
self._resetFieldIdx = readerFieldNames.index(resetFieldName)
if sequenceIdFieldName is not None:
self._sequenceIdFieldIdx = readerFieldNames.index(sequenceIdFieldName)
# ---------------------------------------------------------------------
# Re-order the fields to match the order in the reader and add in any
# fields from the reader that were not already in the aggregationInfo
# fields list.
self._fields = []
fieldIdx = -1
for (name, type, special) in self._inputFields:
fieldIdx += 1
# See if it exists in the aggregationInfo
found = False
for field in aggregationInfo['fields']:
if field[0] == name:
aggFunctionName = field[1]
found = True
break
if not found:
aggFunctionName = 'first'
# Convert to a function pointer and optional params
(funcPtr, params) = self._getFuncPtrAndParams(aggFunctionName)
# Add it
self._fields.append((fieldIdx, funcPtr, params))
# Is it a special field that we are still looking for?
if special == FieldMetaSpecial.reset and self._resetFieldIdx is None:
self._resetFieldIdx = fieldIdx
if special == FieldMetaSpecial.timestamp and self._timeFieldIdx is None:
self._timeFieldIdx = fieldIdx
if (special == FieldMetaSpecial.sequence and
self._sequenceIdFieldIdx is None):
self._sequenceIdFieldIdx = fieldIdx
assert self._timeFieldIdx is not None, "No time field was found"
# Create an instance of _AggregationPeriod with the aggregation period
self._aggTimeDelta = datetime.timedelta(days=aggDef['days'],
hours=aggDef['hours'],
minutes=aggDef['minutes'],
seconds=aggDef['seconds'],
milliseconds=aggDef['milliseconds'],
microseconds=aggDef['microseconds'],
weeks=aggDef['weeks'])
self._aggYears = aggDef['years']
self._aggMonths = aggDef['months']
if self._aggTimeDelta:
assert self._aggYears == 0
assert self._aggMonths == 0
def _getEndTime(self, t):
"""Add the aggregation period to the input time t and return a datetime object
    Years and months are handled as a special case due to leap years
    and months with different numbers of days. They can't be converted
    to a strict timedelta because a period such as 3 months does not have
    a fixed duration. The solution is to just add the years and months
fields directly to the current time.
Other periods are converted to timedelta and just added to current time.
"""
assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return t + self._aggTimeDelta
else:
year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
month = (t.month - 1 + self._aggMonths) % 12 + 1
return t.replace(year=year, month=month)
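  # Worked example (hypothetical period of years=1, months=6): for a current
  # time t of datetime(2010, 10, 15), year = 2010 + 1 + (9 + 6) / 12 = 2012 and
  # month = (9 + 6) % 12 + 1 = 4, so the returned end time is 2012-04-15.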
def _getFuncPtrAndParams(self, funcName):
""" Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
------------------------------------------------------------------------
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param)
"""
params = None
if isinstance(funcName, basestring):
if funcName == 'sum':
fp = _aggr_sum
elif funcName == 'first':
fp = _aggr_first
elif funcName == 'last':
fp = _aggr_last
elif funcName == 'mean':
fp = _aggr_mean
elif funcName == 'max':
fp = max
elif funcName == 'min':
fp = min
elif funcName == 'mode':
fp = _aggr_mode
elif funcName.startswith('wmean:'):
fp = _aggr_weighted_mean
paramsName = funcName[6:]
params = [f[0] for f in self._inputFields].index(paramsName)
else:
fp = funcName
return (fp, params)
def _createAggregateRecord(self):
""" Generate the aggregated output record
Parameters:
------------------------------------------------------------------------
retval: outputRecord
"""
record = []
for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):
if aggFP is None: # this field is not supposed to be aggregated.
continue
values = self._slice[i]
refIndex = None
if paramIdx is not None:
record.append(aggFP(values, self._slice[paramIdx]))
else:
record.append(aggFP(values))
return record
def isNullAggregation(self):
""" Return True if no aggregation will be performed, either because the
aggregationInfo was None or all aggregation params within it were 0.
"""
return self._nullAggregation
def next(self, record, curInputBookmark):
""" Return the next aggregated record, if any
Parameters:
------------------------------------------------------------------------
record: The input record (values only) from the input source, or
None if the input has reached EOF (this will cause this
method to force completion of and return any partially
aggregated time period)
curInputBookmark: The bookmark to the next input record
retval:
(outputRecord, inputBookmark)
outputRecord: the aggregated record
inputBookmark: a bookmark to the last position from the input that
contributed to this aggregated record.
If we don't have any aggregated records yet, returns (None, None)
The caller should generally do a loop like this:
while True:
inRecord = reader.getNextRecord()
bookmark = reader.getBookmark()
(aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)
# reached EOF?
if inRecord is None and aggRecord is None:
break
if aggRecord is not None:
          processRecord(aggRecord, aggBookmark)
This method makes use of the self._slice member variable to build up
the values we need to aggregate. This is a dict of lists. The keys are
the field indices and the elements of each list are the values for that
field. For example:
      self._slice = { 0: [42, 53], 1: [4.0, 5.1] }
"""
# This will hold the aggregated record we return
outRecord = None
# This will hold the bookmark of the last input used within the
# aggregated record we return.
retInputBookmark = None
if record is not None:
# Increment input count
self._inIdx += 1
#print self._inIdx, record
# Apply the filter, ignore the record if any field is unacceptable
if self._filter is not None and not self._filter[0](self._filter[1], record):
return (None, None)
# If no aggregation info just return as-is
if self._nullAggregation:
return (record, curInputBookmark)
# ----------------------------------------------------------------------
# Do aggregation
#
# Remember the very first record time stamp - it will be used as
# the timestamp for all first records in all sequences to align
# times for the aggregation/join of sequences.
#
# For a set of aggregated records, it will use the beginning of the time
# window as a timestamp for the set
#
t = record[self._timeFieldIdx]
if self._firstSequenceStartTime is None:
self._firstSequenceStartTime = t
# Create initial startTime and endTime if needed
if self._startTime is None:
self._startTime = t
if self._endTime is None:
self._endTime = self._getEndTime(t)
assert self._endTime > t
#print 'Processing line:', i, t, endTime
#from dbgp.client import brk; brk(port=9011)
# ----------------------------------------------------------------------
# Does this record have a reset signal or sequence Id associated with it?
# If so, see if we've reached a sequence boundary
if self._resetFieldIdx is not None:
resetSignal = record[self._resetFieldIdx]
else:
resetSignal = None
if self._sequenceIdFieldIdx is not None:
currSequenceId = record[self._sequenceIdFieldIdx]
else:
currSequenceId = None
newSequence = (resetSignal == 1 and self._inIdx > 0) \
or self._sequenceId != currSequenceId \
or self._inIdx == 0
if newSequence:
self._sequenceId = currSequenceId
# --------------------------------------------------------------------
# We end the aggregation chunk if we go past the end time
# -OR- we get an out of order record (t < startTime)
sliceEnded = (t >= self._endTime or t < self._startTime)
# -------------------------------------------------------------------
# Time to generate a new output record?
if (newSequence or sliceEnded) and len(self._slice) > 0:
# Create aggregated record
# print 'Creating aggregate record...'
# Make first record timestamp as the beginning of the time period,
# in case the first record wasn't falling on the beginning of the period
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
# Generate the aggregated record
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
# Reset the slice
self._slice = defaultdict(list)
# --------------------------------------------------------------------
# Add current record to slice (Note keeping slices in memory). Each
# field in the slice is a list of field values from all the sliced
# records
for j, f in enumerate(self._fields):
index = f[0]
# append the parsed field value to the proper aggregated slice field.
self._slice[j].append(record[index])
self._aggrInputBookmark = curInputBookmark
# --------------------------------------------------------------------
# If we've encountered a new sequence, start aggregation over again
if newSequence:
# TODO: May use self._firstSequenceStartTime as a start for the new
# sequence (to align all sequences)
self._startTime = t
self._endTime = self._getEndTime(t)
# --------------------------------------------------------------------
# If a slice just ended, re-compute the start and end time for the
# next aggregated record
if sliceEnded:
# Did we receive an out of order record? If so, go back and iterate
# till we get to the next end time boundary.
if t < self._startTime:
self._endTime = self._firstSequenceStartTime
while t >= self._endTime:
self._startTime = self._endTime
self._endTime = self._getEndTime(self._endTime)
# If we have a record to return, do it now
if outRecord is not None:
return (outRecord, retInputBookmark)
# ---------------------------------------------------------------------
# Input reached EOF
# Aggregate one last time in the end if necessary
elif self._slice:
# Make first record timestamp as the beginning of the time period,
# in case the first record wasn't falling on the beginning of the period
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
self._slice = defaultdict(list)
# Return aggregated record
return (outRecord, retInputBookmark)
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
"""Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned.
"""
# Create the input stream
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
# Instantiate the aggregator
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
# Is it a null aggregation? If so, just return the input file unmodified
if aggregator.isNullAggregation():
return inputFullPath
# ------------------------------------------------------------------------
# If we were not given an output filename, create one based on the
# aggregation settings
if outputFilename is None:
outputFilename = 'agg_{0!s}'.format( \
os.path.splitext(os.path.basename(inputFullPath))[0])
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if aggregationInfo.get(k, 0) > 0:
outputFilename += '_{0!s}_{1:d}'.format(k, aggregationInfo[k])
outputFilename += '.csv'
outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)
# ------------------------------------------------------------------------
# If some other process already started creating this file, simply
# wait for it to finish and return without doing anything
lockFilePath = outputFilename + '.please_wait'
if os.path.isfile(outputFilename) or \
os.path.isfile(lockFilePath):
while os.path.isfile(lockFilePath):
print 'Waiting for {0!s} to be fully written by another process'.format( \
lockFilePath)
time.sleep(1)
return outputFilename
# Create the lock file
lockFD = open(lockFilePath, 'w')
# -------------------------------------------------------------------------
# Create the output stream
outputObj = FileRecordStream(streamID=outputFilename, write=True,
fields=inputObj.getFields())
# -------------------------------------------------------------------------
# Write all aggregated records to the output
while True:
inRecord = inputObj.getNextRecord()
(aggRecord, aggBookmark) = aggregator.next(inRecord, None)
if aggRecord is None and inRecord is None:
break
if aggRecord is not None:
outputObj.appendRecord(aggRecord)
return outputFilename
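# Minimal usage sketch (the dataset name and period below are hypothetical):
# aggInfo = {'fields': [('timestamp', 'first'), ('consumption', 'sum')],
#            'hours': 1}
# aggregatedPath = generateDataset(aggInfo, 'extra/hotgym/hotgym.csv')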
def getFilename(aggregationInfo, inputFile):
"""Generate the filename for aggregated dataset
The filename is based on the input filename and the
aggregation period.
Returns the inputFile if no aggregation required (aggregation
info has all 0's)
"""
# Find the actual file, with an absolute path
inputFile = resource_filename("nupic.datafiles", inputFile)
a = defaultdict(lambda: 0, aggregationInfo)
outputDir = os.path.dirname(inputFile)
outputFile = 'agg_{0!s}'.format(os.path.splitext(os.path.basename(inputFile))[0])
noAggregation = True
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if a[k] > 0:
noAggregation = False
outputFile += '_{0!s}_{1:d}'.format(k, a[k])
if noAggregation:
return inputFile
outputFile += '.csv'
outputFile = os.path.join(outputDir, outputFile)
return outputFile
| agpl-3.0 | 3,947,203,172,248,661,500 | 32.398618 | 83 | 0.614315 | false |
dje42/gdb | gdb/contrib/cleanup_check.py | 20 | 13262 | # Copyright 2013 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import gcc
import gccutils
import sys
want_raii_info = False
logging = False
show_cfg = False
def log(msg, indent=0):
global logging
if logging:
sys.stderr.write('%s%s\n' % (' ' * indent, msg))
sys.stderr.flush()
def is_cleanup_type(return_type):
if not isinstance(return_type, gcc.PointerType):
return False
if not isinstance(return_type.dereference, gcc.RecordType):
return False
if str(return_type.dereference.name) == 'cleanup':
return True
return False
def is_constructor(decl):
"Return True if the function DECL is a cleanup constructor; False otherwise"
return is_cleanup_type(decl.type.type) and (not decl.name or str(decl.name) != 'make_final_cleanup')
destructor_names = set(['do_cleanups', 'discard_cleanups'])
def is_destructor(decl):
return decl.name in destructor_names
# This list is just much too long... we should probably have an
# attribute instead.
special_names = set(['do_final_cleanups', 'discard_final_cleanups',
'save_cleanups', 'save_final_cleanups',
'restore_cleanups', 'restore_final_cleanups',
'exceptions_state_mc_init',
'make_my_cleanup2', 'make_final_cleanup', 'all_cleanups',
'save_my_cleanups', 'quit_target'])
def needs_special_treatment(decl):
return decl.name in special_names
# Sometimes we need a new placeholder object that isn't the same as
# anything else.
class Dummy(object):
def __init__(self, location):
self.location = location
# A wrapper for a cleanup which has been assigned to a variable.
# This holds the variable and the location.
class Cleanup(object):
def __init__(self, var, location):
self.var = var
self.location = location
# A class representing a master cleanup. This holds a stack of
# cleanup objects and supports a merging operation.
class MasterCleanup(object):
# Create a new MasterCleanup object. OTHER, if given, is a
# MasterCleanup object to copy.
def __init__(self, other = None):
# 'cleanups' is a list of cleanups. Each element is either a
# Dummy, for an anonymous cleanup, or a Cleanup, for a cleanup
# which was assigned to a variable.
if other is None:
self.cleanups = []
self.aliases = {}
else:
self.cleanups = other.cleanups[:]
self.aliases = dict(other.aliases)
def compare_vars(self, definition, argument):
if definition == argument:
return True
if argument in self.aliases:
argument = self.aliases[argument]
if definition in self.aliases:
definition = self.aliases[definition]
return definition == argument
def note_assignment(self, lhs, rhs):
log('noting assignment %s = %s' % (lhs, rhs), 4)
self.aliases[lhs] = rhs
# Merge with another MasterCleanup.
# Returns True if this resulted in a change to our state.
def merge(self, other):
# We do explicit iteration like this so we can easily
# update the list after the loop.
counter = -1
found_named = False
for counter in range(len(self.cleanups) - 1, -1, -1):
var = self.cleanups[counter]
log('merge checking %s' % var, 4)
# Only interested in named cleanups.
if isinstance(var, Dummy):
log('=> merge dummy', 5)
continue
# Now see if VAR is found in OTHER.
if other._find_var(var.var) >= 0:
log ('=> merge found', 5)
break
log('=>merge not found', 5)
found_named = True
if found_named and counter < len(self.cleanups) - 1:
log ('merging to %d' % counter, 4)
if counter < 0:
self.cleanups = []
else:
self.cleanups = self.cleanups[0:counter]
return True
# If SELF is empty but OTHER has some cleanups, then consider
# that a change as well.
if len(self.cleanups) == 0 and len(other.cleanups) > 0:
log('merging non-empty other', 4)
self.cleanups = other.cleanups[:]
return True
return False
# Push a new constructor onto our stack. LHS is the
# left-hand-side of the GimpleCall statement. It may be None,
# meaning that this constructor's value wasn't used.
def push(self, location, lhs):
if lhs is None:
obj = Dummy(location)
else:
obj = Cleanup(lhs, location)
log('pushing %s' % lhs, 4)
idx = self._find_var(lhs)
if idx >= 0:
gcc.permerror(location, 'reassigning to known cleanup')
gcc.inform(self.cleanups[idx].location,
'previous assignment is here')
self.cleanups.append(obj)
# A helper for merge and pop that finds BACK_TO in self.cleanups,
# and returns the index, or -1 if not found.
def _find_var(self, back_to):
for i in range(len(self.cleanups) - 1, -1, -1):
if isinstance(self.cleanups[i], Dummy):
continue
if self.compare_vars(self.cleanups[i].var, back_to):
return i
return -1
# Pop constructors until we find one matching BACK_TO.
# This is invoked when we see a do_cleanups call.
def pop(self, location, back_to):
log('pop:', 4)
i = self._find_var(back_to)
if i >= 0:
self.cleanups = self.cleanups[0:i]
else:
gcc.permerror(location, 'destructor call with unknown argument')
# Check whether ARG is the current master cleanup. Return True if
# all is well.
def verify(self, location, arg):
log('verify %s' % arg, 4)
return (len(self.cleanups) > 0
and not isinstance(self.cleanups[0], Dummy)
and self.compare_vars(self.cleanups[0].var, arg))
# Check whether SELF is empty.
def isempty(self):
log('isempty: len = %d' % len(self.cleanups), 4)
return len(self.cleanups) == 0
# Emit informational warnings about the cleanup stack.
def inform(self):
for item in reversed(self.cleanups):
gcc.inform(item.location, 'leaked cleanup')
class CleanupChecker:
def __init__(self, fun):
self.fun = fun
self.seen_edges = set()
self.bad_returns = set()
# This maps BB indices to a list of master cleanups for the
# BB.
self.master_cleanups = {}
# Pick a reasonable location for the basic block BB.
def guess_bb_location(self, bb):
if isinstance(bb.gimple, list):
for stmt in bb.gimple:
if stmt.loc:
return stmt.loc
return self.fun.end
# Compute the master cleanup list for BB.
# Modifies MASTER_CLEANUP in place.
def compute_master(self, bb, bb_from, master_cleanup):
if not isinstance(bb.gimple, list):
return
curloc = self.fun.end
for stmt in bb.gimple:
if stmt.loc:
curloc = stmt.loc
if isinstance(stmt, gcc.GimpleCall) and stmt.fndecl:
if is_constructor(stmt.fndecl):
log('saw constructor %s in bb=%d' % (str(stmt.fndecl), bb.index), 2)
self.cleanup_aware = True
master_cleanup.push(curloc, stmt.lhs)
elif is_destructor(stmt.fndecl):
if str(stmt.fndecl.name) != 'do_cleanups':
self.only_do_cleanups_seen = False
log('saw destructor %s in bb=%d, bb_from=%d, argument=%s'
% (str(stmt.fndecl.name), bb.index, bb_from, str(stmt.args[0])),
2)
master_cleanup.pop(curloc, stmt.args[0])
elif needs_special_treatment(stmt.fndecl):
pass
# gcc.permerror(curloc, 'function needs special treatment')
elif isinstance(stmt, gcc.GimpleAssign):
if isinstance(stmt.lhs, gcc.VarDecl) and isinstance(stmt.rhs[0], gcc.VarDecl):
master_cleanup.note_assignment(stmt.lhs, stmt.rhs[0])
elif isinstance(stmt, gcc.GimpleReturn):
if self.is_constructor:
if not master_cleanup.verify(curloc, stmt.retval):
gcc.permerror(curloc,
'constructor does not return master cleanup')
elif not self.is_special_constructor:
if not master_cleanup.isempty():
if curloc not in self.bad_returns:
gcc.permerror(curloc, 'cleanup stack is not empty at return')
self.bad_returns.add(curloc)
master_cleanup.inform()
# Traverse a basic block, updating the master cleanup information
# and propagating to other blocks.
def traverse_bbs(self, edge, bb, bb_from, entry_master):
log('traverse_bbs %d from %d' % (bb.index, bb_from), 1)
# Propagate the entry MasterCleanup though this block.
master_cleanup = MasterCleanup(entry_master)
self.compute_master(bb, bb_from, master_cleanup)
modified = False
if bb.index in self.master_cleanups:
# Merge the newly-computed MasterCleanup into the one we
# have already computed. If this resulted in a
# significant change, then we need to re-propagate.
modified = self.master_cleanups[bb.index].merge(master_cleanup)
else:
self.master_cleanups[bb.index] = master_cleanup
modified = True
# EDGE is None for the entry BB.
if edge is not None:
# If merging cleanups caused a change, check to see if we
# have a bad loop.
if edge in self.seen_edges:
# This error doesn't really help.
# if modified:
# gcc.permerror(self.guess_bb_location(bb),
# 'invalid cleanup use in loop')
return
self.seen_edges.add(edge)
if not modified:
return
# Now propagate to successor nodes.
for edge in bb.succs:
self.traverse_bbs(edge, edge.dest, bb.index, master_cleanup)
def check_cleanups(self):
if not self.fun.cfg or not self.fun.decl:
return 'ignored'
if is_destructor(self.fun.decl):
return 'destructor'
if needs_special_treatment(self.fun.decl):
return 'special'
self.is_constructor = is_constructor(self.fun.decl)
self.is_special_constructor = not self.is_constructor and str(self.fun.decl.name).find('with_cleanup') > -1
# Yuck.
if str(self.fun.decl.name) == 'gdb_xml_create_parser_and_cleanup_1':
self.is_special_constructor = True
if self.is_special_constructor:
gcc.inform(self.fun.start, 'function %s is a special constructor' % (self.fun.decl.name))
# If we only see do_cleanups calls, and this function is not
# itself a constructor, then we can convert it easily to RAII.
self.only_do_cleanups_seen = not self.is_constructor
# If we ever call a constructor, then we are "cleanup-aware".
self.cleanup_aware = False
entry_bb = self.fun.cfg.entry
master_cleanup = MasterCleanup()
self.traverse_bbs(None, entry_bb, -1, master_cleanup)
if want_raii_info and self.only_do_cleanups_seen and self.cleanup_aware:
gcc.inform(self.fun.decl.location,
'function %s could be converted to RAII' % (self.fun.decl.name))
if self.is_constructor:
return 'constructor'
return 'OK'
class CheckerPass(gcc.GimplePass):
def execute(self, fun):
if fun.decl:
log("Starting " + fun.decl.name)
if show_cfg:
dot = gccutils.cfg_to_dot(fun.cfg, fun.decl.name)
gccutils.invoke_dot(dot, name=fun.decl.name)
checker = CleanupChecker(fun)
what = checker.check_cleanups()
if fun.decl:
log(fun.decl.name + ': ' + what, 2)
ps = CheckerPass(name = 'check-cleanups')
# We need the cfg, but we want a relatively high-level Gimple.
ps.register_after('cfg')
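# Typical invocation (illustrative only; the exact flags depend on how the
# gcc python plugin is installed on the build machine):
# gcc -fplugin=python.so -fplugin-arg-python-script=cleanup_check.py -c foo.c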
| gpl-2.0 | -1,139,908,911,967,899,600 | 38.58806 | 115 | 0.588976 | false |
Godiyos/python-for-android | python-build/python-libs/gdata/build/lib/gdata/sample_util.py | 133 | 7858 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = '[email protected] (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
def get_param(name, prompt='', secret=False, ask=True):
# First, check for a command line parameter.
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
return sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
return sys.argv[i + 1]
if ask:
# If it was not on the command line, ask the user to input the value.
prompt = '%s: ' % prompt
if secret:
return getpass.getpass(prompt)
else:
return raw_input(prompt)
else:
return None
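# For example (hypothetical command line), running a sample as
#   python sample.py --email=user@example.com --auth_type=1
# makes get_param('email') return 'user@example.com' without prompting, while
# values not supplied on the command line are requested interactively.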
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if auth_type is None:
auth_type = int(get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
        '3. if you have registered to use OAuth\n'))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n').split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = get_param('email', 'Please enter your username')
password = get_param('password', 'Password', True)
if service is None:
service = get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)')
if source is None:
source = get_param('source', ask=False)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = get_param('auth_sub_token', ask=False)
session_token = get_param('session_token', ask=False)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter')
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from the'
print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n'))
consumer_key = get_param(
        'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app')
if oauth_type == HMAC:
consumer_secret = get_param(
          'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True)
# Swap out this code once the client supports requesting an oauth token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your domain.')
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
| apache-2.0 | -1,095,789,578,838,443,100 | 35.892019 | 79 | 0.647748 | false |
Godiyos/python-for-android | python-build/python-libs/gdata/src/gdata/Crypto/PublicKey/qNEW.py | 228 | 5545 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
pass
HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate a qNEW key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
obj=qNEWobj()
# Generate prime numbers p and q. q is a 160-bit prime
# number. p is another prime number (the modulus) whose bit
# size is chosen by the caller, and is generated so that p-1
# is a multiple of q.
#
# Note that only a single seed is used to
# generate p and q; if someone generates a key for you, you can
# use the seed to duplicate the key generation. This can
# protect you from someone generating values of p,q that have
# some special form that's easy to break.
if progress_func:
progress_func('p,q\n')
while (1):
obj.q = getPrime(160, randfunc)
# assert pow(2, 159L)<obj.q<pow(2, 160L)
obj.seed = S = long_to_bytes(obj.q)
C, N, V = 0, 2, {}
# Compute b and n such that bits-1 = b + n*HASHBITS
n= (bits-1) / HASHBITS
b= (bits-1) % HASHBITS ; powb=2L << b
powL1=pow(long(2), bits-1)
while C<4096:
# The V array will contain (bits-1) bits of random
# data, that are assembled to produce a candidate
# value for p.
for k in range(0, n+1):
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
p = V[n] % powb
for k in range(n-1, -1, -1):
p= (p << long(HASHBITS) )+V[k]
p = p+powL1 # Ensure the high bit is set
# Ensure that p-1 is a multiple of q
p = p - (p % (2*obj.q)-1)
# If p is still the right size, and it's prime, we're done!
if powL1<=p and isPrime(p):
break
# Otherwise, increment the counter and try again
C, N = C+1, N+n+1
if C<4096:
break # Ended early, so exit the while loop
if progress_func:
progress_func('4096 values of p tried\n')
obj.p = p
power=(p-1)/obj.q
# Next parameter: g = h**((p-1)/q) mod p, such that h is any
# number <p-1, and g>1. g is kept; h can be discarded.
if progress_func:
progress_func('h,g\n')
while (1):
h=bytes_to_long(randfunc(bits)) % (p-1)
g=pow(h, power, p)
if 1<h<p-1 and g>1:
break
obj.g=g
# x is the private key information, and is
# just a random number between 0 and q.
# y=g**x mod p, and is part of the public information.
if progress_func:
progress_func('x,y\n')
while (1):
x=bytes_to_long(randfunc(20))
if 0 < x < obj.q:
break
obj.x, obj.y=x, pow(g, x, p)
return obj
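# Usage sketch (illustrative only; os.urandom stands in for a stronger RNG and
# sign/verify are the generic wrappers inherited from pubkey.pubkey):
#   import os
#   key = generate(512, os.urandom)
#   m = bytes_to_long(SHA.new('message').digest())
#   sig = key.sign(m, bytes_to_long(os.urandom(19)))
#   assert key.verify(m, sig)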
# Construct a qNEW object
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)
Construct a qNEW object from a 4- or 5-tuple of numbers.
"""
obj=qNEWobj()
if len(tuple) not in [4,5]:
raise error, 'argument for construct() wrong length'
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class qNEWobj(pubkey.pubkey):
keydata=['p', 'q', 'g', 'y', 'x']
def _sign(self, M, K=''):
if (self.q<=K):
raise error, 'K is greater than q'
if M<0:
raise error, 'Illegal value of M (<0)'
if M>=pow(2,161L):
raise error, 'Illegal value of M (too large)'
r=pow(self.g, K, self.p) % self.q
s=(K- (r*M*self.x % self.q)) % self.q
return (r,s)
def _verify(self, M, sig):
r, s = sig
if r<=0 or r>=self.q or s<=0 or s>=self.q:
return 0
if M<0:
raise error, 'Illegal value of M (<0)'
if M<=0 or M>=pow(2,161L):
return 0
v1 = pow(self.g, s, self.p)
v2 = pow(self.y, M*r, self.p)
v = ((v1*v2) % self.p)
v = v % self.q
if v==r:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return 160
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
return hasattr(self, 'x')
def can_sign(self):
"""Return a Boolean value recording whether this algorithm can generate signatures."""
return 1
def can_encrypt(self):
"""Return a Boolean value recording whether this algorithm can encrypt data."""
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.p, self.q, self.g, self.y))
object = qNEWobj
| apache-2.0 | 7,821,494,523,921,376,000 | 31.617647 | 94 | 0.571506 | false |
pramasoul/micropython | tests/extmod/vfs_fat_finaliser.py | 14 | 1906 | # Test VfsFat class and its finaliser
try:
import uerrno, uos
uos.VfsFat
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
def __init__(self, blocks, sec_size=512):
self.sec_size = sec_size
self.data = bytearray(blocks * self.sec_size)
def readblocks(self, n, buf):
for i in range(len(buf)):
buf[i] = self.data[n * self.sec_size + i]
def writeblocks(self, n, buf):
for i in range(len(buf)):
self.data[n * self.sec_size + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # MP_BLOCKDEV_IOCTL_BLOCK_COUNT
return len(self.data) // self.sec_size
if op == 5: # MP_BLOCKDEV_IOCTL_BLOCK_SIZE
return self.sec_size
# Create block device, and skip test if not enough RAM
try:
bdev = RAMBlockDevice(50)
except MemoryError:
print("SKIP")
raise SystemExit
# Format block device and create VFS object
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
# Here we test that opening a file with the heap locked fails correctly. This
# is a special case because file objects use a finaliser and allocating with a
# finaliser is a different path to normal allocation. It would be better to
# test this in the core tests but there are no core objects that use finaliser.
import micropython
micropython.heap_lock()
try:
vfs.open("x", "r")
except MemoryError:
print("MemoryError")
micropython.heap_unlock()
# Here we test that the finaliser is actually called during a garbage collection.
import gc
N = 4
for i in range(N):
n = "x%d" % i
f = vfs.open(n, "w")
f.write(n)
f = None # release f without closing
[0, 1, 2, 3] # use up Python stack so f is really gone
gc.collect() # should finalise all N files by closing them
for i in range(N):
with vfs.open("x%d" % i, "r") as f:
print(f.read())
| mit | -3,013,091,855,140,831,000 | 26.623188 | 81 | 0.648478 | false |
fgclaramonte/Odoo-addons | inter_company_rules/models/inter_company_invoice.py | 21 | 5617 | from openerp import models, fields, api, _
from openerp.exceptions import Warning
class account_invoice(models.Model):
_inherit = 'account.invoice'
auto_generated = fields.Boolean(string='Auto Generated Document', copy=False)
auto_invoice_id = fields.Many2one('account.invoice', string='Source Invoice',
readonly=True, copy=False)
@api.multi
def invoice_validate(self):
"""
        When an invoice is validated, generate the corresponding cross-company invoice based on the company rules.
"""
for invoice in self:
#do not consider invoices that have already been auto-generated, nor the invoices that were already validated in the past
company = self.env['res.company']._find_company_from_partner(invoice.partner_id.id)
if company and company.auto_generate_invoices and not invoice.auto_generated:
if invoice.type == 'out_invoice':
invoice.action_create_invoice(company, 'in_invoice', 'purchase')
elif invoice.type == 'in_invoice':
invoice.action_create_invoice(company, 'out_invoice', 'sale')
elif invoice.type == 'out_refund':
invoice.action_create_invoice(company, 'in_refund', 'purchase_refund')
elif invoice.type == 'in_refund':
invoice.action_create_invoice(company, 'out_refund', 'sale_refund')
return super(account_invoice, self).invoice_validate()
@api.one
def action_create_invoice(self, company, inv_type, journal_type):
#Find user for creating the invoice from company
intercompany_uid = company.intercompany_user_id and company.intercompany_user_id.id or False
if not intercompany_uid:
            raise Warning(_('Provide one user for intercompany relation for %s') % company.name)
ctx = self._context.copy()
ctx['force_company'] = company.id
this_company_partner = self.company_id.partner_id
inv_lines = []
for line in self.invoice_line:
#To find lot of data from product onchanges because its already avail method in core.
product_uom = line.product_id.uom_id and line.product_id.uom_id.id or False
line_data = line.with_context(ctx).sudo(intercompany_uid).product_id_change(line.product_id.id,
product_uom, qty=line.quantity, name='', type=inv_type,
partner_id=this_company_partner.id,
fposition_id=this_company_partner.property_account_position.id, company_id=company.id)
inv_line_data = self._prepare_inv_line(line_data, line)
inv_line_id = line.with_context(ctx).sudo(intercompany_uid).create(inv_line_data)
inv_lines.append(inv_line_id.id)
#create invoice
invoice_vals = self.with_context(ctx).sudo(intercompany_uid)._prepare_inv(inv_lines, inv_type, journal_type, company)[0]
return self.with_context(ctx).sudo(intercompany_uid).create(invoice_vals)
@api.model
def _prepare_inv_line(self, line_data, line):
""" Generate invoice line dictionary"""
vals = {
'name': line.name,
'price_unit': line.price_unit,
'quantity': line.quantity,
'discount': line.discount,
'product_id': line.product_id.id or False,
'uos_id': line.uos_id.id or False,
'sequence': line.sequence,
'invoice_line_tax_id': [(6, 0, line_data['value'].get('invoice_line_tax_id', []))],
'account_analytic_id': line.account_analytic_id.id or False,
}
if line_data['value'].get('account_id', False):
vals['account_id'] = line_data['value']['account_id']
return vals
@api.one
def _prepare_inv(self, inv_lines, inv_type, jrnl_type, company):
""" Generate invoice dictionary """
#To find journal.
journal = self.env['account.journal'].search([('type', '=', jrnl_type), ('company_id', '=', company.id)], limit=1)
if not journal:
raise Warning(_('Please define %s journal for this company: "%s" (id:%d).') % (jrnl_type, company.name, company.id))
#To find periods of supplier company.
ctx = self._context.copy()
ctx['company_id'] = company.id
period_ids = self.env['account.period'].with_context(ctx).find(self.date_invoice)
#To find account,payment term,fiscal position,bank.
partner_data = self.onchange_partner_id(inv_type, self.company_id.partner_id.id, company_id=company.id)
return {
'name': self.name,
'origin': self.company_id.name + _(' Invoice: ') + str(self.number),
'type': inv_type,
'date_invoice': self.date_invoice,
'reference': self.reference,
'account_id': partner_data['value'].get('account_id', False),
'partner_id': self.company_id.partner_id.id,
'journal_id': journal.id,
'invoice_line': [(6, 0, inv_lines)],
'currency_id': self.currency_id and self.currency_id.id,
'fiscal_position': partner_data['value'].get('fiscal_position', False),
'payment_term': partner_data['value'].get('payment_term', False),
'company_id': company.id,
'period_id': period_ids and period_ids[0].id or False,
'partner_bank_id': partner_data['value'].get('partner_bank_id', False),
'auto_generated': True,
'auto_invoice_id': self.id,
}
| gpl-3.0 | 5,583,683,049,762,702,000 | 51.990566 | 134 | 0.597828 | false |
pmghalvorsen/gramps_branch | gramps/gen/filters/rules/family/_hastag.py | 2 | 1730 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Rule that checks for a family with a particular tag.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hastagbase import HasTagBase
#-------------------------------------------------------------------------
#
# HasTag
#
#-------------------------------------------------------------------------
class HasTag(HasTagBase):
"""
Rule that checks for a family with a particular tag.
"""
labels = [ _('Tag:') ]
name = _('Families with the <tag>')
description = _("Matches families with the particular tag")
| gpl-2.0 | 7,058,798,302,969,537,000 | 33.6 | 79 | 0.528324 | false |
camptocamp/odoo | addons/l10n_in_hr_payroll/__init__.py | 430 | 1117 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_in_hr_payroll
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | -357,303,169,001,409,300 | 42 | 80 | 0.628469 | false |
linkhub-sdk/popbill.taxinvoice.example.py | cancelIssue.py | 1 | 1516 | # -*- coding: utf-8 -*-
# Code for console encoding differences. Don't mind it.
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import TaxinvoiceService, PopbillException
taxinvoiceService = TaxinvoiceService(testValue.LinkID, testValue.SecretKey)
taxinvoiceService.IsTest = testValue.IsTest
taxinvoiceService.IPRestrictOnOff = testValue.IPRestrictOnOff
taxinvoiceService.UseStaticIP = testValue.UseStaticIP
taxinvoiceService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Cancels issuance of a tax invoice that is in the [Issued] state.
- Cancelling issuance is only possible before the invoice has been transmitted to the NTS (National Tax Service).
- A tax invoice whose issuance has been cancelled is not transmitted to the NTS.
- https://docs.popbill.com/taxinvoice/python/api#CancelIssue
'''
try:
print("=" * 15 + " 세금계산서 발행 취소 " + "=" * 15)
    # Popbill member business registration number
CorpNum = testValue.testCorpNum
    # Tax invoice issue type - SELL : sales, BUY : purchase, TRUSTEE : consignment
MgtKeyType = "SELL"
    # Document number
MgtKey = "20210429-001"
    # Memo
Memo = "발행취소 메모"
    # Popbill member ID
UserID = testValue.testUserID
result = taxinvoiceService.cancelIssue(CorpNum, MgtKeyType, MgtKey, Memo, UserID)
print("처리결과 : [%d] %s" % (result.code, result.message))
except PopbillException as PE:
print("Popbill Exception : [%d] %s" % (PE.code, PE.message))
| mit | -2,675,356,383,787,168,000 | 24.058824 | 85 | 0.707355 | false |
dineshappavoo/virtdc | virtdc_init/virtdc_init.py | 2 | 1747 | #!/usr/bin/env python
import subprocess, sys
#API - virtdc command line init tool
#==============================================================================
# Variables
#==============================================================================
# Some descriptive variables
#name = "virtdc"
#version = "0.1.0"
#long_description = """vmplacementandscaling is a set of API's/tools written to create virtual machines for cloud users efficiently."""
#url = "https://github.com/dineshappavoo/virtdc"
#license = ""
#==============================================================================
def virtdc_init():
#start domain termination process
cmd = "/usr/bin/python /var/lib/virtdc/framework/VM_terminationHandler.py &"
termination_process = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
#initiate the vmonere domain socket listener
vmonere_domain_socket_listener_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/sockets/vmonere_listener.socket.py &"
vmonere_domain_socket_listener = subprocess.Popen(vmonere_domain_socket_listener_cmd, shell=True, stderr=subprocess.PIPE)
#initiate the vmonere host socket listener
vmonere_host_socket_listener_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/sockets/vmonere_host_listener.socket &"
vmonere_host_socket_listener = subprocess.Popen(vmonere_host_socket_listener_cmd, shell=True, stderr=subprocess.PIPE)
delete_log_files_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/host/vmonere_exec_log_deleter.py"
delete_log_files_process = subprocess.Popen(delete_log_files_cmd, shell=True, stderr=subprocess.PIPE)
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
virtdc_init()
| mit | -7,807,982,920,771,440,000 | 46.216216 | 138 | 0.630223 | false |
srvelivela/ansibledoc | lib/ansible/plugins/lookup/consul_kv.py | 89 | 4541 | # (c) 2015, Steve Gargan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Lookup plugin to grab metadata from a consul key value store.
============================================================
Plugin will look up metadata for a playbook from the key value store in a
consul cluster. Values can be easily set in the kv store with simple rest
commands e.g.
curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
this can then be looked up in a playbook as follows
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to/retrieve'
Parameters can be provided after the key to be more specific about what to retrieve e.g.
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to/retrieve recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
recurse: if true, will retrieve all the values that have the given key as prefix
index: if the key has a value with the specified index then this is returned
allowing access to historical values.
token: acl token to allow access to restricted values.
By default this will look up keys via the consul agent running on http://localhost:8500
this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
of the kv store you'd like to use.
'''
######################################################################
import os
import sys
from urlparse import urlparse
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
try:
import json
except ImportError:
import simplejson as json
try:
import consul
HAS_CONSUL = True
except ImportError as e:
HAS_CONSUL = False
class LookupModule(LookupBase):
def __init__(self, loader=None, templar=None, **kwargs):
        super(LookupModule, self).__init__(loader, templar, **kwargs)
self.agent_url = 'http://localhost:8500'
if os.getenv('ANSIBLE_CONSUL_URL') is not None:
self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
def run(self, terms, variables=None, **kwargs):
if not HAS_CONSUL:
raise AnsibleError('python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
u = urlparse(self.agent_url)
consul_api = consul.Consul(host=u.hostname, port=u.port)
values = []
try:
for term in terms:
params = self.parse_params(term)
results = consul_api.kv.get(params['key'],
token=params['token'],
index=params['index'],
recurse=params['recurse'])
if results[1]:
# responds with a single or list of result maps
if isinstance(results[1], list):
for r in results[1]:
values.append(r['Value'])
else:
values.append(results[1]['Value'])
except Exception as e:
raise AnsibleError(
"Error locating '%s' in kv store. Error was %s" % (term, e))
return values
def parse_params(self, term):
params = term.split(' ')
paramvals = {
'key': params[0],
'token': None,
'recurse': False,
'index': None
}
# parameters specified?
try:
for param in params[1:]:
if param and len(param) > 0:
name, value = param.split('=')
                    assert name in paramvals, "%s is not a valid consul lookup parameter" % name
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
return paramvals
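    # Illustrative sketch (added; not part of the original plugin): given the
    # term format described in the module docstring, parse_params() splits a
    # term into the key plus keyword options. Option values stay as strings
    # exactly as written in the term; the token above is a made-up placeholder.
    # >>> LookupModule().parse_params('key/to/retrieve recurse=true index=3')
    # {'key': 'key/to/retrieve', 'token': None, 'recurse': 'true', 'index': '3'}
    # (key order in the printed dict is illustrative only)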
| gpl-3.0 | -2,738,424,637,369,656,300 | 33.142857 | 146 | 0.605373 | false |
SVRobots/Minecraft | modules/world.py | 1 | 2839 | from os import listdir, mkdir
import cPickle
from imp import load_source
api = load_source('api', 'modules\\api.py')
from api import *
class World(object):
#initialize
def __init__(self):
super(World, self).__init__()
self.world="Default"
self.dimension="DIM1"
self.playername="Default"
self.world_blocks={}
self.shown_blocks={}
#load world
def load(self):
#set world folder location
self.savedir = 'saves\\' + self.world + '\\'
#init saves folder
if FileExists('', 'saves') == False:
mkdir('saves')
#init world folder
if FileExists('saves', self.world) == False:
mkdir('saves\\' + self.world)
#init dimension folder
if FileExists('saves\\' + self.world, self.dimension) == False:
mkdir('saves\\' + self.world + '\\' + self.dimension)
#init players folder
if FileExists(self.savedir, 'players') == False:
mkdir(self.savedir + 'players')
#init player
if FileExists(self.savedir + 'players\\', self.playername) == False:
self.player = Player()
else:
self.loadPlayer()
#get world gravity
self.GetGravity()
#make some blocks
self.GenerateWorld(-10,-10,10,10)
self.ParseVisible()
#Generate World
def GenerateWorld(self, lx, lz, ux, uz):
i=0
for m in self.c.dimensions:
if self.dimension in m:
tmp = self.c.mods[i].GenerateDimension(self.dimension, lx, lz, ux, uz)
for b in tmp:
self.world_blocks[b] = tmp[b]
#find visible blocks to render
def ParseVisible(self):
for b in self.world_blocks:
x,y,z = b
if ((x,y+1,z) not in self.world_blocks) or ((x,y-1,z) not in self.world_blocks) or ((x+1,y,z) not in self.world_blocks) or ((x-1,y,z) not in self.world_blocks) or ((x,y,z+1) not in self.world_blocks) or ((x,y,z-1) not in self.world_blocks):
self.shown_blocks[b]=self.world_blocks[b]
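    # Clarifying note (added): a position is copied into shown_blocks only when
    # at least one of its six axis-aligned neighbours is missing from
    # world_blocks, i.e. the block has at least one exposed face. A block that
    # is fully enclosed on all six sides is skipped and never rendered.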
#Get world gravity
def GetGravity(self):
i=0
for m in self.c.dimensions:
if self.dimension in m:
self.a = self.c.mods[i].dimension_gravity[self.dimension]
#save player
def savePlayer(self):
save(self.savedir + 'players\\' + self.player.name, self.player, True)
#load player
def loadPlayer(self):
self.player = load(self.savedir + 'players\\' + self.playername)
def quit(self):
self.savePlayer()
class Player(object):
def __init__(self):
self.name='Default'
self.flying=True
self.gameMode=1
self.x=0
self.y=10
self.z=0
self.r=0
self.p=0
self.onGround=False
self.vy=0
self.fallDistance=0
def InitializeWorld(world, dimension, c):
w=World()
w.world = world
w.dimension = dimension
w.c = c
w.load()
return w
def save(f,o,r):
if r == True:
a = open(f,'wb')
else:
a = open(f,'r+b')
print f
cPickle.dump(o, a, -1)
a.close()
def load(f):
a = open(f, 'rb')
o = cPickle.load(a)
a.close()
return o
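# Usage sketch (added for illustration; the path below is hypothetical):
# save('saves/Default/players/Default', Player(), True) pickles the object to
# that file, and load('saves/Default/players/Default') returns an equivalent
# Player instance - exactly how savePlayer()/loadPlayer() use these helpers.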
def FileExists(direct, f):
for p in listdir(direct):
if p == f:
return True
return False | gpl-2.0 | -7,457,913,990,684,695,000 | 24.357143 | 243 | 0.659387 | false |
dati91/servo | tests/wpt/web-platform-tests/resource-timing/resources/multi_redirect.py | 17 | 1767 | def main(request, response):
"""Handler that causes multiple redirections.
The request has two mandatory and one optional query parameters:
page_origin - The page origin, used for redirection and to set TAO. This is a mandatory parameter.
cross_origin - The cross origin used to make this a cross-origin redirect. This is a mandatory parameter.
timing_allow - Whether TAO should be set or not in the redirect chain. This is an optional parameter. Default: not set.
Note that |step| is a parameter used internally for the multi-redirect. It's the step we're at in the redirect chain.
"""
step = 1
if "step" in request.GET:
try:
step = int(request.GET.first("step"))
except ValueError:
pass
page_origin = request.GET.first("page_origin")
cross_origin = request.GET.first("cross_origin")
timing_allow = "0"
if "timing_allow" in request.GET:
timing_allow = request.GET.first("timing_allow")
redirect_url = "/resource-timing/resources/multi_redirect.py?"
redirect_url += "page_origin=" + page_origin
redirect_url += "&cross_origin=" + cross_origin
redirect_url += "&timing_allow=" + timing_allow
redirect_url += "&step="
if step == 1:
redirect_url = cross_origin + redirect_url + "2"
if timing_allow != "0":
response.headers.set("timing-allow-origin", page_origin)
elif step == 2:
redirect_url = page_origin + redirect_url + "3"
if timing_allow != "0":
response.headers.set("timing-allow-origin", page_origin)
else:
redirect_url = page_origin + "/resource-timing/resources/blank_page_green.htm"
response.status = 302
response.headers.set("Location", redirect_url)
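# Sketch of the redirect chain this handler produces (added for illustration;
# <page_origin> and <cross_origin> stand for the query-parameter values):
#   step=1 -> 302 to <cross_origin>/resource-timing/resources/multi_redirect.py?...&step=2
#   step=2 -> 302 to <page_origin>/resource-timing/resources/multi_redirect.py?...&step=3
#   step=3 -> 302 to <page_origin>/resource-timing/resources/blank_page_green.htm
# The timing-allow-origin header is only added on the first two hops, and only
# when the timing_allow query parameter is not "0".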
| mpl-2.0 | -8,498,140,194,006,947,000 | 43.175 | 123 | 0.654782 | false |
acsone/partner-contact | base_location_nuts/models/res_partner_nuts.py | 5 | 1197 | # -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Antonio Espinosa
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields
class ResPartnerNuts(models.Model):
_name = 'res.partner.nuts'
_order = "parent_left"
_parent_order = "name"
_parent_store = True
_description = "NUTS Item"
# NUTS fields
level = fields.Integer(required=True)
code = fields.Char(required=True)
name = fields.Char(required=True, translate=True)
country_id = fields.Many2one(comodel_name='res.country', string="Country",
required=True)
state_id = fields.Many2one(comodel_name='res.country.state',
string='State')
# Parent hierarchy
parent_id = fields.Many2one(comodel_name='res.partner.nuts',
ondelete='restrict')
child_ids = fields.One2many(
'res.partner.nuts',
'parent_id',
"Children",
oldname="children")
parent_left = fields.Integer('Parent Left', select=True)
parent_right = fields.Integer('Parent Right', select=True)
| agpl-3.0 | -1,704,822,542,284,996,000 | 35.151515 | 78 | 0.616094 | false |
rwl/puddle | puddle/python_editor/python_workbench_editor.py | 1 | 6267 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" A Python code editor.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import exists, basename
from enthought.pyface.workbench.api import TraitsUIEditor
from enthought.traits.api import Code, Instance, Str
from enthought.traits.ui.api import View, Item, Group, CodeEditor
from enthought.pyface.api import PythonEditor, FileDialog, CANCEL
#------------------------------------------------------------------------------
# "PythonWorkbenchEditor" class:
#------------------------------------------------------------------------------
class PythonWorkbenchEditor(TraitsUIEditor):
""" A text editor.
"""
#--------------------------------------------------------------------------
# "PythonEditor" interface:
#--------------------------------------------------------------------------
# The text being edited:
text = Code
# The default traits view.
traits_view = View(
Group(
Item(
name="text", editor=CodeEditor(show_line_numbers=False)
),
show_labels=False
),
id="puddle.python_editor.python_workbench_editor",
kind="live", resizable=True,
)
#--------------------------------------------------------------------------
# "TraitsUIEditor" interface.
#--------------------------------------------------------------------------
def _name_default(self):
""" Trait initialiser.
"""
# self.obj.on_trait_change(self.on_path, "path")
if self.obj.path == "":
return "Unsaved Script"
else:
return basename(self.obj.path)
# def on_path(self, new):
# """ Handle the file path changing """
#
# self.name = basename(new)
def create_control(self, parent):
""" Creates the toolkit-specific control that represents the
editor. 'parent' is the toolkit-specific control that is
the editor's parent.
"""
ed = PythonEditor(parent, show_line_numbers=False)
# FIXME: Implement toolkit specific Python editor subclass
import wx
styles = [
wx.stc.STC_STYLE_DEFAULT,
wx.stc.STC_STYLE_CONTROLCHAR,
wx.stc.STC_STYLE_BRACELIGHT,
wx.stc.STC_STYLE_BRACEBAD,
wx.stc.STC_P_DEFAULT,
wx.stc.STC_P_COMMENTLINE,
wx.stc.STC_P_NUMBER,
wx.stc.STC_P_STRING,
wx.stc.STC_P_CHARACTER,
wx.stc.STC_P_WORD,
wx.stc.STC_P_TRIPLE,
wx.stc.STC_P_TRIPLEDOUBLE,
wx.stc.STC_P_CLASSNAME,
wx.stc.STC_P_DEFNAME,
wx.stc.STC_P_OPERATOR,
wx.stc.STC_P_IDENTIFIER,
wx.stc.STC_P_COMMENTBLOCK,
wx.stc.STC_P_STRINGEOL
]
for style in styles:
ed.control.StyleSetFaceName(style, "monospace")
ed.control.StyleSetSize(style, 10)
path = self.obj.path
if exists(path):
ed.path = path
ed.load()
return ed.control
#--------------------------------------------------------------------------
# "PythonWorkbenchEditor" interface.
#--------------------------------------------------------------------------
def save(self):
""" Saves the text to disk.
"""
# If the file has not yet been saved then prompt for the file name.
if len(self.obj.path) == 0:
self.save_as()
else:
f = file(self.obj.path, 'w')
f.write(self.text)
f.close()
# We have just saved the file so we ain't dirty no more!
self.dirty = False
return
def save_as(self):
""" Saves the text to disk after prompting for the file name.
"""
dialog = FileDialog(
parent = self.window.control,
action = 'save as',
default_filename = self.name,
wildcard = FileDialog.WILDCARD_PY
)
if dialog.open() != CANCEL:
# Update the editor.
self.id = dialog.path
self.name = basename(dialog.path)
# Update the resource.
self.obj.path = dialog.path
# Save it!
self.save()
return
def _text_changed(self, trait_name, old, new):
""" Static trait change handler.
"""
if self.traits_inited():
self.dirty = True
return
def _dirty_changed(self, dirty):
""" Static trait change handler.
"""
if len(self.obj.path) > 0:
if dirty:
self.name = basename(self.obj.path) + '*'
else:
self.name = basename(self.obj.path)
return
# EOF -------------------------------------------------------------------------
| mit | 4,991,800,274,352,044,000 | 31.640625 | 79 | 0.49274 | false |
dischinator/pyload | module/lib/simplejson/encoder.py | 45 | 20060 | """Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
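# Example (added for illustration): both the C and pure-Python implementations
# escape quotes, backslashes and non-ASCII characters the same way, e.g.
# >>> py_encode_basestring_ascii(u'snow\u2603man "quoted"')
# '"snow\\u2603man \\"quoted\\""'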
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict, namedtuple | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=True, namedtuple_as_object=True,
tuple_as_array=True):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (not the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
If namedtuple_as_object is true (the default), tuple subclasses with
``_asdict()`` methods will be encoded as JSON objects.
If tuple_as_array is true (the default), tuple (and subclasses) will
be encoded as JSON arrays.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
self.namedtuple_as_object = namedtuple_as_object
self.tuple_as_array = tuple_as_array
if isinstance(indent, (int, long)):
indent = ' ' * indent
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
with the usual entities (e.g. &) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
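# Usage sketch (added for illustration, mirroring the class docstring):
# >>> JSONEncoderForHTML().encode({"html": "<b>&</b>"})
# '{"html": "\\u003cb\\u003e\\u0026\\u003c/b\\u003e"}'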
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal, _namedtuple_as_object, _tuple_as_array,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
elif (_namedtuple_as_object and isinstance(value, tuple) and
hasattr(value, '_asdict')):
chunks = _iterencode_dict(value._asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
elif (_namedtuple_as_object and isinstance(value, tuple) and
hasattr(value, '_asdict')):
chunks = _iterencode_dict(value._asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, list):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif (_namedtuple_as_object and isinstance(o, tuple) and
hasattr(o, '_asdict')):
for chunk in _iterencode_dict(o._asdict(), _current_indent_level):
yield chunk
elif (_tuple_as_array and isinstance(o, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| gpl-3.0 | -6,977,356,010,366,139,000 | 36.565543 | 82 | 0.534148 | false |
open-o/nfvo | lcm/lcm/packages/tests/test_nf.py | 1 | 23203 | # Copyright 2016-2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from rest_framework import status
from django.test import TestCase
from django.test import Client
from lcm.pub.utils import restcall
from lcm.pub.utils import fileutil
from lcm.pub.nfvi.vim.vimadaptor import VimAdaptor
from lcm.pub.database.models import NfPackageModel, VnfPackageFileModel, NfInstModel
from lcm.pub.database.models import JobStatusModel, JobModel
from lcm.packages.nf_package import NfOnBoardingThread, NfPkgDeletePendingThread
from lcm.packages.nf_package import NfPkgDeleteThread
from lcm.packages import nf_package
from lcm.pub.nfvi.vim.const import VIM_OPENSTACK
class TestNfPackage(TestCase):
def setUp(self):
self.client = Client()
NfPackageModel.objects.filter().delete()
VnfPackageFileModel.objects.filter().delete()
NfInstModel.objects.filter().delete()
JobModel.objects.filter().delete()
JobStatusModel.objects.filter().delete()
self.vnfd_raw_data = {
"rawData":{
"instance":{
"metadata":{
"is_shared":False,
"plugin_info":"vbrasplugin_1.0",
"vendor":"zte",
"request_reclassification":False,
"name":"vbras",
"version":1,
"vnf_type":"vbras",
"cross_dc":False,
"vnfd_version":"1.0.0",
"id":"zte_vbras_1.0",
"nsh_aware":True
},
"nodes":[
{
"id":"aaa_dnet_cp_0xu2j5sbigxc8h1ega3if0ld1",
"type_name":"tosca.nodes.nfv.ext.zte.CP",
"template_name":"aaa_dnet_cp",
"properties":{
"bandwidth":{
"type_name":"integer",
"value":0
},
"direction":{
"type_name":"string",
"value":"bidirectional"
},
"vnic_type":{
"type_name":"string",
"value":"normal"
},
"sfc_encapsulation":{
"type_name":"string",
"value":"mac"
},
"order":{
"type_name":"integer",
"value":2
}
},
"relationships":[
{
"name":"guest_os",
"source_requirement_index":0,
"target_node_id":"AAA_image_d8aseebr120nbm7bo1ohkj194",
"target_capability_name":"feature"
}
]
},
{
"id":"LB_Image_oj5l2ay8l2g6vcq6fsswzduha",
"type_name":"tosca.nodes.nfv.ext.ImageFile",
"template_name":"LB_Image",
"properties":{
"disk_format":{
"type_name":"string",
"value":"qcow2"
},
"file_url":{
"type_name":"string",
"value":"/SoftwareImages/image-lb"
},
"name":{
"type_name":"string",
"value":"image-lb"
}
}
}
]
},
"model":{
"metadata":{
"is_shared":False,
"plugin_info":"vbrasplugin_1.0",
"vendor":"zte",
"request_reclassification":False,
"name":"vbras",
"version":1,
"vnf_type":"vbras",
"cross_dc":False,
"vnfd_version":"1.0.0",
"id":"zte_vbras_1.0",
"nsh_aware":True
},
"node_templates":[
{
"name":"aaa_dnet_cp",
"type_name":"tosca.nodes.nfv.ext.zte.CP",
"default_instances":1,
"min_instances":0,
"properties":{
"bandwidth":{
"type_name":"integer",
"value":0
}
},
"requirement_templates":[
{
"name":"virtualbinding",
"target_node_template_name":"AAA",
"target_capability_name":"virtualbinding"
}
]
}
]
}
}
}
def tearDown(self):
pass
def assert_job_result(self, job_id, job_progress, job_detail):
jobs = JobStatusModel.objects.filter(
jobid=job_id,
progress=job_progress,
descp=job_detail)
self.assertEqual(1, len(jobs))
@mock.patch.object(NfOnBoardingThread, 'run')
def test_nf_pkg_on_boarding_normal(self, mock_run):
resp = self.client.post("/openoapi/nslcm/v1/vnfpackage", {
"csarId": "1",
"vimIds": ["1"]
}, format='json')
self.assertEqual(resp.status_code, status.HTTP_202_ACCEPTED)
@mock.patch.object(restcall, 'call_req')
def test_nf_pkg_on_boarding_when_on_boarded(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({"onBoardState": "onBoarded"}), '200']
NfOnBoardingThread(csar_id="1",
vim_ids=["1"],
lab_vim_id="",
job_id="2").run()
self.assert_job_result("2", 255, "CSAR(1) already onBoarded.")
@mock.patch.object(restcall, 'call_req')
def test_nf_pkg_on_boarding_when_on_boarding(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"onBoardState": "non-onBoarded",
"processState": "onBoarding"
}), '200']
NfOnBoardingThread(csar_id="2",
vim_ids=["1"],
lab_vim_id="",
job_id="3").run()
self.assert_job_result("3", 255, "CSAR(2) is onBoarding now.")
@mock.patch.object(restcall, 'call_req')
def test_nf_on_boarding_when_nfd_already_exists(self, mock_call_req):
mock_vals = {
"/openoapi/catalog/v1/csars/2":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200']}
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
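        # Note (added for clarity): restcall.call_req receives the resource URL
        # as its fifth positional argument, so args[4] picks the canned
        # response for that URL out of mock_vals above.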
NfPackageModel(uuid="1", nfpackageid="2", vnfdid="zte_vbras_1.0").save()
NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run()
self.assert_job_result("4", 255, "NFD(zte_vbras_1.0) already exists.")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
@mock.patch.object(VimAdaptor, 'get_image')
def test_nf_on_boarding_when_successfully(self, mock_get_image, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/2":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/2/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), '200'],
"/openoapi/catalog/v1/csars/2?onBoardState=onBoarded": [0, '{}', 200],
"/openoapi/catalog/v1/csars/2?operationalState=Enabled": [0, '{}', 200],
"/openoapi/catalog/v1/csars/2?processState=normal": [0, '{}', 200]}
mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}]
mock__init__.return_value = None
mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "active"}]
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run()
self.assert_job_result("4", 100, "CSAR(2) onBoarding successfully.")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
@mock.patch.object(VimAdaptor, 'get_image')
def test_nf_on_boarding_when_timeout(self, mock_get_image, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
nf_package.MAX_RETRY_TIMES = 2
nf_package.SLEEP_INTERVAL_SECONDS = 1
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/3":
[0, json.JSONEncoder().encode({"onBoardState": "onBoardFailed",
"processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/3/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/catalog/v1/csars/3?processState=onBoardFailed": [0, '{}', 200],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), 200]}
mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}]
mock__init__.return_value = None
mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "0"}]
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="3", vim_ids=["1"], lab_vim_id="", job_id="6").run()
self.assert_job_result("6", 255, "Failed to create image:timeout(2 seconds.)")
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(fileutil, 'download_file_from_http')
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'create_image')
def test_nf_on_boarding_when_failed_to_create_image(self, mock_create_image,
mock__init__, mock_download_file_from_http, mock_call_req):
mock_download_file_from_http.return_value = True, "/root/package"
mock_vals = {
"/openoapi/catalog/v1/csars/5":
[0, json.JSONEncoder().encode({
"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'],
"/openoapi/catalog/v1/servicetemplates/queryingrawdata":
[0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'],
"/openoapi/catalog/v1/csars/5/files?relativePath=/SoftwareImages/image-lb":
[0, json.JSONEncoder().encode({
"csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'],
"/openoapi/catalog/v1/csars/5?processState=onBoardFailed": [0, '{}', 200],
"/openoapi/extsys/v1/vims":
[0, json.JSONEncoder().encode([{
"vimId": "1", "type": VIM_OPENSTACK,
"url": "/root/package", "userName": "tom",
"password": "tom", "tenant": "10"}]), '200']}
mock_create_image.return_value = [1, 'Unsupported image format.']
mock__init__.return_value = None
def side_effect(*args):
return mock_vals[args[4]]
mock_call_req.side_effect = side_effect
NfOnBoardingThread(csar_id="5", vim_ids=["1"], lab_vim_id="", job_id="8").run()
self.assert_job_result("8", 255, "Failed to create image:Unsupported image format.")
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_get_csar_successfully(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"name": "1", "provider": "2", "version": "3", "operationalState": "4",
"usageState": "5", "onBoardState": "6", "processState": "7",
"deletionPending": "8", "downloadUri": "9", "createTime": "10",
"modifyTime": "11", "format": "12", "size": "13"
}), '200']
NfPackageModel(uuid="1", vnfdid="001", vendor="vendor",
vnfdversion="1.2.0", vnfversion="1.1.0", nfpackageid="13").save()
VnfPackageFileModel(id="1", filename="filename", imageid="00001",
vimid="1", vimuser="001", tenant="12", status="1", vnfpid="13").save()
NfInstModel(nfinstid="1", mnfinstid="001", nf_name="name", package_id="13").save()
resp = self.client.get("/openoapi/nslcm/v1/vnfpackage/13")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
expect_data = {
"csarId": '13',
"packageInfo": {
"vnfdId": "001",
"vnfdProvider": "vendor",
"vnfdVersion": "1.2.0",
"vnfVersion": "1.1.0",
"name": "1",
"provider": "2",
"version": "3",
"operationalState": "4",
"usageState": "5",
"onBoardState": "6",
"processState": "7",
"deletionPending": "8",
"downloadUri": "9",
"createTime": "10",
"modifyTime": "11",
"format": "12",
"size": "13"},
"imageInfo": [{
"index": "0",
"fileName": "filename",
"imageId": "00001",
"vimId": "1",
"vimUser": "001",
"tenant": "12",
"status": "1"}],
"vnfInstanceInfo": [{
"vnfInstanceId": "1",
"vnfInstanceName": "name"}]}
self.assertEqual(expect_data, resp.data)
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_successfully(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "deleting"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "Delete pending CSAR(1) successfully.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_deleting(self, mock_call_req):
NfPackageModel(uuid="01", nfpackageid="1").save()
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "deleting"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) is deleting now.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_not_deletion_pending(self, mock_call_req):
NfPackageModel(uuid="01", nfpackageid="1").save()
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"deletionPending": "false"}), "200"]
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) need not to be deleted.")
@mock.patch.object(restcall, 'call_req')
def test_delete_pending_csar_when_in_using(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({
"processState": "normal"}), "200"]
NfPackageModel(uuid="01", nfpackageid="1").save()
NfInstModel(nfinstid="01", package_id="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "CSAR(1) is in using, cannot be deleted.")
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'delete_image')
@mock.patch.object(restcall, 'call_req')
    def test_delete_csar_when_exception(self, mock_call_req, mock_delete_image, mock_init_):
mock_vals = {
("/openoapi/catalog/v1/csars/1", "DELETE"):
[1, "{}", "400"],
("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"):
[0, "{}", "200"],
("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"):
[0, "{}", "200"],
("/openoapi/catalog/v1/csars/1", "GET"):
[0, json.JSONEncoder().encode({"processState": "normal"}), "200"],
("/openoapi/extsys/v1/vims", "GET"):
[0, json.JSONEncoder().encode([{"vimId": "002",
"url": "url_test",
"userName": "test01",
"password": "123456",
"tenant": "test"}]), "200"]}
mock_delete_image.return_value = [0, "", '200']
def side_effect(*args):
return mock_vals[(args[4], args[5])]
mock_call_req.side_effect = side_effect
mock_init_.return_value = None
VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save()
NfPackageModel(uuid="01", nfpackageid="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 255, "Failed to delete CSAR(1) from catalog.")
@mock.patch.object(VimAdaptor, '__init__')
@mock.patch.object(VimAdaptor, 'delete_image')
@mock.patch.object(restcall, 'call_req')
def test_delete_csar_when_successfully(self, mock_call_req, mock_delete_image, mock_init_):
mock_vals = {
("/openoapi/catalog/v1/csars/1", "DELETE"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"):
[0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"],
("/openoapi/catalog/v1/csars/1", "GET"):
[0, json.JSONEncoder().encode({"notProcessState": "notProcessState"}), "200"],
("/openoapi/extsys/v1/vims", "GET"):
[0, json.JSONEncoder().encode([{
"vimId": "002",
"url": "url_test",
"userName": "test01",
"password": "123456",
"tenant": "test"}]), "200"]}
mock_delete_image.return_value = [0, json.JSONEncoder().encode({"test": "test"}), '200']
def side_effect(*args):
return mock_vals[(args[4], args[5])]
mock_call_req.side_effect = side_effect
mock_init_.return_value = None
VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save()
NfPackageModel(uuid="01", nfpackageid="1").save()
NfPkgDeletePendingThread(csar_id="1", job_id='2').run()
self.assert_job_result("2", 100, "Delete CSAR(1) successfully.")
#########################################################################
@mock.patch.object(restcall, 'call_req')
def test_delete_nf_pkg_when_deleting(self, mock_call_req):
mock_call_req.return_value = [0, json.JSONEncoder().encode({"processState": "deleting"}), '200']
NfPkgDeleteThread(csar_id="1", job_id="2").run()
self.assert_job_result("2", 100, "CSAR(1) is deleting now.")
def test_get_nf_csars_normal(self):
NfPackageModel(uuid="01", nfpackageid="1", vnfdid="2").save()
resp = self.client.get("/openoapi/nslcm/v1/vnfpackage")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(1, len(resp.data["csars"]))
self.assertEqual("1", resp.data["csars"][0]["csarId"])
self.assertEqual("2", resp.data["csars"][0]["vnfdId"])
| apache-2.0 | 7,188,164,469,112,064,000 | 48.26327 | 115 | 0.48873 | false |
emilk/sproxel | distro/common/lib/ctypes/macholib/dylib.py | 59 | 2107 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
"""
A dylib name can take one of the following four forms:
Location/Name.SomeVersion_Suffix.dylib
Location/Name.SomeVersion.dylib
Location/Name_Suffix.dylib
Location/Name.dylib
returns None if not found or a mapping equivalent to:
dict(
location='Location',
name='Name.SomeVersion_Suffix.dylib',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present.
"""
is_dylib = DYLIB_RE.match(filename)
if not is_dylib:
return None
return is_dylib.groupdict()
def test_dylib_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert dylib_info('completely/invalid') is None
assert dylib_info('completely/invalide_debug') is None
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
test_dylib_info()
| bsd-3-clause | -6,101,371,475,512,709,000 | 29.924242 | 96 | 0.521595 | false |
bitkeeper/python-opcua | examples/client_to_kepware.py | 1 | 1723 | import sys
sys.path.insert(0, "..")
import logging
from opcua import Client
from opcua import uaprotocol as ua
class SubHandler(object):
"""
    Subscription handler; it will receive data change and event notifications from the server.
"""
def datachange_notification(self, node, val, data):
print("Python: New data change event", node, val)
def event_notification(self, event):
print("Python: New event", event)
if __name__ == "__main__":
#from IPython import embed
logging.basicConfig(level=logging.WARN)
client = Client("opc.tcp://192.168.56.100:49320/OPCUA/SimulationServer/")
#client = Client("opc.tcp://192.168.56.100:4840/OPCUA/SimulationServer/")
#client = Client("opc.tcp://olivier:olivierpass@localhost:53530/OPCUA/SimulationServer/")
try:
client.connect()
root = client.get_root_node()
print("Root is", root)
print("childs of root are: ", root.get_children())
print("name of root is", root.get_browse_name())
objects = client.get_objects_node()
print("childs og objects are: ", objects.get_children())
tag1 = client.get_node("ns=2;s=Channel1.Device1.Tag1")
print("tag1 is: {0} with value {1} ".format(tag1, tag1.get_value()))
tag2 = client.get_node("ns=2;s=Channel1.Device1.Tag2")
print("tag2 is: {0} with value {1} ".format(tag2, tag2.get_value()))
handler = SubHandler()
sub = client.create_subscription(500, handler)
handle = sub.subscribe_data_change(tag1)
handle = sub.subscribe_data_change(tag2)
from IPython import embed
embed()
sub.unsubscribe(handle)
sub.delete()
finally:
client.disconnect()
| lgpl-3.0 | 2,871,847,764,149,168,600 | 30.327273 | 93 | 0.629716 | false |
clumsy/intellij-community | python/lib/Lib/unicodedata.py | 69 | 6437 | from bisect import bisect_left
import operator
import java.lang.Character
# XXX - this is intended as a stopgap measure until 2.5.1, which will have a Java implementation
# requires java 6 for `normalize` function
# only has one version of the database
# does not normalized ideographs
_codepoints = {}
_eaw = {}
_names = {}
_segments = []
_eaw_segments = []
Nonesuch = object()
def get_int(col):
try:
return int(col)
except ValueError:
return None
def get_yn(col):
if col == 'Y': return 1
else: return 0
def get_numeric(col):
try:
return float(col)
except ValueError:
try:
a, b = col.split('/')
return float(a)/float(b)
except:
return None
def init_unicodedata(data):
for row in data:
cols = row.split(';')
codepoint = int(cols[0], 16)
name = cols[1]
if name == '<CJK Ideograph, Last>':
lookup_name = 'CJK UNIFIED IDEOGRAPH'
else:
lookup_name = name
data = (
cols[2],
get_int(cols[3]),
cols[4],
cols[5],
get_int(cols[6]),
get_int(cols[7]),
get_numeric(cols[8]),
get_yn(cols[9]),
lookup_name,
)
if name.find('First') >= 0:
start = codepoint
elif name.find('Last') >= 0:
_segments.append((start, (start, codepoint), data))
else:
_names[name] = unichr(codepoint)
_codepoints[codepoint] = data
def init_east_asian_width(data):
for row in data:
if row.startswith('#'):
continue
row = row.partition('#')[0]
cols = row.split(';')
if len(cols) < 2:
continue
cr = cols[0].split('..')
width = cols[1].rstrip()
if len(cr) == 1:
codepoint = int(cr[0], 16)
_eaw[codepoint] = width
else:
start = int(cr[0], 16)
end = int(cr[1], 16)
_eaw_segments.append((start, (start, end), width))
# xxx - need to normalize the segments, so
# <CJK Ideograph, Last> ==> CJK UNIFIED IDEOGRAPH;
# may need to do some sort of analysis against CPython for the normalization!
def name(unichr, default=None):
codepoint = get_codepoint(unichr, "name")
v = _codepoints.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _segments)
if v is not None:
return "%s-%X" % (v[8], codepoint)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v[8]
# xxx - also need to add logic here so that if it's CJK UNIFIED
# IDEOGRAPH-8000, we go against the segment to verify the prefix
def lookup(name):
return _names[name]
def check_segments(codepoint, segments):
i = bisect_left(segments, (codepoint,))
if i < len(segments):
segment = segments[i - 1]
if codepoint <= segment[1][1]:
return segment[2]
return None
def get_codepoint(unichr, fn=None):
if not(isinstance(unichr, unicode)):
raise TypeError(fn, "() argument 1 must be unicode, not " + type(unichr))
if len(unichr) > 1 or len(unichr) == 0:
raise TypeError("need a single Unicode character as parameter")
return ord(unichr)
def get_eaw(unichr, default, fn):
codepoint = get_codepoint(unichr, fn)
v = _eaw.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _eaw_segments)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v
def get(unichr, default, fn, getter):
codepoint = get_codepoint(unichr, fn)
data = _codepoints.get(codepoint, None)
if data is None:
data = check_segments(codepoint, _segments)
if data is None:
if default is not Nonesuch:
return default
raise ValueError()
v = getter(data)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
else:
return v
category_getter = operator.itemgetter(0)
combining_getter = operator.itemgetter(1)
bidirectional_getter = operator.itemgetter(2)
decomposition_getter = operator.itemgetter(3)
decimal_getter = operator.itemgetter(4)
digit_getter = operator.itemgetter(5)
numeric_getter = operator.itemgetter(6)
mirrored_getter = operator.itemgetter(7)
def decimal(unichr, default=Nonesuch):
return get(unichr, default, 'decimal', decimal_getter)
def decomposition(unichr, default=''):
return get(unichr, default, 'decomposition', decomposition_getter)
def digit(unichr, default=Nonesuch):
return get(unichr, default, 'digit', digit_getter)
def numeric(unichr, default=Nonesuch):
return get(unichr, default, 'numeric', numeric_getter)
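# Illustrative examples (added; they assume init() below has loaded the
# bundled UnicodeData.txt):
# >>> decimal(u'7'), digit(u'7'), numeric(u'7')
# (7, 7, 7.0)
# >>> numeric(u'\xbd') # VULGAR FRACTION ONE HALF
# 0.5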
def category(unichr):
    return get(unichr, 'Cn', 'category', category_getter)
def bidirectional(unichr):
return get(unichr, '', 'bidirectional', bidirectional_getter)
def combining(unichr):
return get(unichr, 0, 'combining', combining_getter)
def mirrored(unichr):
return get(unichr, 0, 'mirrored', mirrored_getter)
def east_asian_width(unichr):
return get_eaw(unichr, 'N', 'east_asian_width')
def jymirrored(unichr):
return java.lang.Character.isMirrored(get_codepoint(unichr, 'mirrored'))
try:
from java.text import Normalizer
_forms = {
'NFC': Normalizer.Form.NFC,
'NFKC': Normalizer.Form.NFKC,
'NFD': Normalizer.Form.NFD,
'NFKD': Normalizer.Form.NFKD
}
def normalize(form, unistr):
"""
Return the normal form 'form' for the Unicode string unistr. Valid
values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
try:
normalizer_form = _forms[form]
except KeyError:
raise ValueError('invalid normalization form')
return Normalizer.normalize(unistr, normalizer_form)
except ImportError:
pass
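# Usage sketch (added for illustration; normalize() only exists when the
# java.text.Normalizer import above succeeded, i.e. on Java 6+):
# >>> normalize('NFC', u'e\u0301') # LATIN SMALL LETTER E + COMBINING ACUTE
# u'\xe9'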
def init():
import pkgutil
import os.path
import StringIO
import sys
my_path = os.path.dirname(__file__)
loader = pkgutil.get_loader('unicodedata')
init_unicodedata(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'UnicodeData.txt'))))
init_east_asian_width(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'EastAsianWidth.txt'))))
init()
| apache-2.0 | -6,758,697,989,492,542,000 | 27.10917 | 106 | 0.607426 | false |
EDRN/CancerDataExpo | src/edrn.rdf/setup.py | 2 | 2657 | # encoding: utf-8
# Copyright 2008-2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
from setuptools import setup, find_packages
import os.path
# Package data
# ------------
_name = 'edrn.rdf'
_version = '1.3.8'
_description = 'EDRN RDF Server'
_author = 'Sean Kelly'
_authorEmail = '[email protected]'
_maintainer = 'Sean Kelly'
_maintainerEmail = '[email protected]'
_license = 'ALv2'
_namespaces = ['edrn']
_zipSafe = False
_keywords = 'rdf web zope plone cancer bioinformatics detection informatics edrn'
_testSuite = 'edrn.rdf.tests.test_suite'
_entryPoints = {
'z3c.autoinclude.plugin': ['target=plone'],
}
_requirements = [
'collective.autopermission',
'pysolr',
'Pillow',
'plone.app.dexterity[relations]',
'plone.app.relationfield',
'plone.behavior',
'Products.CMFPlone',
'rdflib==4.2.2',
'setuptools',
'z3c.relationfield',
'suds2',
'zope.app.intid',
]
_extras = {
'test': ['plone.app.testing', 'rdfextras'],
}
_classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Plone',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Libraries :: Python Modules',
]
# Setup Metadata
# --------------
def _read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
_header = '*' * len(_name) + '\n' + _name + '\n' + '*' * len(_name)
_longDescription = _header + '\n\n' + _read('README.rst') + '\n\n' + _read('docs', 'INSTALL.txt') + '\n\n' \
+ _read('docs', 'HISTORY.txt') + '\n'
open('doc.txt', 'w').write(_longDescription)
setup(
author=_author,
author_email=_authorEmail,
classifiers=_classifiers,
description=_description,
entry_points=_entryPoints,
extras_require=_extras,
include_package_data=True,
install_requires=_requirements,
keywords=_keywords,
license=_license,
long_description=_longDescription,
maintainer=_maintainer,
maintainer_email=_maintainerEmail,
name=_name,
namespace_packages=_namespaces,
packages=find_packages(exclude=['ez_setup', 'distribute_setup', 'bootstrap']),
url='https://github.com/EDRN/' + _name,
version=_version,
zip_safe=_zipSafe,
)
| apache-2.0 | 3,337,923,018,098,752,500 | 29.193182 | 108 | 0.629281 | false |
stephanie-wang/ray | python/ray/tune/suggest/sigopt.py | 1 | 5550 | import copy
import os
import logging
import pickle
try:
import sigopt as sgo
except ImportError:
sgo = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class SigOptSearch(SuggestionAlgorithm):
"""A wrapper around SigOpt to provide trial suggestions.
Requires SigOpt to be installed. Requires user to store their SigOpt
API key locally as an environment variable at `SIGOPT_KEY`.
Parameters:
space (list of dict): SigOpt configuration. Parameters will be sampled
from this configuration and will be used to override
parameters generated in the variant generation process.
name (str): Name of experiment. Required by SigOpt.
max_concurrent (int): Number of maximum concurrent trials supported
based on the user's SigOpt plan. Defaults to 1.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
Example:
>>> space = [
>>> {
>>> 'name': 'width',
>>> 'type': 'int',
>>> 'bounds': {
>>> 'min': 0,
>>> 'max': 20
>>> },
>>> },
>>> {
>>> 'name': 'height',
>>> 'type': 'int',
>>> 'bounds': {
>>> 'min': -100,
>>> 'max': 100
>>> },
>>> },
>>> ]
>>> algo = SigOptSearch(
>>> space, name="SigOpt Example Experiment",
>>> max_concurrent=1, metric="mean_loss", mode="min")
"""
def __init__(self,
space,
name="Default Tune Experiment",
max_concurrent=1,
reward_attr=None,
metric="episode_reward_mean",
mode="max",
**kwargs):
assert sgo is not None, "SigOpt must be installed!"
assert type(max_concurrent) is int and max_concurrent > 0
assert "SIGOPT_KEY" in os.environ, \
"SigOpt API key must be stored as environ variable at SIGOPT_KEY"
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
if reward_attr is not None:
mode = "max"
metric = reward_attr
logger.warning(
"`reward_attr` is deprecated and will be removed in a future "
"version of Tune. "
"Setting `metric={}` and `mode=max`.".format(reward_attr))
if "use_early_stopped_trials" in kwargs:
logger.warning(
"`use_early_stopped_trials` is not used in SigOptSearch.")
self._max_concurrent = max_concurrent
self._metric = metric
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._live_trial_mapping = {}
# Create a connection with SigOpt API, requires API key
self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])
self.experiment = self.conn.experiments().create(
name=name,
parameters=space,
parallel_bandwidth=self._max_concurrent,
)
super(SigOptSearch, self).__init__(**kwargs)
def _suggest(self, trial_id):
if self._num_live_trials() >= self._max_concurrent:
return None
# Get new suggestion from SigOpt
suggestion = self.conn.experiments(
self.experiment.id).suggestions().create()
self._live_trial_mapping[trial_id] = suggestion
return copy.deepcopy(suggestion.assignments)
def on_trial_result(self, trial_id, result):
pass
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
"""
if result:
self.conn.experiments(self.experiment.id).observations().create(
suggestion=self._live_trial_mapping[trial_id].id,
value=self._metric_op * result[self._metric],
)
# Update the experiment object
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error or early_terminated:
# Reports a failed Observation
self.conn.experiments(self.experiment.id).observations().create(
failed=True, suggestion=self._live_trial_mapping[trial_id].id)
del self._live_trial_mapping[trial_id]
def _num_live_trials(self):
return len(self._live_trial_mapping)
def save(self, checkpoint_dir):
trials_object = (self.conn, self.experiment)
with open(checkpoint_dir, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_dir):
with open(checkpoint_dir, "rb") as inputFile:
trials_object = pickle.load(inputFile)
self.conn = trials_object[0]
self.experiment = trials_object[1]
| apache-2.0 | -1,845,133,324,321,591,300 | 35.27451 | 79 | 0.559099 | false |
aetilley/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause | 6,470,606,179,744,523,000 | 34.087719 | 79 | 0.6125 | false |
dagdaggo/coala | tests/bearlib/spacing/SpacingHelperTest.py | 26 | 4341 | import unittest
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.settings.Section import Section
class SpacingHelperTest(unittest.TestCase):
def setUp(self):
self.uut = SpacingHelper()
def test_needed_settings(self):
self.assertEqual(list(self.uut.get_optional_settings()), ["tab_width"])
self.assertEqual(list(self.uut.get_non_optional_settings()), [])
def test_construction(self):
section = Section("test section")
self.assertRaises(TypeError, SpacingHelper, "no integer")
self.assertRaises(TypeError, self.uut.from_section, 5)
self.assertEqual(self.uut.tab_width,
self.uut.from_section(section).tab_width)
# This is assumed in some tests. If you want to change this value, be
# sure to change the tests too
self.assertEqual(self.uut.DEFAULT_TAB_WIDTH, 4)
self.assertEqual(self.uut.tab_width, self.uut.DEFAULT_TAB_WIDTH)
def test_get_indentation(self):
self.assertRaises(TypeError, self.uut.get_indentation, 5)
self.assertEqual(self.uut.get_indentation("no indentation"), 0)
self.assertEqual(self.uut.get_indentation(" indentation"), 1)
self.assertEqual(self.uut.get_indentation(" indentation"), 2)
self.assertEqual(self.uut.get_indentation("\tindentation"),
self.uut.DEFAULT_TAB_WIDTH)
# Having a space before the tab shouldn't make any difference
self.assertEqual(self.uut.get_indentation(" \tindentation"),
self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.get_indentation(" \t indentation"),
self.uut.DEFAULT_TAB_WIDTH+1)
self.assertEqual(self.uut.get_indentation("\t indentation"),
self.uut.DEFAULT_TAB_WIDTH+1)
# same tests but with indentation only
self.assertEqual(self.uut.get_indentation("\t"),
self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.get_indentation(" \t"),
self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.get_indentation(" \t "),
self.uut.DEFAULT_TAB_WIDTH+1)
self.assertEqual(self.uut.get_indentation("\t "),
self.uut.DEFAULT_TAB_WIDTH+1)
self.assertEqual(self.uut.get_indentation("\t\t"),
self.uut.DEFAULT_TAB_WIDTH*2)
def test_replace_tabs_with_spaces(self):
self.assertRaises(TypeError, self.uut.replace_tabs_with_spaces, 5)
self.assertEqual(self.uut.replace_tabs_with_spaces(""), "")
self.assertEqual(self.uut.replace_tabs_with_spaces(" "), " ")
self.assertEqual(self.uut.replace_tabs_with_spaces("\t"),
" "*self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.replace_tabs_with_spaces("\t\t"),
" "*self.uut.DEFAULT_TAB_WIDTH*2)
self.assertEqual(self.uut.replace_tabs_with_spaces(" \t"),
" "*self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.replace_tabs_with_spaces(" \t"),
" "*self.uut.DEFAULT_TAB_WIDTH)
self.assertEqual(self.uut.replace_tabs_with_spaces("d \t "),
"d" + " "*self.uut.DEFAULT_TAB_WIDTH)
def test_replace_spaces_with_tabs(self):
self.assertRaises(TypeError, self.uut.replace_spaces_with_tabs, 5)
self.assertEqual(self.uut.replace_spaces_with_tabs(""), "")
self.assertEqual(self.uut.replace_spaces_with_tabs(" "), " ")
self.assertEqual(self.uut.replace_spaces_with_tabs(" "), "\t")
self.assertEqual(self.uut.replace_spaces_with_tabs(" \t"), "\t")
self.assertEqual(self.uut.replace_spaces_with_tabs(" dd "),
" dd ")
self.assertEqual(self.uut.replace_spaces_with_tabs(" dd d "),
" dd d ") # One space shouldnt be replaced
self.assertEqual(self.uut.replace_spaces_with_tabs(" dd "),
" dd\t")
self.assertEqual(
self.uut.replace_spaces_with_tabs(" \t a_text another"),
"\t a_text\tanother")
self.assertEqual(self.uut.replace_spaces_with_tabs("d d"), "d d")
| agpl-3.0 | -452,152,684,058,441,000 | 47.233333 | 79 | 0.599862 | false |
hanchentech/git-repo | subcmds/help.py | 48 | 4794 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from formatter import AbstractFormatter, DumbWriter
from color import Coloring
from command import PagedCommand, MirrorSafeCommand
class Help(PagedCommand, MirrorSafeCommand):
common = False
helpSummary = "Display detailed help on a command"
helpUsage = """
%prog [--all|command]
"""
helpDescription = """
Displays detailed usage information about a command.
"""
def _PrintAllCommands(self):
print 'usage: repo COMMAND [ARGS]'
print """
The complete list of recognized repo commands is:
"""
commandNames = self.commands.keys()
commandNames.sort()
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print fmt % (name, summary)
print """
See 'repo help <command>' for more information on a specific command.
"""
def _PrintCommonCommands(self):
print 'usage: repo COMMAND [ARGS]'
print """
The most commonly used repo commands are:
"""
commandNames = [name
for name in self.commands.keys()
if self.commands[name].common]
commandNames.sort()
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print fmt % (name, summary)
print """
See 'repo help <command>' for more information on a specific command.
See 'repo help --all' for a complete list of recognized commands.
"""
def _PrintCommandHelp(self, cmd):
class _Out(Coloring):
def __init__(self, gc):
Coloring.__init__(self, gc, 'help')
self.heading = self.printer('heading', attr='bold')
self.wrap = AbstractFormatter(DumbWriter())
def _PrintSection(self, heading, bodyAttr):
try:
body = getattr(cmd, bodyAttr)
except AttributeError:
return
if body == '' or body is None:
return
self.nl()
self.heading('%s', heading)
self.nl()
self.heading('%s', ''.ljust(len(heading), '-'))
self.nl()
me = 'repo %s' % cmd.NAME
body = body.strip()
body = body.replace('%prog', me)
asciidoc_hdr = re.compile(r'^\n?([^\n]{1,})\n([=~-]{2,})$')
for para in body.split("\n\n"):
if para.startswith(' '):
self.write('%s', para)
self.nl()
self.nl()
continue
m = asciidoc_hdr.match(para)
if m:
title = m.group(1)
section_type = m.group(2)
if section_type[0] in ('=', '-'):
p = self.heading
else:
def _p(fmt, *args):
self.write(' ')
self.heading(fmt, *args)
p = _p
p('%s', title)
self.nl()
p('%s', ''.ljust(len(title),section_type[0]))
self.nl()
continue
self.wrap.add_flowing_data(para)
self.wrap.end_paragraph(1)
self.wrap.end_paragraph(0)
out = _Out(self.manifest.globalConfig)
out._PrintSection('Summary', 'helpSummary')
cmd.OptionParser.print_help()
out._PrintSection('Description', 'helpDescription')
def _Options(self, p):
p.add_option('-a', '--all',
dest='show_all', action='store_true',
help='show the complete list of commands')
def Execute(self, opt, args):
if len(args) == 0:
if opt.show_all:
self._PrintAllCommands()
else:
self._PrintCommonCommands()
elif len(args) == 1:
name = args[0]
try:
cmd = self.commands[name]
except KeyError:
print >>sys.stderr, "repo: '%s' is not a repo command." % name
sys.exit(1)
cmd.manifest = self.manifest
self._PrintCommandHelp(cmd)
else:
self._PrintCommandHelp(self)
| apache-2.0 | 8,913,382,724,638,427,000 | 26.872093 | 74 | 0.585732 | false |
CubicERP/odoo | addons/account/report/account_journal.py | 255 | 10035 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class journal_print(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(journal_print, self).__init__(cr, uid, name, context=context)
self.context = context
self.period_ids = []
self.last_move_id = False
self.journal_ids = []
self.sort_selection = 'am.name'
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_account': self._get_account,
'get_filter': self._get_filter,
'get_start_date': self._get_start_date,
'get_end_date': self._get_end_date,
'get_fiscalyear': self._get_fiscalyear,
'display_currency':self._display_currency,
'get_sortby': self._get_sortby,
'get_target_move': self._get_target_move,
'check_last_move_id': self.check_last_move_id,
'set_last_move_id': self.set_last_move_id,
'tax_codes': self.tax_codes,
'sum_vat': self._sum_vat,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
new_ids = ids
self.query_get_clause = ''
self.target_move = data['form'].get('target_move', 'all')
if (data['model'] == 'ir.ui.menu'):
self.period_ids = tuple(data['form']['periods'])
self.journal_ids = tuple(data['form']['journal_ids'])
new_ids = data['form'].get('active_ids', [])
self.query_get_clause = 'AND '
self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
self.sort_selection = data['form'].get('sort_selection', 'date')
objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
elif new_ids:
#in case of direct access from account.journal.period object, we need to set the journal_ids and periods_ids
self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
res = self.cr.fetchall()
self.period_ids, self.journal_ids = zip(*res)
return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
def set_last_move_id(self, move_id):
self.last_move_id = move_id
def check_last_move_id(self, move_id):
'''
return True if we need to draw a gray line above this line, used to separate moves
'''
if self.last_move_id:
return not(self.last_move_id == move_id)
return False
def tax_codes(self, period_id, journal_id):
ids_journal_period = self.pool.get('account.journal.period').search(self.cr, self.uid,
[('journal_id', '=', journal_id), ('period_id', '=', period_id)])
self.cr.execute(
'select distinct tax_code_id from account_move_line ' \
'where period_id=%s and journal_id=%s and tax_code_id is not null and state<>\'draft\'',
(period_id, journal_id)
)
ids = map(lambda x: x[0], self.cr.fetchall())
tax_code_ids = []
if ids:
self.cr.execute('select id from account_tax_code where id in %s order by code', (tuple(ids),))
tax_code_ids = map(lambda x: x[0], self.cr.fetchall())
tax_codes = self.pool.get('account.tax.code').browse(self.cr, self.uid, tax_code_ids)
return tax_codes
def _sum_vat(self, period_id, journal_id, tax_code_id):
self.cr.execute('select sum(tax_amount) from account_move_line where ' \
'period_id=%s and journal_id=%s and tax_code_id=%s and state<>\'draft\'',
(period_id, journal_id, tax_code_id))
return self.cr.fetchone()[0] or 0.0
def _sum_debit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT SUM(debit) FROM account_move_line l, account_move am '
'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s ' + self.query_get_clause + ' ',
(tuple(move_state), tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l, account_move am '
'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s '+ self.query_get_clause+'',
(tuple(move_state), tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def lines(self, period_id, journal_id=False):
if not journal_id:
journal_id = self.journal_ids
else:
journal_id = [journal_id]
obj_mline = self.pool.get('account.move.line')
self.cr.execute('update account_journal_period set state=%s where journal_id IN %s and period_id=%s and state=%s', ('printed', self.journal_ids, period_id, 'draft'))
self.pool.get('account.journal.period').invalidate_cache(self.cr, self.uid, ['state'], context=self.context)
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT l.id FROM account_move_line l, account_move am WHERE l.move_id=am.id AND am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ORDER BY '+ self.sort_selection + ', l.move_id',(tuple(move_state), period_id, tuple(journal_id) ))
ids = map(lambda x: x[0], self.cr.fetchall())
return obj_mline.browse(self.cr, self.uid, ids)
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol AS code "\
"FROM res_currency c,account_account AS ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
def _get_fiscalyear(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
return super(journal_print, self)._get_fiscalyear(data)
def _get_account(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
return super(journal_print, self)._get_account(data)
def _display_currency(self, data):
if data['model'] == 'account.journal.period':
return True
return data['form']['amount_currency']
def _get_sortby(self, data):
# TODO: deprecated, to remove in trunk
if self.sort_selection == 'date':
return self._translate('Date')
elif self.sort_selection == 'ref':
return self._translate('Reference Number')
return self._translate('Date')
class report_journal(osv.AbstractModel):
_name = 'report.account.report_journal'
_inherit = 'report.abstract_report'
_template = 'account.report_journal'
_wrapped_report_class = journal_print
class report_salepurchasejournal(osv.AbstractModel):
_name = 'report.account.report_salepurchasejournal'
_inherit = 'report.abstract_report'
_template = 'account.report_salepurchasejournal'
_wrapped_report_class = journal_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,353,666,811,201,545,000 | 45.24424 | 291 | 0.592128 | false |
hailongqiu/new-deepin-media-player | src/widget/preview.py | 2 | 6604 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Deepin, Inc.
# 2012 Hailong Qiu
#
# Author: Hailong Qiu <[email protected]>
# Maintainer: Hailong Qiu <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from dtk.ui.draw import draw_text
from dtk.ui.constant import DEFAULT_FONT_SIZE
from preview_bg import PreViewWin
from mplayer.player import LDMP
from mplayer.player import length_to_time
from constant import PREVIEW_PV_WIDTH, PREVIEW_PV_HEIGHT
import gtk
import cairo
import pango
class PreView(object):
def __init__(self, path = "", pos = 0):
self.video_width = 0
self.video_height = 0
#self.mp = Mplayer()
self.mp = LDMP()
self.xid = None
self.pos = pos
# Preview background window.
self.bg = PreViewWin()
self.bg.set_size_request(124, 89)
self.bg.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU)
# Set background window.
self.bg.add_events(gtk.gdk.ALL_EVENTS_MASK)
#self.bg.show_all()
self.bg.set_offset(self.bg.get_offset_mid_value())
# Hide preview window.
self.bg.show_time_label.connect("expose-event", self.draw_background)
self.bg.connect("motion-notify-event", self.motion_hide_preview)
self.bg.connect("enter-notify-event", self.motion_hide_preview)
# Preview window.
self.pv = gtk.Window(gtk.WINDOW_POPUP)
# Set preview window.
self.pv.set_size_request(PREVIEW_PV_WIDTH - 4, PREVIEW_PV_HEIGHT)
self.pv.set_decorated(False)
self.pv.set_keep_above(True)
self.pv.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU)
self.pv.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.pv.connect("expose-event", self.draw_preview_video_background)
# Hide preview window.
self.pv.connect("motion-notify-event", self.motion_hide_preview)
self.pv.connect("enter-notify-event", self.motion_hide_preview)
def draw_preview_video_background(self, widget, event):
cr = widget.window.cairo_create()
x, y, w, h = widget.get_allocation()
cr.rectangle(x, y, w, h)
cr.fill()
if self.video_width or self.video_height:
video_ratio = float(self.video_width) / self.video_height
bit = video_ratio - (float(w) / h)
cr.set_source_rgb(0, 0, 0)
if 0 == bit:
return False
elif bit < 0:
s = w - h * (video_ratio)
s = s / 2
# Draw left.
cr.rectangle(x, y,
s, h)
cr.fill()
# Draw right.
cr.rectangle(x + s + h * video_ratio,
y, s, h)
cr.fill()
elif bit > 0:
video_ratio = float(self.video_height) / self.video_width
s = h - w * video_ratio
s = s / 2
# Draw UP.
cr.rectangle(x, y, w + 1, s)
cr.fill()
# Draw bottom.
cr.rectangle(x, y + s + w * (video_ratio),
w, s)
cr.fill()
return True
# Background window.
def draw_background(self, widget, event):
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
x, y, w, h = rect.x, rect.y, rect.width, rect.height
# Draw preview time.
font_height_padding = 15
show_time_text = length_to_time(self.pos)
time_hour, time_min, time_sec = 1, 3, 30 #self.mp.time(self.pos)
draw_text(cr,
#"%s:%s:%s" % (self.time_to_string(time_hour), self.time_to_string(time_min), self.time_to_string(time_sec)),
show_time_text,
x, h - font_height_padding, w, DEFAULT_FONT_SIZE,
text_color = "#ffffff",
alignment=pango.ALIGN_CENTER
)
return True
def move_preview(self, x, y):
self.bg.move(int(x), int(y))
self.pv.move(int(x + 4), int(y+4))
def show_preview(self, pos):
if self.pos != pos:
self.pos = pos
self.bg.queue_draw()
self.bg.show_all()
self.pv.show_all()
region = gtk.gdk.Region()
self.bg.window.input_shape_combine_region(region, 0, 0)
#self.pv.show_all()
self.pv.set_keep_above(True)
# init preview window mplayer.
self.init_mplayer_window(pos)
def hide_preview(self):
self.bg.hide_all()
self.pv.hide_all()
def quit_preview_player(self):
self.hide_preview()
self.mp.quit()
def set_preview_path(self, path):
self.pv.show_all()
self.pv.set_opacity(0)
self.mp.xid = self.pv.window.xid
self.mp.quit()
self.mp.player.uri = path
        self.mp.play()   # Play.
        self.mp.pause()  # Pause.
        self.mp.nomute() # Mute.
self.pv.hide_all()
self.pv.set_opacity(1)
def time_to_string(self, time_pos):
if 0<= time_pos <= 9:
return "0" + str(time_pos)
return str(time_pos)
def motion_hide_preview(self, widget, event):
self.hide_preview()
def init_mplayer_window(self, pos):
        # Fast-forward for the preview.
self.mp.seek(pos)
if __name__ == "__main__":
pass
| gpl-3.0 | -7,695,444,990,471,682,000 | 34.021277 | 127 | 0.511543 | false |
wqer1019/student-work | vendor/psy/psysh/test/tools/vis.py | 710 | 3428 | """
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept a unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
`VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
_strvis.argtypes = [c_char_p, c_char_p]
_strvis.restype = c_int
def vis(src, flags=VIS_WHITE):
"""
Encodes the string `src` into libbsd's vis encoding.
`flags` must be one of the VIS_* constants
C definition:
int strvis(char *dst, char *src, int flags);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src) * 4))
src_p = c_char_p(src)
flags = c_int(flags)
bytes_written = _strvis(dst_p, src_p, flags)
if -1 == bytes_written:
raise RuntimeError('vis failed to encode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
def unvis(src):
"""
Decodes a string encoded by vis.
C definition:
int strunvis(char *dst, char *src);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src)))
src_p = c_char_p(src)
bytes_written = _strunvis(dst_p, src_p)
if -1 == bytes_written:
raise RuntimeError('unvis failed to decode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
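if __name__ == '__main__':
    # Minimal round-trip demo (not part of the original module); it assumes
    # libbsd is installed so the ctypes lookup above succeeded.
    encoded = vis('tab:\there', VIS_WHITE | VIS_CSTYLE)
    print(encoded)                         # whitespace comes back escaped
    assert unvis(encoded) == 'tab:\there'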
| mit | -4,698,864,931,030,523,000 | 26.206349 | 76 | 0.599767 | false |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/util/deprecation_test.py | 17 | 28084 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class DeprecationTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("", instructions)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("07-04-2016", instructions)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, None)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, "")
@test.mock.patch.object(logging, "warning", autospec=True)
def test_no_date(self, mock_warning):
date = None
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed in a future version."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % instructions, _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(
args[0], r"deprecated and will be removed")
self._assert_subset(set(["in a future version", instructions]),
set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
def test_prop_wrong_order(self):
with self.assertRaisesRegexp(
ValueError,
"make sure @property appears before @deprecated in your source code"):
# pylint: disable=unused-variable
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated("2016-07-04", "Instructions.")
@property
def _prop(self):
return "prop_wrong_order"
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
"""prop doc.
Returns:
String.
"""
return "prop_with_doc"
# Assert function docs are properly updated.
self.assertEqual(
"prop doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s"
"\n"
"\nReturns:"
"\n String." % (date, instructions), getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_with_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
return "prop_no_doc"
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_no_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
class DeprecatedArgsTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, None, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, "", "deprecated")
with self.assertRaisesRegexp(ValueError, "argument"):
deprecation.deprecated_args(date, instructions)
def test_deprecated_missing_args(self):
date = "2016-07-04"
instructions = "This is how you update..."
def _fn(arg0, arg1, deprecated=None):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
deprecation.deprecated_args(date, instructions, "missing")(_fn)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_varargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, *deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True, False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_kwargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, **deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, a=True, b=False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_positional_and_named(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "d1", "d2")
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, None, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
@test.mock.patch.object(logging, "warning", autospec=True)
def test_positional_and_named_with_ok_vals(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, ("d1", None),
("d2", "my_ok_val"))
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, False, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d1"]),
set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions, "d2"]),
set(args2[1:]))
    # Assert calls with the deprecated arguments don't log warnings if
# the value matches the 'ok_val'.
mock_warning.reset_mock()
self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val"))
self.assertEqual(0, mock_warning.call_count)
class DeprecatedArgValuesTest(test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values("", instructions, deprecated=True)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values(
"07-04-2016", instructions, deprecated=True)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, None, deprecated=True)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(date, "", deprecated=True)
with self.assertRaisesRegexp(ValueError, "argument", deprecated=True):
deprecation.deprecated_arg_values(date, instructions)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\nArgs:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\nReturns:"
"\n Sum of args." % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions), _fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed")
self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
class DeprecationArgumentsTest(test.TestCase):
def testDeprecatedArgumentLookup(self):
good_value = 3
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value, "val_old",
None), good_value)
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", None, "val_old",
good_value), good_value)
with self.assertRaisesRegexp(ValueError,
"Cannot specify both 'val_old' and 'val_new'"):
self.assertEqual(
deprecation.deprecated_argument_lookup("val_new", good_value,
"val_old", good_value),
good_value)
def testRewriteArgumentDocstring(self):
docs = """Add `a` and `b`
Args:
a: first arg
b: second arg
"""
new_docs = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right")
new_docs_ref = """Add `left` and `right`
Args:
left: first arg
right: second arg
"""
self.assertEqual(new_docs, new_docs_ref)
if __name__ == "__main__":
test.main()
| mit | 2,090,367,151,633,026,000 | 35.284238 | 80 | 0.636021 | false |
kaltsimon/youtube-dl | youtube_dl/extractor/myspass.py | 105 | 2721 | from __future__ import unicode_literals
import os.path
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
class MySpassIE(InfoExtractor):
_VALID_URL = r'http://www\.myspass\.de/.*'
_TEST = {
'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
'md5': '0b49f4844a068f8b33f4b7c88405862b',
'info_dict': {
'id': '11741',
'ext': 'mp4',
"description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
},
}
def _real_extract(self, url):
META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
# video id is the last path element of the URL
# usually there is a trailing slash, so also try the second but last
url_path = compat_urllib_parse_urlparse(url).path
url_parent_path, video_id = os.path.split(url_path)
if not video_id:
_, video_id = os.path.split(url_parent_path)
# get metadata
metadata_url = META_DATA_URL_TEMPLATE % video_id
metadata = self._download_xml(
metadata_url, video_id, transform_source=lambda s: s.strip())
# extract values from metadata
url_flv_el = metadata.find('url_flv')
if url_flv_el is None:
raise ExtractorError('Unable to extract download url')
video_url = url_flv_el.text
title_el = metadata.find('title')
if title_el is None:
raise ExtractorError('Unable to extract title')
title = title_el.text
format_id_el = metadata.find('format_id')
if format_id_el is None:
format = 'mp4'
else:
format = format_id_el.text
description_el = metadata.find('description')
if description_el is not None:
description = description_el.text
else:
description = None
imagePreview_el = metadata.find('imagePreview')
if imagePreview_el is not None:
thumbnail = imagePreview_el.text
else:
thumbnail = None
return {
'id': video_id,
'url': video_url,
'title': title,
'format': format,
'thumbnail': thumbnail,
'description': description,
}
| unlicense | 2,858,924,838,063,547,000 | 36.273973 | 255 | 0.604925 | false |
klenks/jobsportal | venv/lib/python2.7/site-packages/debug_toolbar/panels/templates/views.py | 3 | 2341 | from __future__ import absolute_import, unicode_literals
from django.http import HttpResponseBadRequest
from django.shortcuts import render_to_response
from django.template import TemplateDoesNotExist
from django.template.engine import Engine
from django.utils.safestring import mark_safe
try:
from django.template import Origin
except ImportError:
Origin = None
def template_source(request):
"""
Return the source of a template, syntax-highlighted by Pygments if
it's available.
"""
template_origin_name = request.GET.get('template_origin', None)
if template_origin_name is None:
return HttpResponseBadRequest('"template_origin" key is required')
template_name = request.GET.get('template', template_origin_name)
final_loaders = []
loaders = Engine.get_default().template_loaders
for loader in loaders:
if loader is not None:
# When the loader has loaders associated with it,
# append those loaders to the list. This occurs with
# django.template.loaders.cached.Loader
if hasattr(loader, 'loaders'):
final_loaders += loader.loaders
else:
final_loaders.append(loader)
for loader in final_loaders:
if Origin: # django>=1.9
origin = Origin(template_origin_name)
try:
source = loader.get_contents(origin)
break
except TemplateDoesNotExist:
pass
else: # django<1.9
try:
source, _ = loader.load_template_source(template_name)
break
except TemplateDoesNotExist:
pass
else:
source = "Template Does Not Exist: %s" % (template_origin_name,)
try:
from pygments import highlight
from pygments.lexers import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter
source = highlight(source, HtmlDjangoLexer(), HtmlFormatter())
source = mark_safe(source)
source.pygmentized = True
except ImportError:
pass
# Using render_to_response avoids running global context processors.
return render_to_response('debug_toolbar/panels/template_source.html', {
'source': source,
'template_name': template_name
})
| mit | 6,540,948,073,250,825,000 | 32.442857 | 76 | 0.641179 | false |
Eksmo/calibre | src/calibre/ebooks/oeb/transforms/jacket.py | 1 | 8466 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys
from xml.sax.saxutils import escape
from lxml import etree
from calibre import guess_type, strftime
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag
from calibre.library.comments import comments_to_html
from calibre.utils.date import is_date_undefined
from calibre.ebooks.chardet import strip_encoding_declarations
JACKET_XPATH = '//h:meta[@name="calibre-content" and @content="jacket"]'
class Jacket(object):
'''
Book jacket manipulation. Remove first image and insert comments at start of
book.
'''
def remove_images(self, item, limit=1):
path = XPath('//h:img[@src]')
removed = 0
for img in path(item.data):
if removed >= limit:
break
href = item.abshref(img.get('src'))
image = self.oeb.manifest.hrefs.get(href, None)
if image is not None:
self.oeb.manifest.remove(image)
img.getparent().remove(img)
removed += 1
return removed
def remove_first_image(self):
deleted_item = None
for item in self.oeb.spine:
removed = self.remove_images(item)
if removed > 0:
self.log('Removed first image')
body = XPath('//h:body')(item.data)
if body:
raw = xml2text(body[0]).strip()
imgs = XPath('//h:img|//svg:svg')(item.data)
if not raw and not imgs:
self.log('Removing %s as it has no content'%item.href)
self.oeb.manifest.remove(item)
deleted_item = item
break
if deleted_item is not None:
for item in list(self.oeb.toc):
href = urldefrag(item.href)[0]
if href == deleted_item.href:
self.oeb.toc.remove(item)
def insert_metadata(self, mi):
self.log('Inserting metadata into book...')
try:
tags = map(unicode, self.oeb.metadata.subject)
except:
tags = []
try:
comments = unicode(self.oeb.metadata.description[0])
except:
comments = ''
try:
title = unicode(self.oeb.metadata.title[0])
except:
title = _('Unknown')
root = render_jacket(mi, self.opts.output_profile,
alt_title=title, alt_tags=tags,
alt_comments=comments)
id, href = self.oeb.manifest.generate('calibre_jacket', 'jacket.xhtml')
item = self.oeb.manifest.add(id, href, guess_type(href)[0], data=root)
self.oeb.spine.insert(0, item, True)
self.oeb.inserted_metadata_jacket = item
def remove_existing_jacket(self):
for x in self.oeb.spine[:4]:
if XPath(JACKET_XPATH)(x.data):
self.remove_images(x, limit=sys.maxint)
self.oeb.manifest.remove(x)
self.log('Removed existing jacket')
break
def __call__(self, oeb, opts, metadata):
'''
Add metadata in jacket.xhtml if specified in opts
If not specified, remove previous jacket instance
'''
self.oeb, self.opts, self.log = oeb, opts, oeb.log
self.remove_existing_jacket()
if opts.remove_first_image:
self.remove_first_image()
if opts.insert_metadata:
self.insert_metadata(metadata)
# Render Jacket {{{
def get_rating(rating, rchar, e_rchar):
ans = ''
try:
num = float(rating)/2
except:
return ans
num = max(0, num)
num = min(num, 5)
if num < 1:
return ans
ans = ("%s%s") % (rchar * int(num), e_rchar * (5 - int(num)))
    return ans
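# Illustrative example (not part of the original source): get_rating maps a
# 0-10 rating onto a five-character bar, truncating half stars. Assuming '*'
# and '-' as the filled/empty characters:
#   get_rating(7, '*', '-')   # -> '***--'  (7/10 becomes 3.5, truncated to 3)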
def render_jacket(mi, output_profile,
alt_title=_('Unknown'), alt_tags=[], alt_comments='',
alt_publisher=('')):
css = P('jacket/stylesheet.css', data=True).decode('utf-8')
try:
title_str = mi.title if mi.title else alt_title
except:
title_str = _('Unknown')
title = '<span class="title">%s</span>' % (escape(title_str))
series = escape(mi.series if mi.series else '')
if mi.series and mi.series_index is not None:
series += escape(' [%s]'%mi.format_series_index())
if not mi.series:
series = ''
try:
publisher = mi.publisher if mi.publisher else alt_publisher
except:
publisher = ''
try:
if is_date_undefined(mi.pubdate):
pubdate = ''
else:
pubdate = strftime(u'%Y', mi.pubdate.timetuple())
except:
pubdate = ''
rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char)
tags = mi.tags if mi.tags else alt_tags
if tags:
tags = output_profile.tags_to_string(tags)
else:
tags = ''
comments = mi.comments if mi.comments else alt_comments
comments = comments.strip()
orig_comments = comments
if comments:
comments = comments_to_html(comments)
try:
author = mi.format_authors()
except:
author = ''
def generate_html(comments):
args = dict(xmlns=XHTML_NS,
title_str=title_str,
css=css,
title=title,
author=author,
publisher=publisher,
pubdate_label=_('Published'), pubdate=pubdate,
series_label=_('Series'), series=series,
rating_label=_('Rating'), rating=rating,
tags_label=_('Tags'), tags=tags,
comments=comments,
footer=''
)
for key in mi.custom_field_keys():
try:
display_name, val = mi.format_field_extended(key)[:2]
key = key.replace('#', '_')
args[key] = escape(val)
args[key+'_label'] = escape(display_name)
except:
pass
# Used in the comment describing use of custom columns in templates
args['_genre_label'] = args.get('_genre_label', '{_genre_label}')
args['_genre'] = args.get('_genre', '{_genre}')
generated_html = P('jacket/template.xhtml',
data=True).decode('utf-8').format(**args)
# Post-process the generated html to strip out empty header items
soup = BeautifulSoup(generated_html)
if not series:
series_tag = soup.find(attrs={'class':'cbj_series'})
if series_tag is not None:
series_tag.extract()
if not rating:
rating_tag = soup.find(attrs={'class':'cbj_rating'})
if rating_tag is not None:
rating_tag.extract()
if not tags:
tags_tag = soup.find(attrs={'class':'cbj_tags'})
if tags_tag is not None:
tags_tag.extract()
if not pubdate:
pubdate_tag = soup.find(attrs={'class':'cbj_pubdata'})
if pubdate_tag is not None:
pubdate_tag.extract()
if output_profile.short_name != 'kindle':
hr_tag = soup.find('hr', attrs={'class':'cbj_kindle_banner_hr'})
if hr_tag is not None:
hr_tag.extract()
return strip_encoding_declarations(
soup.renderContents('utf-8').decode('utf-8'))
from calibre.ebooks.oeb.base import RECOVER_PARSER
try:
root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER)
except:
try:
root = etree.fromstring(generate_html(escape(orig_comments)),
parser=RECOVER_PARSER)
except:
root = etree.fromstring(generate_html(''),
parser=RECOVER_PARSER)
return root
# }}}
def linearize_jacket(oeb):
for x in oeb.spine[:4]:
if XPath(JACKET_XPATH)(x.data):
for e in XPath('//h:table|//h:tr|//h:th')(x.data):
e.tag = XHTML('div')
for e in XPath('//h:td')(x.data):
e.tag = XHTML('span')
break
| gpl-3.0 | -1,363,386,381,183,059,000 | 32.070313 | 98 | 0.546421 | false |
araines/moto | tests/test_s3/test_s3_lifecycle.py | 18 | 3339 | from __future__ import unicode_literals
import boto
from boto.exception import S3ResponseError
from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule
import sure # noqa
from moto import mock_s3
@mock_s3
def test_lifecycle_create():
conn = boto.s3.connect_to_region("us-west-1")
bucket = conn.create_bucket("foobar")
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
bucket.configure_lifecycle(lifecycle)
response = bucket.get_lifecycle_config()
len(response).should.equal(1)
lifecycle = response[0]
lifecycle.id.should.equal('myid')
lifecycle.prefix.should.equal('')
lifecycle.status.should.equal('Enabled')
lifecycle.transition.should.equal(None)
@mock_s3
def test_lifecycle_with_glacier_transition():
conn = boto.s3.connect_to_region("us-west-1")
bucket = conn.create_bucket("foobar")
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
bucket.configure_lifecycle(lifecycle)
response = bucket.get_lifecycle_config()
transition = response[0].transition
transition.days.should.equal(30)
transition.storage_class.should.equal('GLACIER')
transition.date.should.equal(None)
@mock_s3
def test_lifecycle_multi():
conn = boto.s3.connect_to_region("us-west-1")
bucket = conn.create_bucket("foobar")
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
rules = bucket.get_lifecycle_config()
for rule in rules:
if rule.id == "1":
rule.prefix.should.equal("1/")
rule.expiration.days.should.equal(1)
elif rule.id == "2":
rule.prefix.should.equal("2/")
rule.expiration.days.should.equal(2)
elif rule.id == "3":
rule.prefix.should.equal("3/")
rule.expiration.date.should.equal(date)
elif rule.id == "4":
rule.prefix.should.equal("4/")
rule.transition.days.should.equal(4)
rule.transition.storage_class.should.equal(sc)
elif rule.id == "5":
rule.prefix.should.equal("5/")
rule.transition.date.should.equal(date)
rule.transition.storage_class.should.equal(sc)
else:
assert False, "Invalid rule id"
@mock_s3
def test_lifecycle_delete():
conn = boto.s3.connect_to_region("us-west-1")
bucket = conn.create_bucket("foobar")
lifecycle = Lifecycle()
lifecycle.add_rule(expiration=30)
bucket.configure_lifecycle(lifecycle)
response = bucket.get_lifecycle_config()
response.should.have.length_of(1)
bucket.delete_lifecycle_configuration()
bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError)
| apache-2.0 | -8,541,337,628,694,407,000 | 32.059406 | 80 | 0.65319 | false |
bourneagain/pythonBytes | quickSort.py | 1 | 1075 | def kthlargest(arr1, arr2, k):
if len(arr1) == 0:
return arr2[k]
elif len(arr2) == 0:
return arr1[k]
mida1 = len(arr1)/2
mida2 = len(arr2)/2
if mida1+mida2<k:
if arr1[mida1]>arr2[mida2]:
return kthlargest(arr1, arr2[mida2+1:], k-mida2-1)
else:
return kthlargest(arr1[mida1+1:], arr2, k-mida1-1)
else:
if arr1[mida1]>arr2[mida2]:
return kthlargest(arr1[:mida1], arr2, k)
else:
return kthlargest(arr1, arr2[:mida2], k)
# def quickSort(arr):
# less = []
# pivotList = []
# more = []
# if len(arr) <= 1:
# return arr
# else:
# pivot = arr[0]
# for i in arr:
# if i < pivot:
# less.append(i)
# elif i > pivot:
# more.append(i)
# else:
# pivotList.append(i)
# less = quickSort(less)
# more = quickSort(more)
# return less + pivotList + more
# a = [4, 65, 2, -31, 0, 99, 83, 782, 1]
# a = quickSort(a)
| mit | 2,610,790,294,793,926,700 | 25.825 | 62 | 0.474371 | false |
stackforge/networking-nec | networking_nec/nwa/common/config.py | 2 | 4009 | # Copyright 2015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.common import config
from oslo_config import cfg
from networking_nec._i18n import _
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
]
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
# nwa.ini
NWA_opts = [
cfg.StrOpt('server_url',
help=_("URL for NWA REST API.")),
cfg.StrOpt('access_key_id',
help=_("Access ID for NWA REST API.")),
cfg.StrOpt('secret_access_key',
help=_("Secret key for NWA REST API.")),
cfg.StrOpt('resource_group_name',
help=_(
"Resouce Group Name specified at creating tenant NW.")),
cfg.StrOpt('region_name',
help=_("RegionName for DC."),
default='RegionOne'),
cfg.IntOpt('scenario_polling_first_timer', default=2,
help=_("Timer value for the first scenario status polling.")),
cfg.IntOpt('scenario_polling_timer', default=10,
help=_("Timer value for polling scenario status.")),
cfg.IntOpt('scenario_polling_count', default=6,
help=_("Count value for polling scenario status.")),
cfg.BoolOpt('use_necnwa_router',
help=_("Using necnwa_router instead of the l3-router"),
default=True),
cfg.BoolOpt('use_neutron_vlan_id',
help=_("Using vlan id of neutron instead of NWA"),
default=False),
cfg.StrOpt('ironic_az_prefix',
help=_("The prefix name of device_owner used in ironic"),
default='BM_'),
cfg.BoolOpt('use_setting_fw_policy',
default=False,
help=_('Using setting_fw_policy as default')),
cfg.StrOpt('resource_group_file',
help=_("JSON file which defines relations between "
"physical network of OpenStack and NWA.")),
cfg.StrOpt('resource_group',
deprecated_for_removal=True,
deprecated_reason='In favor of resource_group_file option.',
help=_("""
Relations between physical network of OpenStack and NWA.
ex)
[
{
"physical_network": "physnet1",
"ResourceGroupName":"Core/Hypervisor/HV-RG01"
},
{ ... },
]""")),
cfg.StrOpt('lbaas_driver',
help=_("LBaaS Driver Name")),
cfg.StrOpt('fwaas_driver',
help=_("Firewall Driver Name")),
]
Scenario_opts = [
cfg.StrOpt('CreateTenantFW',
help=_("Scenario ID for the scenario CreateTenantFW.")),
cfg.StrOpt('CreateTenantNW',
help=_("Scenario ID for the scenario CreateTenantNW.")),
cfg.StrOpt('CreateVLAN',
help=_("Scenario ID for the scenario CreateVLAN.")),
cfg.StrOpt('CreateGeneralDev',
help=_(
"Scenario ID for the scenario CreateGeneralDev.")),
cfg.StrOpt('UpdateTenantFW',
help=_("Scenario ID for the scenario UpdateTenantFW.")),
cfg.StrOpt('SettingNAT',
help=_("Scenario ID for the scenario SettingNAT.")),
]
cfg.CONF.register_opts(NWA_opts, "NWA")
cfg.CONF.register_opts(Scenario_opts, "Scenario")
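# Illustrative nwa.ini fragment (not part of the original file) showing the
# option groups registered above; every value below is a placeholder:
#   [NWA]
#   server_url = http://nwa.example.com:8080
#   access_key_id = example-access-key
#   secret_access_key = example-secret-key
#   resource_group_name = Core/Hypervisor/HV-RG01
#   region_name = RegionOne
#   [Scenario]
#   CreateTenantNW = example-scenario-id
#   CreateVLAN = example-scenario-id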
| apache-2.0 | -5,495,047,114,034,500,000 | 39.09 | 78 | 0.596408 | false |
ergoregion/Rota-Program | Rota_System/UI/People/widget_population.py | 1 | 1876 | __author__ = 'Neil Butcher'
from PyQt4 import QtCore, QtGui
from model_population import PopulationModel
from Rota_System.UI.widget_addDel_list import AddDelListWidget
from widget_person import PersonWidget
class PopulationWidget(QtGui.QWidget):
commandIssued = QtCore.pyqtSignal(QtGui.QUndoCommand)
criticalCommandIssued = QtCore.pyqtSignal()
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QHBoxLayout(self)
self.list_widget = AddDelListWidget(self)
self.layout.addWidget(self.list_widget)
self.person_widget = PersonWidget(self)
self.layout.addWidget(self.person_widget)
self.list_widget.objectSelected.connect(self.person_widget.person)
self.person_widget.commandIssued.connect(self.emitCommand)
self.person_widget.criticalCommandIssued.connect(self.emitCriticalCommand)
def institution(self, institution):
model = PopulationModel(institution)
self.setModel(model)
return model
def setModel(self, model):
self.list_widget.setModel(model)
model.commandIssued.connect(self.emitCommand)
model.criticalCommandIssued.connect(self.emitCriticalCommand)
@QtCore.pyqtSlot(QtGui.QUndoCommand)
def emitCommand(self, command):
self.commandIssued.emit(command)
@QtCore.pyqtSlot()
def emitCriticalCommand(self):
self.criticalCommandIssued.emit()
from Rota_System.UI.model_undo import MasterUndoModel
import sys
from Rota_System.Institution import Institution
def main():
i = Institution(None)
model = PopulationModel(i)
m = MasterUndoModel()
app = QtGui.QApplication(sys.argv)
w = PopulationWidget(None)
m.add_command_contributer(w)
w.setModel(model)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit | 4,877,304,131,031,213,000 | 28.777778 | 82 | 0.710021 | false |
williamfeng323/py-web | flask/lib/python3.6/site-packages/sqlalchemy_utils/listeners.py | 2 | 7485 | import sqlalchemy as sa
from .exceptions import ImproperlyConfigured
def coercion_listener(mapper, class_):
"""
Auto assigns coercing listener for all class properties which are of coerce
capable type.
"""
for prop in mapper.iterate_properties:
try:
listener = prop.columns[0].type.coercion_listener
except AttributeError:
continue
sa.event.listen(
getattr(class_, prop.key),
'set',
listener,
retval=True
)
def instant_defaults_listener(target, args, kwargs):
for key, column in sa.inspect(target.__class__).columns.items():
if hasattr(column, 'default') and column.default is not None:
if callable(column.default.arg):
setattr(target, key, column.default.arg(target))
else:
setattr(target, key, column.default.arg)
def force_auto_coercion(mapper=None):
"""
    Function that assigns automatic data type coercion for all classes mapped
    by the given mapper. The coercion is applied to all coercion
capable properties. By default coercion is applied to all SQLAlchemy
mappers.
Before initializing your models you need to call force_auto_coercion.
::
from sqlalchemy_utils import force_auto_coercion
force_auto_coercion()
Then define your models the usual way::
class Document(Base):
__tablename__ = 'document'
id = sa.Column(sa.Integer, autoincrement=True)
name = sa.Column(sa.Unicode(50))
background_color = sa.Column(ColorType)
Now scalar values for coercion capable data types will convert to
appropriate value objects::
document = Document()
document.background_color = 'F5F5F5'
document.background_color # Color object
session.commit()
    A useful side-effect of this is that additional validation of data will be
    done at the moment it is being assigned to model objects. For example
    without auto coercion set, an invalid
:class:`sqlalchemy_utils.types.IPAddressType` (eg. ``10.0.0 255.255``)
would get through without an exception being raised. The database wouldn't
notice this (as most databases don't have a native type for an IP address,
so they're usually just stored as a string), and the ``ipaddress/ipaddr``
package uses a string field as well.
:param mapper: The mapper which the automatic data type coercion should be
applied to
"""
if mapper is None:
mapper = sa.orm.mapper
sa.event.listen(mapper, 'mapper_configured', coercion_listener)
def force_instant_defaults(mapper=None):
"""
Function that assigns object column defaults on object initialization
time. By default calling this function applies instant defaults to all
your models.
Setting up instant defaults::
from sqlalchemy_utils import force_instant_defaults
force_instant_defaults()
Example usage::
class Document(Base):
__tablename__ = 'document'
id = sa.Column(sa.Integer, autoincrement=True)
name = sa.Column(sa.Unicode(50))
created_at = sa.Column(sa.DateTime, default=datetime.now)
document = Document()
document.created_at # datetime object
:param mapper: The mapper which the automatic instant defaults forcing
should be applied to
"""
if mapper is None:
mapper = sa.orm.mapper
sa.event.listen(mapper, 'init', instant_defaults_listener)
def auto_delete_orphans(attr):
"""
Delete orphans for given SQLAlchemy model attribute. This function can be
used for deleting many-to-many associated orphans easily. For more
information see
https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/ManyToManyOrphan.
Consider the following model definition:
::
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import event
Base = declarative_base()
tagging = Table(
'tagging',
Base.metadata,
Column(
'tag_id',
Integer,
ForeignKey('tag.id', ondelete='CASCADE'),
primary_key=True
),
Column(
'entry_id',
Integer,
ForeignKey('entry.id', ondelete='CASCADE'),
primary_key=True
)
)
class Tag(Base):
__tablename__ = 'tag'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
def __init__(self, name=None):
self.name = name
class Entry(Base):
__tablename__ = 'entry'
id = Column(Integer, primary_key=True)
tags = relationship(
'Tag',
secondary=tagging,
backref='entries'
)
Now lets say we want to delete the tags if all their parents get deleted (
all Entry objects get deleted). This can be achieved as follows:
::
from sqlalchemy_utils import auto_delete_orphans
auto_delete_orphans(Entry.tags)
After we've set up this listener we can see it in action.
::
e = create_engine('sqlite://')
Base.metadata.create_all(e)
s = Session(e)
r1 = Entry()
r2 = Entry()
r3 = Entry()
t1, t2, t3, t4 = Tag('t1'), Tag('t2'), Tag('t3'), Tag('t4')
r1.tags.extend([t1, t2])
r2.tags.extend([t2, t3])
r3.tags.extend([t4])
s.add_all([r1, r2, r3])
assert s.query(Tag).count() == 4
r2.tags.remove(t2)
assert s.query(Tag).count() == 4
r1.tags.remove(t2)
assert s.query(Tag).count() == 3
r1.tags.remove(t1)
assert s.query(Tag).count() == 2
.. versionadded: 0.26.4
:param attr: Association relationship attribute to auto delete orphans from
"""
parent_class = attr.parent.class_
target_class = attr.property.mapper.class_
backref = attr.property.backref
if not backref:
raise ImproperlyConfigured(
'The relationship argument given for auto_delete_orphans needs to '
'have a backref relationship set.'
)
if isinstance(backref, tuple):
backref = backref[0]
@sa.event.listens_for(sa.orm.Session, 'after_flush')
def delete_orphan_listener(session, ctx):
# Look through Session state to see if we want to emit a DELETE for
# orphans
orphans_found = (
any(
isinstance(obj, parent_class) and
sa.orm.attributes.get_history(obj, attr.key).deleted
for obj in session.dirty
) or
any(
isinstance(obj, parent_class)
for obj in session.deleted
)
)
if orphans_found:
# Emit a DELETE for all orphans
(
session.query(target_class)
.filter(
~getattr(target_class, backref).any()
)
.delete(synchronize_session=False)
)
| mit | 6,640,016,475,307,439,000 | 27.245283 | 79 | 0.593721 | false |
BT-astauder/odoo | addons/web_analytics/__openerp__.py | 305 | 1432 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/website-builder',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
| agpl-3.0 | 444,133,330,025,892,800 | 33.926829 | 78 | 0.581006 | false |
brianwoo/django-tutorial | ENV/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py | 475 | 9162 | """
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = LockFile('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = LockFile('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> lock = LockFile('somefile')
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import absolute_import
import sys
import socket
import os
import threading
import time
import urllib
import warnings
import functools
# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
threading.Thread.get_name = threading.Thread.getName
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
'LockBase', 'locked']
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class LockBase:
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = LockBase('somefile')
>>> lock = LockBase('somefile', threaded=False)
"""
self.path = path
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
t = threading.current_thread()
# Thread objects in Python 2.4 and earlier do not have ident
# attrs. Worm around that.
ident = getattr(t, "ident", hash(t))
self.tname = "-%x" % (ident & 0xffffffff)
else:
self.tname = ""
dirname = os.path.dirname(self.lock_file)
# unique name is mostly about the current process, but must
# also contain the path -- otherwise, two adjacent locked
# files conflict (one file gets locked, creating lock-file and
# unique file, the other one gets locked, creating lock-file
# and overwriting the already existing lock-file, then one
# gets unlocked, deleting both lock-file and unique file,
# finally the last lock errors out upon releasing.
self.unique_name = os.path.join(dirname,
"%s%s.%s%s" % (self.hostname,
self.tname,
self.pid,
hash(self.path)))
self.timeout = timeout
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
"""
        raise NotImplementedError("implement in subclass")
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
"""
        raise NotImplementedError("implement in subclass")
def is_locked(self):
"""
Tell whether or not the file is locked.
"""
        raise NotImplementedError("implement in subclass")
def i_am_locking(self):
"""
Return True if this object is locking the file.
"""
        raise NotImplementedError("implement in subclass")
def break_lock(self):
"""
Remove a lock. Useful if a locking thread failed to unlock.
"""
        raise NotImplementedError("implement in subclass")
def __enter__(self):
"""
Context manager support.
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""
Context manager support.
"""
self.release()
def __repr__(self):
return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
self.path)
def _fl_helper(cls, mod, *args, **kwds):
warnings.warn("Import from %s module instead of lockfile package" % mod,
DeprecationWarning, stacklevel=2)
# This is a bit funky, but it's only for awhile. The way the unit tests
# are constructed this function winds up as an unbound method, so it
# actually takes three args, not two. We want to toss out self.
if not isinstance(args[0], str):
# We are testing, avoid the first arg
args = args[1:]
if len(args) == 1 and not kwds:
kwds["threaded"] = True
return cls(*args, **kwds)
def LinkFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import LinkLockFile from the
lockfile.linklockfile module.
"""
from . import linklockfile
return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
*args, **kwds)
def MkdirFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import MkdirLockFile from the
lockfile.mkdirlockfile module.
"""
from . import mkdirlockfile
return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
*args, **kwds)
def SQLiteFileLock(*args, **kwds):
"""Factory function provided for backwards compatibility.
Do not use in new code. Instead, import SQLiteLockFile from the
lockfile.mkdirlockfile module.
"""
from . import sqlitelockfile
return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
*args, **kwds)
def locked(path, timeout=None):
"""Decorator which enables locks for decorated function.
Arguments:
- path: path for lockfile.
- timeout (optional): Timeout for acquiring lock.
Usage:
@locked('/var/run/myname', timeout=0)
def myname(...):
...
"""
def decor(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
lock = FileLock(path, timeout=timeout)
lock.acquire()
try:
return func(*args, **kwargs)
finally:
lock.release()
return wrapper
return decor
if hasattr(os, "link"):
from . import linklockfile as _llf
LockFile = _llf.LinkLockFile
else:
from . import mkdirlockfile as _mlf
LockFile = _mlf.MkdirLockFile
FileLock = LockFile
| gpl-3.0 | 5,312,652,990,075,613,000 | 27.104294 | 79 | 0.59332 | false |
Boedaxbayah-vpn/boedaxbayah | ps_mem.py | 3 | 17568 | #!/usr/bin/env python
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd processes together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Licence: LGPLv2
# Author: [email protected]
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from [email protected]
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from [email protected]
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <[email protected]>
# V3.3 24 Jun 2014
# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable effect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
#
# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/
# FreeBSD 8.0 supports up to a level of Linux 2.6.16
import getopt
import time
import errno
import os
import sys
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
sys.excepthook = sys.__excepthook__
if issubclass(etype, KeyboardInterrupt):
pass
elif issubclass(etype, IOError) and value.errno == errno.EPIPE:
pass
else:
sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
#
# Define some global variables
#
PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
our_pid = os.getpid()
have_pss = 0
class Proc:
def __init__(self):
uname = os.uname()
if uname[0] == "FreeBSD":
self.proc = '/compat/linux/proc'
else:
self.proc = '/proc'
def path(self, *args):
return os.path.join(self.proc, *(str(a) for a in args))
def open(self, *args):
try:
return open(self.path(*args))
except (IOError, OSError):
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
proc = Proc()
#
# Functions
#
def parse_options():
try:
long_options = ['split-args', 'help', 'total']
opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options)
except getopt.GetoptError:
sys.stderr.write(help())
sys.exit(3)
# ps_mem.py options
split_args = False
pids_to_show = None
watch = None
only_total = False
for o, a in opts:
if o in ('-s', '--split-args'):
split_args = True
if o in ('-t', '--total'):
only_total = True
if o in ('-h', '--help'):
sys.stdout.write(help())
sys.exit(0)
if o in ('-p',):
try:
pids_to_show = [int(x) for x in a.split(',')]
except:
sys.stderr.write(help())
sys.exit(3)
if o in ('-w',):
try:
watch = int(a)
except:
sys.stderr.write(help())
sys.exit(3)
return (split_args, pids_to_show, watch, only_total)
def help():
help_msg = 'ps_mem.py - Show process memory usage\n'\
'\n'\
'-h Show this help\n'\
'-w <N> Measure and show process memory every N seconds\n'\
'-p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list\n' \
'-s, --split-args Show and separate by, all command line arguments\n' \
'-t, --total Show only the total value\n'
return help_msg
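# Illustrative invocations (not part of the original script); they use only
# the flags documented above, and the PIDs/intervals are placeholders:
#   sudo python ps_mem.py                 # one-shot report for all programs
#   sudo python ps_mem.py -p 1234,5678    # only the listed PIDs
#   sudo python ps_mem.py -w 5 -t         # re-measure every 5 seconds, total only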
#(major,minor,release)
def kernel_ver():
kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
last = len(kv)
if last == 2:
kv.append('0')
last -= 1
while last > 0:
for char in "-_":
kv[last] = kv[last].split(char)[0]
try:
int(kv[last])
except:
kv[last] = 0
last -= 1
return (int(kv[0]), int(kv[1]), int(kv[2]))
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
global have_pss
mem_id = pid #unique
Private_lines = []
Shared_lines = []
Pss_lines = []
Rss = (int(proc.open(pid, 'statm').readline().split()[1])
* PAGESIZE)
if os.path.exists(proc.path(pid, 'smaps')): #stat
digester = md5_new()
for line in proc.open(pid, 'smaps').readlines(): #open
# Note we checksum smaps as maps is usually but
# not always different for separate processes.
digester.update(line.encode('latin1'))
if line.startswith("Shared"):
Shared_lines.append(line)
elif line.startswith("Private"):
Private_lines.append(line)
elif line.startswith("Pss"):
have_pss = 1
Pss_lines.append(line)
mem_id = digester.hexdigest()
Shared = sum([int(line.split()[1]) for line in Shared_lines])
Private = sum([int(line.split()[1]) for line in Private_lines])
#Note Shared + Private = Rss above
#The Rss in smaps includes video card mem etc.
if have_pss:
        pss_adjust = 0.5  # add 0.5 KiB, the average error due to truncation
Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
Shared = Pss - Private
elif (2,6,1) <= kernel_ver() <= (2,6,9):
Shared = 0 #lots of overestimation, but what can we do?
Private = Rss
else:
Shared = int(proc.open(pid, 'statm').readline().split()[2])
Shared *= PAGESIZE
Private = Rss - Shared
return (Private, Shared, mem_id)
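# Illustrative arithmetic (not part of the original script): on kernels that
# expose Pss in smaps, a process whose Private_* lines sum to 2000 KiB and
# whose Pss lines sum to roughly 2600 KiB is returned as Private=2000 and
# Shared~=600, so Private + Shared is its proportional share of system RAM.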
def getCmdName(pid, split_args):
cmdline = proc.open(pid, 'cmdline').read().split("\0")
if cmdline[-1] == '' and len(cmdline) > 1:
cmdline = cmdline[:-1]
path = proc.path(pid, 'exe')
try:
path = os.readlink(path)
# Some symlink targets were seen to contain NULs on RHEL 5 at least
# https://github.com/pixelb/scripts/pull/10, so take string up to NUL
path = path.split('\0')[0]
except OSError:
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # either kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
if split_args:
return " ".join(cmdline)
if path.endswith(" (deleted)"):
path = path[:-10]
if os.path.exists(path):
path += " [updated]"
else:
#The path could be have prelink stuff so try cmdline
#which might have the full path present. This helped for:
#/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
if os.path.exists(cmdline[0]):
path = cmdline[0] + " [updated]"
else:
path += " [deleted]"
exe = os.path.basename(path)
cmd = proc.open(pid, 'status').readline()[6:-1]
if exe.startswith(cmd):
cmd = exe #show non truncated version
#Note because we show the non truncated name
#one can have separated programs as follows:
#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
# 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
return cmd
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki"):
powers = ["Ki", "Mi", "Gi", "Ti"]
while num >= 1000: #4 digits
num /= 1024.0
power = powers[powers.index(power)+1]
return "%.1f %s" % (num, power)
def cmd_with_count(cmd, count):
if count > 1:
return "%s (%u)" % (cmd, count)
else:
return cmd
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
"""http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
kv = kernel_ver()
if kv[:2] == (2,4):
if proc.open('meminfo').read().find("Inact_") == -1:
return 1
return 0
elif kv[:2] == (2,6):
pid = os.getpid()
if os.path.exists(proc.path(pid, 'smaps')):
if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
return 2
else:
return 1
if (2,6,1) <= kv <= (2,6,9):
return -1
return 0
elif kv[0] > 2:
return 2
else:
return 1
def show_shared_val_accuracy( possible_inacc, only_total=False ):
level = ("Warning","Error")[only_total]
if possible_inacc == -1:
sys.stderr.write(
"%s: Shared memory is not reported by this system.\n" % level
)
sys.stderr.write(
"Values reported will be too large, and totals are not reported\n"
)
elif possible_inacc == 0:
sys.stderr.write(
"%s: Shared memory is not reported accurately by this system.\n" % level
)
sys.stderr.write(
"Values reported could be too large, and totals are not reported\n"
)
elif possible_inacc == 1:
sys.stderr.write(
"%s: Shared memory is slightly over-estimated by this system\n"
"for each program, so totals are not reported.\n" % level
)
sys.stderr.close()
if only_total and possible_inacc != 2:
sys.exit(1)
def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ):
cmds = {}
shareds = {}
mem_ids = {}
count = {}
for pid in os.listdir(proc.path('')):
if not pid.isdigit():
continue
pid = int(pid)
# Some filters
if only_self and pid != our_pid:
continue
if pid == our_pid and not include_self:
continue
if pids_to_show is not None and pid not in pids_to_show:
continue
try:
cmd = getCmdName(pid, split_args)
except LookupError:
#operation not permitted
#kernel threads don't have exe links or
#process gone
continue
try:
private, shared, mem_id = getMemStats(pid)
except RuntimeError:
continue #process gone
if shareds.get(cmd):
if have_pss: #add shared portion of PSS together
shareds[cmd] += shared
elif shareds[cmd] < shared: #just take largest shared val
shareds[cmd] = shared
else:
shareds[cmd] = shared
cmds[cmd] = cmds.setdefault(cmd, 0) + private
if cmd in count:
count[cmd] += 1
else:
count[cmd] = 1
mem_ids.setdefault(cmd, {}).update({mem_id:None})
#Add shared mem for each program
total = 0
for cmd in cmds:
cmd_count = count[cmd]
if len(mem_ids[cmd]) == 1 and cmd_count > 1:
# Assume this program is using CLONE_VM without CLONE_THREAD
# so only account for one of the processes
cmds[cmd] /= cmd_count
if have_pss:
shareds[cmd] /= cmd_count
cmds[cmd] = cmds[cmd] + shareds[cmd]
total += cmds[cmd] #valid if PSS available
sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
sorted_cmds = [x for x in sorted_cmds if x[1]]
return sorted_cmds, shareds, count, total
def print_header():
sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n")
def print_memory_usage(sorted_cmds, shareds, count, total):
for cmd in sorted_cmds:
sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" %
(human(cmd[1]-shareds[cmd[0]]),
human(shareds[cmd[0]]), human(cmd[1]),
cmd_with_count(cmd[0], count[cmd[0]])))
if have_pss:
sys.stdout.write("%s\n%s%8sB\n%s\n" %
("-" * 33, " " * 24, human(total), "=" * 33))
def verify_environment():
if os.geteuid() != 0:
sys.stderr.write("Sorry, root permission required.\n")
if __name__ == '__main__':
sys.stderr.close()
sys.exit(1)
try:
kv = kernel_ver()
except (IOError, OSError):
val = sys.exc_info()[1]
if val.errno == errno.ENOENT:
sys.stderr.write(
"Couldn't access " + proc.path('') + "\n"
"Only GNU/Linux and FreeBSD (with linprocfs) are supported\n")
sys.exit(2)
else:
raise
if __name__ == '__main__':
verify_environment()
split_args, pids_to_show, watch, only_total = parse_options()
if not only_total:
print_header()
if watch is not None:
try:
sorted_cmds = True
while sorted_cmds:
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
time.sleep(watch)
else:
sys.stdout.write('Process does not exist anymore.\n')
except KeyboardInterrupt:
pass
else:
# This is the default behavior
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
# We must close explicitly, so that any EPIPE exception
# is handled by our excepthook, rather than the default
# one which is reenabled after this script finishes.
sys.stdout.close()
vm_accuracy = shared_val_accuracy()
show_shared_val_accuracy( vm_accuracy, only_total )
| gpl-3.0 | -5,824,614,722,368,798,000 | 34.92638 | 97 | 0.57508 | false |
introprogramming/exercises | exercises/plot_2d/plot_2d.py | 1 | 5835 | import math, pygame
from pygame.locals import *
#############################################
## Standard colors (RGB)
BLACK = (20, 20, 40)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
#############################################
## Customize plot here
def function_to_print(x):
"""Write function to plot here.
Must take a single number x and return a single number y."""
return -x * (x - 3)
# Range of window
X_MIN = 0.0
X_MAX = 10.0
Y_MIN = -10.0
Y_MAX = 10.0
# Tick interval on axes
X_TICK = 2.5
Y_TICK = 2.5
# Granularity of plotted functions, more points -> higher resolution plot
N_POINTS = 100
# Colors
background_color = BLACK
plot_color = GREEN
grid_color = WHITE
# Note, it is also possible to make a list of functions to print
# and respective colors:
# functions = [(f1, color1), (f2, color2), ...]
#############################################
## Let the program calculate the rest
WIDTH = 640
HEIGHT = 480
X_SIZE = X_MAX - X_MIN
Y_SIZE = Y_MAX - Y_MIN
def coordinate_to_position(c):
"""Converts a model coordinate (vector) into a graphic position (pixel)"""
gx = (c[0] - X_MIN) * WIDTH / X_SIZE
gy = HEIGHT - (c[1] - Y_MIN) * HEIGHT / Y_SIZE
return gx, gy
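# Example (illustrative, not in the original file): with the default window
# and ranges above (640x480, x in [0, 10], y in [-10, 10]),
#   coordinate_to_position([5, 0])  # -> (320.0, 240.0), the window centre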
def curve_coordinates(f, x0, x1, points):
"""Returns list of coordinates
Creates linear splines for this function f, from x0 to x1
Length of returned list == points."""
coordinates = []
x = x0
delta = (x1 - x0) / (points - 1)
while x <= x1:
coordinates += [[x, f(x)]]
x += delta
return coordinates
def linspace(x0, x1, points):
"""Returns a list of numbers of `points` elements,
with constant intervals between `x0` and `x1`"""
delta = (x1 - x0) / (points - 1)
return map(lambda x: x0 + delta * x, range(points))
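# Example (illustrative, not in the original file):
#   list(linspace(0.0, 1.0, 5))  # -> [0.0, 0.25, 0.5, 0.75, 1.0]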
def curve_coordinates2(f, x0, x1, points):
"""(Alternative implementation):
This is more compact and functional-like."""
return [[x, f(x)] for x in linspace(x0, x1, points)]
def draw_ticks(screen, axis):
"""Draws appropriate ticks on the specified axis.
axis == 0 -> X-axis, otherwise Y-axis.
This implementation is not so readable, see alternative implementation
for a more readable one."""
if axis == 0:
min = X_MIN
max = X_MAX
tick = X_TICK
limit = HEIGHT
else:
axis = 1
min = Y_MIN
max = Y_MAX
tick = Y_TICK
limit = WIDTH
start = min + min % tick
end = max - max % tick
points = (end - start) / tick + 1
t = limit / 120
for x in linspace(start, end, int(points)):
c = [0, 0]
c[axis] = x
v = coordinate_to_position(c)
a = v[1 - axis] + t
if a > limit:
a = limit
b = v[1 - axis] - t
if b < 0:
b = 0
# Copying v
s = list(v)
s[1 - axis] = a
e = list(v)
e[1 - axis] = b
pygame.draw.line(screen, grid_color, s, e, 2)
def draw_x_ticks(screen):
"""(Alternative implementation):
Draws appropriate ticks on the X-axis."""
start = X_MIN + X_MIN % X_TICK
end = X_MAX - X_MAX % X_TICK
points = (end - start) / X_TICK + 1
# t == half length of the tick line
t = HEIGHT / 120
# one iteration per tick
for x in linspace(start, end, int(points)):
v = coordinate_to_position([x, 0])
a = v[1] + t
b = v[1] - t
if a > HEIGHT:
a = HEIGHT
if b < 0:
b = 0
pygame.draw.line(screen, grid_color, [v[0], a], [v[0], b], 2)
def draw_y_ticks(screen):
"""(Alternative implementation):
Draws appropriate ticks on the Y-axis.
This function mirrors draw_x_ticks(...)"""
start = Y_MIN + Y_MIN % Y_TICK
end = Y_MAX - Y_MAX % Y_TICK
points = (end - start) / Y_TICK + 1
t = WIDTH / 120
for y in linspace(start, end, int(points)):
v = coordinate_to_position([0, y])
# print v
a = v[0] + t
b = v[0] - t
if (a > WIDTH):
a = WIDTH
if (b < 0):
b = 0
pygame.draw.line(screen, grid_color, [a, v[1]], [b, v[1]], 2)
def draw(screen, pp, plot_color):
"""Plots the points `pp` on the specified screen with the specified color."""
# Function
pygame.draw.lines(screen, plot_color, False, pp, 3)
def draw_axis(screen):
"""Draws the axes and ticks of the coordinate system."""
## Alternative implementations:
# draw_x_ticks(screen)
# draw_y_ticks(screen)
draw_ticks(screen, 0)
draw_ticks(screen, 1)
x_points = list(map(coordinate_to_position, [[X_MIN, 0], [X_MAX, 0]]))
y_points = list(map(coordinate_to_position, [[0, Y_MIN], [0, Y_MAX]]))
# X-Axis
pygame.draw.lines(screen, grid_color, False, x_points, 2)
# Y-Axis
pygame.draw.lines(screen, grid_color, False, y_points, 2)
def main():
"""Graphics: draws graphs on window and await EXIT or ESCAPE."""
pygame.init()
screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption('Plot 2d')
clock = pygame.time.Clock()
screen.fill(background_color)
cc = curve_coordinates(function_to_print, X_MIN, X_MAX, N_POINTS)
pp = list(map(coordinate_to_position, cc))
# This would typically be done inside the loop, but since it is never
# updated: might as well keep it outside
draw(screen, pp, plot_color)
draw_axis(screen)
done = False
while not done:
time = clock.tick(60)
pygame.display.update()
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
done = True
break
pygame.quit()
# if Python says run...
if __name__ == '__main__':
main()
| mit | -779,678,578,324,625,400 | 23.620253 | 81 | 0.554927 | false |
hitchtest/hitchvagrant | hitchvagrant/vagrant.py | 1 | 1317 | from sys import argv, stdout, stderr, exit
from subprocess import call, PIPE
from os import path, chdir
import optparse
import time
def run():
parser = optparse.OptionParser()
parser.add_option("-d", "--directory", type="str", dest="directory", default=".",
help="Specify the directory that contains the Vagrantfile (default: current directory)")
options, _ = parser.parse_args(argv[1:])
directory = path.abspath(options.directory)
if call(["which", "vagrant"], stdout=PIPE, stderr=PIPE) != 0:
stderr.write("Vagrant not installed.\n")
exit(1)
if path.exists(directory):
chdir(directory)
else:
stderr.write("Directory does not exist.\n")
exit(1)
if not path.exists(path.join(directory, "Vagrantfile")):
stderr.write("Vagrantfile does not exist at '{}'.\n".format(directory))
exit(1)
try:
call(["vagrant", "up"])
stdout.write("Vagrant service ready\n")
while True:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
stdout.write("Shutting down vagrant...\n")
returncode = call(["vagrant", "halt"])
if returncode != 0:
            call(["vagrant", "halt", "--force"])
exit(0)
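# Example invocation (illustrative, not part of the original file), pointing
# the service at a directory that contains a Vagrantfile:
#   python vagrant.py --directory /path/to/vagrant/project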
if __name__=='__main__':
run()
| agpl-3.0 | 113,397,798,606,808,370 | 28.931818 | 110 | 0.602126 | false |
dannyboi104/SickRage | lib/hachoir_parser/archive/mozilla_ar.py | 74 | 2405 | """MAR (Mozilla ARchive) parser
Author: Robert Xiao
Creation date: July 10, 2007
"""
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.field import (RootSeekableFieldSet, FieldSet,
String, CString, UInt32, RawBytes)
from hachoir_core.text_handler import displayHandler, filesizeHandler
from hachoir_core.tools import humanUnixAttributes
from hachoir_parser import HachoirParser
class IndexEntry(FieldSet):
def createFields(self):
yield UInt32(self, "offset", "Offset in bytes relative to start of archive")
yield filesizeHandler(UInt32(self, "length", "Length in bytes"))
yield displayHandler(UInt32(self, "flags"), humanUnixAttributes)
yield CString(self, "name", "Filename (byte array)")
def createDescription(self):
return 'File %s, Size %s, Mode %s'%(
self["name"].display, self["length"].display, self["flags"].display)
class MozillaArchive(HachoirParser, RootSeekableFieldSet):
MAGIC = "MAR1"
PARSER_TAGS = {
"id": "mozilla_ar",
"category": "archive",
"file_ext": ("mar",),
"min_size": (8+4+13)*8, # Header, Index Header, 1 Index Entry
"magic": ((MAGIC, 0),),
"description": "Mozilla Archive",
}
endian = BIG_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield String(self, "magic", 4, "File signature (MAR1)", charset="ASCII")
yield UInt32(self, "index_offset", "Offset to index relative to file start")
self.seekByte(self["index_offset"].value, False)
yield UInt32(self, "index_size", "size of index in bytes")
current_index_size = 0 # bytes
while current_index_size < self["index_size"].value:
# plus 4 compensates for index_size
self.seekByte(self["index_offset"].value + current_index_size + 4, False)
entry = IndexEntry(self, "index_entry[]")
yield entry
current_index_size += entry.size // 8
self.seekByte(entry["offset"].value, False)
yield RawBytes(self, "file[]", entry["length"].value)
| gpl-3.0 | 1,947,980,633,558,039,000 | 39.083333 | 93 | 0.63368 | false |
meabsence/python-for-android | python3-alpha/python3-src/Tools/pybench/Unicode.py | 92 | 11110 | try:
unicode
except NameError:
raise ImportError
from pybench import Test
class ConcatUnicode(Test):
version = 2.0
operations = 10 * 5
rounds = 60000
def test(self):
# Make sure the strings are *not* interned
s = unicode(u''.join(map(str,range(100))))
t = unicode(u''.join(map(str,range(1,101))))
for i in range(self.rounds):
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
def calibrate(self):
s = unicode(u''.join(map(str,range(100))))
t = unicode(u''.join(map(str,range(1,101))))
for i in range(self.rounds):
pass
class CompareUnicode(Test):
version = 2.0
operations = 10 * 5
rounds = 150000
def test(self):
# Make sure the strings are *not* interned
s = unicode(u''.join(map(str,range(10))))
t = unicode(u''.join(map(str,range(10))) + "abc")
for i in range(self.rounds):
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
def calibrate(self):
s = unicode(u''.join(map(str,range(10))))
t = unicode(u''.join(map(str,range(10))) + "abc")
for i in range(self.rounds):
pass
class CreateUnicodeWithConcat(Test):
version = 2.0
operations = 10 * 5
rounds = 80000
def test(self):
for i in range(self.rounds):
s = u'om'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
def calibrate(self):
for i in range(self.rounds):
pass
class UnicodeSlicing(Test):
version = 2.0
operations = 5 * 7
rounds = 140000
def test(self):
s = unicode(u''.join(map(str,range(100))))
for i in range(self.rounds):
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
def calibrate(self):
s = unicode(u''.join(map(str,range(100))))
for i in range(self.rounds):
pass
### String methods
class UnicodeMappings(Test):
version = 2.0
operations = 3 * (5 + 4 + 2 + 1)
rounds = 10000
def test(self):
s = u''.join(map(unichr,range(20)))
t = u''.join(map(unichr,range(100)))
u = u''.join(map(unichr,range(500)))
v = u''.join(map(unichr,range(1000)))
for i in range(self.rounds):
s.lower()
s.lower()
s.lower()
s.lower()
s.lower()
s.upper()
s.upper()
s.upper()
s.upper()
s.upper()
s.title()
s.title()
s.title()
s.title()
s.title()
t.lower()
t.lower()
t.lower()
t.lower()
t.upper()
t.upper()
t.upper()
t.upper()
t.title()
t.title()
t.title()
t.title()
u.lower()
u.lower()
u.upper()
u.upper()
u.title()
u.title()
v.lower()
v.upper()
v.title()
def calibrate(self):
s = u''.join(map(unichr,range(20)))
t = u''.join(map(unichr,range(100)))
u = u''.join(map(unichr,range(500)))
v = u''.join(map(unichr,range(1000)))
for i in range(self.rounds):
pass
class UnicodePredicates(Test):
version = 2.0
operations = 5 * 9
rounds = 120000
def test(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in range(self.rounds):
s = data[i % len_data]
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
def calibrate(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in range(self.rounds):
s = data[i % len_data]
try:
import unicodedata
except ImportError:
pass
else:
class UnicodeProperties(Test):
version = 2.0
operations = 5 * 8
rounds = 100000
def test(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in range(self.rounds):
c = data[i % len_data]
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
def calibrate(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in range(self.rounds):
c = data[i % len_data]
| apache-2.0 | -8,933,290,414,536,158,000 | 19.536044 | 76 | 0.350045 | false |
IgowWang/ML_python3 | kerasgo/qa.py | 2 | 4487 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author igor
# Created by iFantastic on 16-6-3
import re
import tarfile
from functools import reduce
import numpy as np
import nltk
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.layers.core import Dense, Merge
from keras.layers import recurrent
from keras.models import Sequential
from keras.utils.visualize_util import plot
np.random.seed(1337)
DATA_PATH = '/data/task_qa.tar.gz'
tar = tarfile.open(DATA_PATH)
challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt'
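# The extracted files use the plain-text bAbI format, e.g.:
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?<TAB>bathroom<TAB>1
# Story lines are numbered per episode; question lines also carry the answer
# and the supporting fact ids, separated by tabs. parse_stories() below turns
# this into (story, question, answer) tuples.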
def parse_stories(lines, only_supporting=False):
data = []
story = []
for line in lines:
line = line.decode()
line = line.strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = nltk.word_tokenize(q)
substory = None
if only_supporting:
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = nltk.word_tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=50):
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
    data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word2id, vocab_size, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word2id[w] for w in story]
xq = [word2id[w] for w in query]
y = np.zeros(vocab_size)
y[word2id[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)
def main():
RNN = recurrent.GRU
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 5
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN, EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE,
QUERY_HIDDEN_SIZE))
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train + test)))
vocab_size = len(vocab) + 1
word2id = dict((c, i + 1) for i, c in enumerate(vocab))
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
X, Xq, Y = vectorize_stories(train, word2id, vocab_size, story_maxlen, query_maxlen)
tX, tXq, tY = vectorize_stories(test, word2id, vocab_size, story_maxlen, query_maxlen)
print('vocab = {}'.format(vocab))
print('X.shape = {}'.format(X.shape))
print('Xq.shape = {}'.format(Xq.shape))
print('Y.shape = {}'.format(Y.shape))
print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen))
print('Build model...')
sentrnn = Sequential()
sentrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=story_maxlen))
sentrnn.add(RNN(SENT_HIDDEN_SIZE, return_sequences=False))
qrnn = Sequential()
qrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=query_maxlen))
qrnn.add(RNN(QUERY_HIDDEN_SIZE, return_sequences=False))
model = Sequential()
model.add(Merge([sentrnn, qrnn], mode='concat'))
model.add((Dense(vocab_size, activation='softmax')))
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# plot(model)
print("Training")
model.fit([X, Xq], Y, batch_size=BATCH_SIZE, nb_epoch=EPOCHS,
validation_split=0.05, verbose=True)
loss, acc = model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
if __name__ == '__main__':
# data = get_stories(tar.extractfile(challenge.format('train')))
# print(data[0])
main()
| gpl-3.0 | 5,982,826,061,479,286,000 | 33.515385 | 119 | 0.60887 | false |
jwiggins/scikit-image | skimage/transform/tests/test_pyramids.py | 35 | 2302 | from numpy.testing import assert_array_equal, assert_raises, run_module_suite
from skimage import data
from skimage.transform import pyramids
image = data.astronaut()
image_gray = image[..., 0]
def test_pyramid_reduce_rgb():
rows, cols, dim = image.shape
out = pyramids.pyramid_reduce(image, downscale=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_reduce_gray():
rows, cols = image_gray.shape
out = pyramids.pyramid_reduce(image_gray, downscale=2)
assert_array_equal(out.shape, (rows / 2, cols / 2))
def test_pyramid_expand_rgb():
rows, cols, dim = image.shape
out = pyramids.pyramid_expand(image, upscale=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_pyramid_expand_gray():
rows, cols = image_gray.shape
out = pyramids.pyramid_expand(image_gray, upscale=2)
assert_array_equal(out.shape, (rows * 2, cols * 2))
def test_build_gaussian_pyramid_rgb():
rows, cols, dim = image.shape
pyramid = pyramids.pyramid_gaussian(image, downscale=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_gaussian_pyramid_gray():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid_rgb():
rows, cols, dim = image.shape
pyramid = pyramids.pyramid_laplacian(image, downscale=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid_gray():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_laplacian(image_gray, downscale=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
assert_array_equal(out.shape, layer_shape)
def test_check_factor():
assert_raises(ValueError, pyramids._check_factor, 0.99)
assert_raises(ValueError, pyramids._check_factor, - 2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -7,279,729,311,152,568,000 | 30.972222 | 77 | 0.668983 | false |
hqpr/findyour3d | findyour3d/company/forms.py | 1 | 6270 | from django import forms
from .models import Company, SpecialOffer
EXPEDITED_CHOICES = (
(0, 'No, we do not offer any expedited shipping options.'),
(1, 'Yes we offer an expedited process for a fee.')
)
class AddCompanyForm(forms.ModelForm):
class Meta:
model = Company
fields = ['name', 'display_name', 'address_line_1', 'address_line_2',
'full_name', 'email', 'phone', 'website', 'ideal_customer',
'is_cad_assistance', 'budget',
'material', 'top_printing_processes',
'description', 'user']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'display_name': forms.TextInput(attrs={'class': 'form-control'}),
'address_line_1': forms.TextInput(attrs={'class': 'form-control'}),
'address_line_2': forms.TextInput(attrs={'class': 'form-control'}),
'full_name': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.TextInput(attrs={'class': 'form-control'}),
'phone': forms.TextInput(attrs={'class': 'form-control'}),
'website': forms.TextInput(attrs={'class': 'form-control'}),
'ideal_customer': forms.SelectMultiple(attrs={'class': 'form-control edited'}),
'budget': forms.SelectMultiple(attrs={'class': 'form-control edited'}),
'material': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}),
'top_printing_processes': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}),
'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 10}),
'quote_limit': forms.NumberInput(attrs={'class': 'form-control edited'}),
'user': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
self.user = None
if 'user' in kwargs['initial']:
self.user = kwargs['initial'].pop('user')
super(AddCompanyForm, self).__init__(*args, **kwargs)
self.fields['user'].initial = self.user
self.fields['ideal_customer'].label = 'What is your company’s ideal customer that we should send to you?'
self.fields['budget'].label = 'What is your ideal order cost/budget?'
self.fields['top_printing_processes'].label = 'Printing Processes Available'
self.fields['name'].label = 'Company Name'
# self.fields['quote_limit'].required = False
self.fields['display_name'].label = 'Company Display Name'
self.fields['full_name'].label = "Company Contact's Full Name"
self.fields['email'].label = "Company Contact's Email"
class EditCompanyForm(forms.ModelForm):
class Meta:
model = Company
fields = ['name', 'display_name', 'logo', 'address_line_1', 'address_line_2',
'full_name', 'email', 'phone', 'website', 'ideal_customer',
'is_cad_assistance', 'budget',
'material', 'top_printing_processes',
'description', 'user', 'is_expedited', 'shipping', 'quote_limit']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'display_name': forms.TextInput(attrs={'class': 'form-control'}),
'logo': forms.ClearableFileInput(attrs={'class': 'form-control'}),
'address_line_1': forms.TextInput(attrs={'class': 'form-control'}),
'address_line_2': forms.TextInput(attrs={'class': 'form-control'}),
'full_name': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.TextInput(attrs={'class': 'form-control'}),
'phone': forms.TextInput(attrs={'class': 'form-control'}),
'website': forms.TextInput(attrs={'class': 'form-control'}),
'ideal_customer': forms.SelectMultiple(attrs={'class': 'form-control edited'}),
'budget': forms.SelectMultiple(attrs={'class': 'form-control edited'}),
'is_expedited': forms.Select(attrs={'class': 'form-control edited'}, choices=EXPEDITED_CHOICES),
'material': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}),
'top_printing_processes': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}),
'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 10}),
'shipping': forms.SelectMultiple(attrs={'class': 'form-control edited'}),
'quote_limit': forms.NumberInput(attrs={'class': 'form-control edited'}),
'user': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
self.user = None
if 'user' in kwargs['initial']:
self.user = kwargs['initial'].pop('user')
super(EditCompanyForm, self).__init__(*args, **kwargs)
self.fields['user'].initial = self.user
self.fields['ideal_customer'].label = 'What is your company’s ideal customer that we should send to you?'
self.fields['budget'].label = 'What is your ideal order cost/budget?'
self.fields['is_expedited'].label = 'Do you offer an expedited manufacturing process?'
self.fields['shipping'].label = 'Which of the following shipping options do you offer?'
self.fields['quote_limit'].required = False
self.fields['name'].label = 'Company Name'
self.fields['display_name'].label = 'Company Display Name'
self.fields['full_name'].label = "Company Contact's Full Name"
self.fields['email'].label = "Company Contact's Email"
class AddSpecialOfferForm(forms.ModelForm):
class Meta:
model = SpecialOffer
fields = ('text', 'company')
widgets = {
'text': forms.Textarea(attrs={'class': 'form-control', 'rows': 3,
'placeholder': 'eg: 25% off for next order!'}),
'company': forms.HiddenInput()
}
def __init__(self, *args, **kwargs):
self.user = None
if 'company' in kwargs['initial']:
self.company = kwargs['initial'].pop('company')
super(AddSpecialOfferForm, self).__init__(*args, **kwargs)
self.fields['company'].initial = self.company
| mit | -911,143,808,225,559,300 | 49.532258 | 116 | 0.59368 | false |
seberm/Titulky.com-Downloader | prototypes/captcha-qt/captcha.py | 1 | 2280 | import sys
from PyQt4 import QtGui, QtCore, QtNetwork
from PyQt4.QtCore import SLOT, SIGNAL
CAPTCHA_URL = 'http://www.titulky.com/captcha/captcha.php'
MAX_CAPTCHA_LEN = 8
class CaptchaDialog(QtGui.QDialog):
# Signal is emmited if captcha code is sucessfuly re-typed
codeRead = QtCore.pyqtSignal()
def __init__(self, parent = None, flags = 0):
super(CaptchaDialog, self).__init__(parent)
self.setWindowTitle('Re-type captcha')
# Widgets
self.lblCaptcha = QtGui.QLabel('Loading captcha image ...', self)
self.lblCaptcha.setFixedSize(200, 70)
self.btnReload = QtGui.QPushButton('Reload', self)
self.connect(self.btnReload, SIGNAL("clicked()"), self.reloadCaptcha)
self.btnSend = QtGui.QPushButton('Send', self)
self.connect(self.btnSend, SIGNAL("clicked()"), self.sendCode)
self.leCode = QtGui.QLineEdit(self)
self.leCode.setFocus()
self.leCode.setMaxLength(MAX_CAPTCHA_LEN)
layout = QtGui.QGridLayout()
layout.addWidget(self.lblCaptcha)
layout.addWidget(self.btnReload)
layout.addWidget(self.leCode)
layout.addWidget(self.btnSend)
self.setLayout(layout)
# Load captcha into label
self.manager = QtNetwork.QNetworkAccessManager(self)
self.connect(self.manager, SIGNAL("finished(QNetworkReply*)"), self.managerFinished)
self.reloadCaptcha()
def managerFinished(self, reply):
if reply.error() != QtNetwork.QNetworkReply.NoError:
self.lblCaptcha.setText('Error in loading captcha image')
print(reply.errorString())
return
data = reply.readAll()
pixmap = QtGui.QPixmap()
pixmap.loadFromData(data)
self.lblCaptcha.setPixmap(pixmap)
def reloadCaptcha(self):
url = QtCore.QUrl(CAPTCHA_URL)
request = QtNetwork.QNetworkRequest(url)
self.manager.get(request)
def sendCode(self):
self.leCode.setDisabled(True)
self.captchaCode = self.leCode.text()
# We just emit a signal
self.codeRead.emit()
#self.close()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
sys.exit(CaptchaDialog().exec_())
| gpl-3.0 | 2,012,148,595,861,505,000 | 25.206897 | 92 | 0.641228 | false |
CEG-FYP-OpenStack/scheduler | nova/tests/unit/pci/test_request.py | 11 | 7404 | # Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for PCI request."""
from nova import exception
from nova.pci import request
from nova import test
_fake_alias1 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "type-PCI"
}"""
_fake_alias11 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "type-PCI"
}"""
_fake_alias2 = """{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "1111",
"device_type": "N"
}"""
_fake_alias3 = """{
"name": "IntelNIC",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PF"
}"""
class AliasTestCase(test.NoDBTestCase):
def test_good_alias(self):
self.flags(pci_alias=[_fake_alias1])
als = request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"dev_type": "type-PCI"
}
self.assertEqual(expect_dict, als['QuicAssist'][0])
def test_multispec_alias(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias11])
als = request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict1 = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"dev_type": "type-PCI"
}
expect_dict2 = {
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"dev_type": "type-PCI"
}
self.assertEqual(expect_dict1, als['QuicAssist'][0])
self.assertEqual(expect_dict2, als['QuicAssist'][1])
def test_wrong_type_aliase(self):
self.flags(pci_alias=[_fake_alias2])
self.assertRaises(exception.PciInvalidAlias,
request._get_alias_from_config)
def test_wrong_product_id_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "g111",
"vendor_id": "1111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
request._get_alias_from_config)
def test_wrong_vendor_id_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "0xg111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
request._get_alias_from_config)
def test_wrong_cap_type_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "usb",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
request._get_alias_from_config)
def test_dup_aliase(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}""",
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PCI"
}"""])
self.assertRaises(
exception.PciInvalidAlias,
request._get_alias_from_config)
def _verify_result(self, expected, real):
exp_real = zip(expected, real)
for exp, real in exp_real:
self.assertEqual(exp['count'], real.count)
self.assertEqual(exp['alias_name'], real.alias_name)
self.assertEqual(exp['spec'], real.spec)
def test_aliase_2_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'dev_type': 'type-PCI',
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
requests = request._translate_alias_to_requests(
"QuicAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
def test_aliase_2_request_invalid(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
self.assertRaises(exception.PciRequestAliasNotDefined,
request._translate_alias_to_requests,
"QuicAssistX : 3")
def test_get_pci_requests_from_flavor(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'dev_type': "type-PCI",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual(set([1, 3]),
set([p.count for p in requests.requests]))
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
flavor = {}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests.requests)
| apache-2.0 | -4,818,857,070,135,511,000 | 34.425837 | 78 | 0.493922 | false |
hainn8x/gnuradio | gr-blocks/python/blocks/qa_multiply_matrix_ff.py | 19 | 4796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
import numpy
import os
import pmt
from gnuradio import gr, gr_unittest
from gnuradio import blocks
class test_multiply_matrix_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.multiplier = None
def tearDown (self):
self.tb = None
self.multiplier = None
def run_once(self, X_in, A, tpp=gr.TPP_DONT, A2=None, tags=None, msg_A=None):
""" Run the test for given input-, output- and matrix values.
Every row from X_in is considered an input signal on a port. """
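        # For example, with A = [[0, 1], [1, 0]] the two input streams are
        # simply swapped onto the opposite output ports (see test_002 below).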
X_in = numpy.matrix(X_in)
A_matrix = numpy.matrix(A)
(N, M) = A_matrix.shape
self.assertTrue(N == X_in.shape[0])
# Calc expected
Y_out_exp = numpy.matrix(numpy.zeros((M, X_in.shape[1])))
self.multiplier = blocks.multiply_matrix_ff(A, tpp)
if A2 is not None:
self.multiplier.set_A(A2)
A = A2
A_matrix = numpy.matrix(A)
for i in xrange(N):
if tags is None:
these_tags = ()
else:
these_tags = (tags[i],)
self.tb.connect(blocks.vector_source_f(X_in[i].tolist()[0], tags=these_tags), (self.multiplier, i))
sinks = []
for i in xrange(M):
sinks.append(blocks.vector_sink_f())
self.tb.connect((self.multiplier, i), sinks[i])
# Run and check
self.tb.run()
for i in xrange(X_in.shape[1]):
Y_out_exp[:,i] = A_matrix * X_in[:,i]
Y_out = [list(x.data()) for x in sinks]
if tags is not None:
self.the_tags = []
for i in xrange(M):
self.the_tags.append(sinks[i].tags())
self.assertEqual(list(Y_out), Y_out_exp.tolist())
def test_001_t (self):
""" Simplest possible check: N==M, unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(1, 0),
(0, 1),
)
self.run_once(X_in, A)
def test_002_t (self):
""" Switch check: N==M, flipped unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A)
def test_003_t (self):
""" Average """
X_in = (
(1, 1, 1, 1),
(2, 2, 2, 2),
)
A = (
(0.5, 0.5),
(0.5, 0.5),
)
self.run_once(X_in, A)
def test_004_t (self):
""" Set """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A1 = (
(1, 0),
(0, 1),
)
A2 = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A1, A2=A2)
def test_005_t (self):
""" Tags """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1), # Flip them round
(1, 0),
)
tag1 = gr.tag_t()
tag1.offset = 0
tag1.key = pmt.intern("in1")
tag1.value = pmt.PMT_T
tag2 = gr.tag_t()
tag2.offset = 0
tag2.key = pmt.intern("in2")
tag2.value = pmt.PMT_T
self.run_once(X_in, A, tpp=999, tags=(tag1, tag2))
self.assertTrue(pmt.equal(tag1.key, self.the_tags[1][0].key))
self.assertTrue(pmt.equal(tag2.key, self.the_tags[0][0].key))
#def test_006_t (self):
#""" Message passing """
#X_in = (
#(1, 2, 3, 4),
#(5, 6, 7, 8),
#)
#A1 = (
#(1, 0),
#(0, 1),
#)
#msg_A = (
#(0, 1),
#(1, 0),
#)
#self.run_once(X_in, A1, msg_A=msg_A)
if __name__ == '__main__':
#gr_unittest.run(test_multiply_matrix_ff, "test_multiply_matrix_ff.xml")
gr_unittest.run(test_multiply_matrix_ff)
| gpl-3.0 | 1,884,023,812,199,144,700 | 26.883721 | 111 | 0.48457 | false |
uwosh/Campus_Directory_web_service | getAllSubjectsCX.py | 1 | 1441 | # getAllSubjectsCX
import re
import xmlrpclib
import cx_Oracle
Kim_Nguyen_G5 = '192.168.0.1'
Kim_Nguyen_iMac = '192.168.0.1'
Kim_Nguyen_MacBook = '192.168.0.1'
Plone1 = '192.168.0.1'
Plone3 = '192.168.0.1'
def getAllSubjectsCX (self, usexml):
request = self.REQUEST
RESPONSE = request.RESPONSE
remote_addr = request.REMOTE_ADDR
if remote_addr in [Kim_Nguyen_iMac, Kim_Nguyen_MacBook, '127.0.0.1', Plone3 ]:
file = open('/opt/Plone-2.5.5/zeocluster/client1/Extensions/Oracle_Database_Connection_NGUYEN_PRD.txt', 'r')
for line in file.readlines():
if line <> "" and not line.startswith('#'):
connString = line
file.close()
connection = cx_Oracle.connect(connString)
cursor = connection.cursor()
#cursor.execute("""select distinct(subject) from ps_class_tbl order by subject""")
cursor.execute("""select distinct subject, descr from ps_subject_tbl s1 where s1.eff_status = 'A' and s1.effdt = (select max(s2.effdt) from ps_subject_tbl s2 where s1.subject = s2.subject and s2.eff_status = 'A' and s1.institution = s2.institution and s1.acad_org = s2.acad_org)""")
retlist = []
for column_1, column_2 in cursor:
retlist.append([column_1, column_2,])
if usexml == "0":
return retlist
else:
myMarshaller = xmlrpclib.Marshaller()
return myMarshaller.dumps(retlist)
| gpl-2.0 | -8,733,606,344,912,039,000 | 41.382353 | 290 | 0.639833 | false |
crs4/ProMort | promort/utils/__init__.py | 42 | 1098 | # Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| mit | 9,081,209,460,135,976,000 | 56.789474 | 83 | 0.76867 | false |
SecWiki/windows-kernel-exploits | MS14-068/pykek/pyasn1/type/namedtype.py | 200 | 4794 | # NamedType specification for constructed types
import sys
from pyasn1.type import tagmap
from pyasn1 import error
class NamedType:
isOptional = 0
isDefaulted = 0
def __init__(self, name, t):
self.__name = name; self.__type = t
def __repr__(self): return '%s(%s, %s)' % (
self.__class__.__name__, self.__name, self.__type
)
def getType(self): return self.__type
def getName(self): return self.__name
def __getitem__(self, idx):
if idx == 0: return self.__name
if idx == 1: return self.__type
raise IndexError()
class OptionalNamedType(NamedType):
isOptional = 1
class DefaultedNamedType(NamedType):
isDefaulted = 1
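# Illustrative usage (an assumption for context, not taken from this file):
# a constructed type such as univ.Sequence declares its components through a
# NamedTypes collection, e.g.
#
#   class Record(univ.Sequence):
#       componentType = NamedTypes(
#           NamedType('id', univ.Integer()),
#           OptionalNamedType('note', char.UTF8String())
#       )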
class NamedTypes:
def __init__(self, *namedTypes):
self.__namedTypes = namedTypes
self.__namedTypesLen = len(self.__namedTypes)
self.__minTagSet = None
self.__tagToPosIdx = {}; self.__nameToPosIdx = {}
self.__tagMap = { False: None, True: None }
self.__ambigiousTypes = {}
def __repr__(self):
r = '%s(' % self.__class__.__name__
for n in self.__namedTypes:
r = r + '%r, ' % (n,)
return r + ')'
def __getitem__(self, idx): return self.__namedTypes[idx]
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self.__namedTypesLen)
else:
def __bool__(self): return bool(self.__namedTypesLen)
def __len__(self): return self.__namedTypesLen
def getTypeByPosition(self, idx):
if idx < 0 or idx >= self.__namedTypesLen:
raise error.PyAsn1Error('Type position out of range')
else:
return self.__namedTypes[idx].getType()
def getPositionByType(self, tagSet):
if not self.__tagToPosIdx:
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
tagMap = self.__namedTypes[idx].getType().getTagMap()
for t in tagMap.getPosMap():
if t in self.__tagToPosIdx:
raise error.PyAsn1Error('Duplicate type %s' % (t,))
self.__tagToPosIdx[t] = idx
try:
return self.__tagToPosIdx[tagSet]
except KeyError:
raise error.PyAsn1Error('Type %s not found' % (tagSet,))
def getNameByPosition(self, idx):
try:
return self.__namedTypes[idx].getName()
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByName(self, name):
if not self.__nameToPosIdx:
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
n = self.__namedTypes[idx].getName()
if n in self.__nameToPosIdx:
raise error.PyAsn1Error('Duplicate name %s' % (n,))
self.__nameToPosIdx[n] = idx
try:
return self.__nameToPosIdx[name]
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
def __buildAmbigiousTagMap(self):
ambigiousTypes = ()
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
t = self.__namedTypes[idx]
if t.isOptional or t.isDefaulted:
ambigiousTypes = (t, ) + ambigiousTypes
else:
ambigiousTypes = (t, )
self.__ambigiousTypes[idx] = NamedTypes(*ambigiousTypes)
def getTagMapNearPosition(self, idx):
if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
try:
return self.__ambigiousTypes[idx].getTagMap()
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def getPositionNearType(self, tagSet, idx):
if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
try:
return idx+self.__ambigiousTypes[idx].getPositionByType(tagSet)
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def genMinTagSet(self):
if self.__minTagSet is None:
for t in self.__namedTypes:
__type = t.getType()
tagSet = getattr(__type,'getMinTagSet',__type.getTagSet)()
if self.__minTagSet is None or tagSet < self.__minTagSet:
self.__minTagSet = tagSet
return self.__minTagSet
def getTagMap(self, uniq=False):
if self.__tagMap[uniq] is None:
tagMap = tagmap.TagMap()
for nt in self.__namedTypes:
tagMap = tagMap.clone(
nt.getType(), nt.getType().getTagMap(), uniq
)
self.__tagMap[uniq] = tagMap
return self.__tagMap[uniq]
| mit | -2,993,511,859,429,008,400 | 35.318182 | 75 | 0.548185 | false |
cooniur/ansible-modules-core | cloud/amazon/ec2_vol.py | 11 | 19254 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
    sample: "/dev/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,486,378,547,538,384,000 | 31.468803 | 270 | 0.621793 | false |
hplustree/trove | trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py | 4 | 1221 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import Table
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
service_statuses = Table('service_statuses', meta, autoload=True)
idx = Index("service_statuses_instance_id", service_statuses.c.instance_id)
try:
idx.create()
except OperationalError as e:
logger.info(e)
| apache-2.0 | 5,618,446,004,645,272,000 | 33.885714 | 79 | 0.737101 | false |
dockerera/func | func/minion/modules/netapp/options.py | 5 | 1483 | ##
## NetApp Filer 'options' Module
##
## Copyright 2008, Red Hat, Inc
## John Eckersberg <[email protected]>
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
from func.minion.modules import func_module
from func.minion.modules.netapp.common import *
class Options(func_module.FuncModule):
# Update these if need be.
version = "0.0.1"
api_version = "0.0.1"
description = "Interface to the 'options' command"
def get(self, filer, filter=''):
"""
TODO: Document me ...
"""
cmd_opts = ['options', filter]
output = ssh(filer, cmd_opts)
if 'No such option' in output:
return output.strip()
result = {}
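        # Assumed 'options' output format (typical ONTAP): one option per
        # line as "name value", e.g.
        #   autosupport.enable           on
        # so map the first token to the second (or '' when there is none).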
for line in output.split('\n'):
if not line: continue
tokens = line.split()
try:
result[tokens[0]] = tokens[1]
except:
result[tokens[0]] = ''
return result
def set(self, filer, option, value):
"""
TODO: Document me ...
"""
cmd_opts = ['options', option, value]
output = ssh(filer, cmd_opts)
# should return no output (maybe a space or newline)
return check_output("^\s*$", output)
| gpl-2.0 | 3,093,679,156,829,956,600 | 27.519231 | 71 | 0.582603 | false |
ivanalejandro0/bitmask_client | src/leap/bitmask/logs/streamtologger.py | 8 | 2008 | # -*- coding: utf-8 -*-
# streamtologger.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Stream object that redirects writes to a logger instance.
"""
import logging
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
Credits to:
http://www.electricmonk.nl/log/2011/08/14/\
redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
"""
Constructor, defines the logger and level to use to log messages.
:param logger: logger object to log messages.
:type logger: logging.Handler
:param log_level: the level to use to log messages through the logger.
:type log_level: int
look at logging-levels in 'logging' docs.
"""
self._logger = logger
self._log_level = log_level
def write(self, data):
"""
Simulates the 'write' method in a file object.
        It writes the data received in 'data' to the logger 'self._logger'.
:param data: data to write to the 'file'
:type data: str
"""
for line in data.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
"""
Dummy method. Needed to replace the twisted.log output.
"""
pass
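# Illustrative usage sketch (not part of the original bitmask_client module),
# using only the standard library and the class above; the logger name and
# message are examples.
if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger('stdout_demo')
    # Redirect stdout so anything printed is routed through the logger
    # (logging itself writes to stderr here, so there is no feedback loop).
    sys.stdout = StreamToLogger(demo_logger, logging.INFO)
    print('this line is emitted through the logger, not the console')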
| gpl-3.0 | -9,182,516,545,356,664,000 | 33.033898 | 78 | 0.65239 | false |
domenicosolazzo/jroc | tests/jroc/nlp/pos/nltk/test_taggerstorage.py | 1 | 1821 | # -*- coding: utf-8 -*-
from . import TaggerStorageAdapter
import unittest
import os
import codecs
class TaggerStorageAdapterTestCase(unittest.TestCase):
taggerStorage = TaggerStorageAdapter()
currentDirectory = "%s" % (os.path.dirname(os.path.realpath(__file__)), )
testTextsDirectory = "%s/../../../../data/text/" % (currentDirectory, )
def setUp(self):
text = "Det er norsk"
self.taggerStorage = TaggerStorageAdapter()
def tearDown(self):
#self.obtManager.cleanUp()
self.taggerStorage = None
def test_pos_initizialize(self):
"""
Check if the initialization works
"""
result = self.taggerStorage.getTagger()
self.assertTrue(result is not None)
def test_pos_initizialize_with_aubt(self):
"""
Check if the initialization works with AUBT
"""
self.taggerStorage = TaggerStorageAdapter(model='aubt')
result = self.taggerStorage.getTagger()
self.assertTrue(result is not None)
def test_pos_classifier_text_english(self):
"""
Test the classifier tagger with an english text
"""
text = self.helper_readFilename("en/article1.txt")
text = text.decode('utf-8').split()
result = self.taggerStorage.getTagger().tag(text)
expected = (u'Congress\u2019s', u'NNP')
self.assertTrue(isinstance(result, list))
self.assertTrue(len(result) > 0)
self.assertEqual(expected, result[0])
def helper_readFilename(self, filename=''):
stopwords = []
if not filename:
            raise Exception("The filename is empty")
fileToRead = "%s%s" % (self.testTextsDirectory, filename)
with open(fileToRead) as f:
text = f.read()
#f.close()
return text
| gpl-3.0 | -1,996,718,754,081,553,700 | 29.864407 | 78 | 0.616694 | false |
karsev/mbed-os | TESTS/netsocket/host_tests/udp_shotgun.py | 39 | 4553 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import json
import random
import itertools
import time
from sys import stdout
from threading import Thread
from SocketServer import BaseRequestHandler, UDPServer
from mbed_host_tests import BaseHostTest, event_callback
class UDPEchoClientHandler(BaseRequestHandler):
def handle(self):
""" UDP packet handler. Responds with multiple simultaneous packets
"""
data, sock = self.request
pattern = [ord(d) << 4 for d in data]
        # Each byte in the request indicates the size of a packet to receive.
        # Each packet size is shifted over by 4 bits to fit in a byte, which
        # avoids any issues with endianness or decoding.
for packet in pattern:
data = [random.randint(0, 255) for _ in range(packet-1)]
data.append(reduce(lambda a,b: a^b, data))
data = ''.join(map(chr, data))
sock.sendto(data, self.client_address)
# Sleep a tiny bit to compensate for local network
time.sleep(0.01)
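# Illustrative helper (not part of the original host test): each packet built
# above ends with a byte equal to the XOR of all preceding bytes, so a
# receiver can validate a packet by checking that XOR-ing every byte gives 0.
def _packet_is_valid(packet):
    """Sketch of receiver-side validation for the packets generated above."""
    return reduce(lambda a, b: a ^ b, (ord(c) for c in packet)) == 0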
class UDPEchoClientTest(BaseHostTest):
def __init__(self):
"""
Initialise test parameters.
:return:
"""
BaseHostTest.__init__(self)
self.SERVER_IP = None # Will be determined after knowing the target IP
        self.SERVER_PORT = 0 # Let UDPServer choose an arbitrary port
self.server = None
self.server_thread = None
self.target_ip = None
@staticmethod
def find_interface_to_target_addr(target_ip):
"""
Finds IP address of the interface through which it is connected to the target.
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((target_ip, 0)) # Target IP, any port
except socket.error:
s.connect((target_ip, 8000)) # Target IP, 'random' port
ip = s.getsockname()[0]
s.close()
return ip
def setup_udp_server(self):
"""
sets up a UDP server for target to connect and send test data.
:return:
"""
        # !NOTE: There should be a mechanism to assert in the host test
if self.SERVER_IP is None:
self.log("setup_udp_server() called before determining server IP!")
self.notify_complete(False)
# Returning none will suppress host test from printing success code
self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler)
ip, port = self.server.server_address
self.SERVER_PORT = port
self.server.allow_reuse_address = True
self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT))
self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,))
self.server_thread.start()
@staticmethod
def server_thread_func(this):
"""
Thread function to run TCP server forever.
:param this:
:return:
"""
this.server.serve_forever()
@event_callback("target_ip")
def _callback_target_ip(self, key, value, timestamp):
"""
Callback to handle reception of target's IP address.
:param key:
:param value:
:param timestamp:
:return:
"""
self.target_ip = value
self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip)
self.setup_udp_server()
@event_callback("host_ip")
def _callback_host_ip(self, key, value, timestamp):
"""
Callback for request for host IP Addr
"""
self.send_kv("host_ip", self.SERVER_IP)
@event_callback("host_port")
def _callback_host_port(self, key, value, timestamp):
"""
Callback for request for host port
"""
self.send_kv("host_port", self.SERVER_PORT)
def teardown(self):
if self.server:
self.server.shutdown()
self.server_thread.join()
| apache-2.0 | -2,323,919,752,832,379,400 | 31.06338 | 100 | 0.628816 | false |
pnavarro/neutron | neutron/tests/base.py | 1 | 13886 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import traceback
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import testtools
from neutron.agent.linux import external_process
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
def get_rand_name(max_length=None, prefix='test'):
"""Return a random string.
The string will start with 'prefix' and will be exactly 'max_length'.
If 'max_length' is None, then exactly 8 random characters, each
hexadecimal, will be added. In case len(prefix) <= len(max_length),
ValueError will be raised to indicate the problem.
"""
if max_length:
length = max_length - len(prefix)
if length <= 0:
raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
suffix = ''.join(str(random.randint(0, 9)) for i in range(length))
else:
suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:]
return prefix + suffix
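# Illustrative examples (not part of the original file) for the helper above:
#
#     get_rand_name(12, 'net-')    # e.g. 'net-73209481' (always 12 chars)
#     get_rand_name(prefix='tap')  # 'tap' followed by 8 random hex digits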
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
def get_test_timeout(default=0):
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
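# Illustrative sketch (not part of the original file): AttributeDict mirrors
# dict keys as attributes, e.g.
#
#     port = AttributeDict({'id': 'uuid-1', 'status': 'ACTIVE'})
#     port.status    # -> 'ACTIVE', same as port['status']
#     port.missing   # -> raises AttributeError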
class DietTestCase(testtools.TestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = get_test_timeout()
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
self.fail("A SystemExit was raised during the test. %s"
% traceback.format_exception(*exc_info))
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in dic.iteritems():
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def setUp(self):
super(ProcessMonitorFixture, self).setUp()
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf.test includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf.test')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.setup_rpc_mocks()
self.setup_config()
policy.init()
self.addCleanup(policy.reset)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
        If root is None, the file is created in the default temporary
        directory, and that directory is created first if it does not exist
        yet.
        If root is not None, the file is created inside the directory passed
        as the root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
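    # Illustrative usage sketch (not part of the original file); the option
    # names are examples only.  Overrides made this way are undone
    # automatically because setUp() registers CONF.reset as a cleanup:
    #
    #     self.config(debug=True)
    #     self.config(report_interval=30, group='AGENT')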
def setup_coreplugin(self, core_plugin=None):
self.useFixture(PluginFixture(core_plugin))
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
self.core_plugin = core_plugin
def setUp(self):
super(PluginFixture, self).setUp()
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'start_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
raise AssertionError(
'The plugin for this test was not deallocated.')
| apache-2.0 | -2,358,476,202,939,519,500 | 33.542289 | 79 | 0.642086 | false |
ryancanhelpyou/servo | components/script/dom/bindings/codegen/parser/tests/test_enum.py | 134 | 2866 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
enum TestEnum {
"",
"foo",
"bar"
};
interface TestEnumInterface {
TestEnum doFoo(boolean arg);
readonly attribute TestEnum foo;
};
""")
results = parser.finish()
harness.ok(True, "TestEnumInterfaces interface parsed without error.")
    harness.check(len(results), 2, "Should be two productions")
harness.ok(isinstance(results[0], WebIDL.IDLEnum),
"Should be an IDLEnum")
harness.ok(isinstance(results[1], WebIDL.IDLInterface),
"Should be an IDLInterface")
enum = results[0]
harness.check(enum.identifier.QName(), "::TestEnum", "Enum has the right QName")
harness.check(enum.identifier.name, "TestEnum", "Enum has the right name")
harness.check(enum.values(), ["", "foo", "bar"], "Enum has the right values")
iface = results[1]
harness.check(iface.identifier.QName(), "::TestEnumInterface", "Interface has the right QName")
harness.check(iface.identifier.name, "TestEnumInterface", "Interface has the right name")
harness.check(iface.parent, None, "Interface has no parent")
members = iface.members
    harness.check(len(members), 2, "Should be two productions")
harness.ok(isinstance(members[0], WebIDL.IDLMethod),
"Should be an IDLMethod")
method = members[0]
harness.check(method.identifier.QName(), "::TestEnumInterface::doFoo",
"Method has correct QName")
harness.check(method.identifier.name, "doFoo", "Method has correct name")
signatures = method.signatures()
harness.check(len(signatures), 1, "Expect one signature")
(returnType, arguments) = signatures[0]
harness.check(str(returnType), "TestEnum (Wrapper)", "Method type is the correct name")
harness.check(len(arguments), 1, "Method has the right number of arguments")
arg = arguments[0]
harness.ok(isinstance(arg, WebIDL.IDLArgument), "Should be an IDLArgument")
harness.check(str(arg.type), "Boolean", "Argument has the right type")
attr = members[1]
harness.check(attr.identifier.QName(), "::TestEnumInterface::foo",
"Attr has correct QName")
harness.check(attr.identifier.name, "foo", "Attr has correct name")
harness.check(str(attr.type), "TestEnum (Wrapper)", "Attr type is the correct name")
# Now reset our parser
parser = parser.reset()
threw = False
try:
parser.parse("""
enum Enum {
"a",
"b",
"c"
};
interface TestInterface {
void foo(optional Enum e = "d");
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should not allow a bogus default value for an enum")
| mpl-2.0 | 5,266,820,082,998,595,000 | 34.382716 | 99 | 0.624215 | false |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/utils/data.py | 3 | 1165 | # encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
but maintaining the order in which they first appear.
Note: All elements in the input must be hashable for this routine
to work, as it internally uses a set for efficiency reasons.
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def flatten(seq):
"""Flatten a list of lists (NOT recursive, only works for 2d lists)."""
return [x for subseq in seq for x in subseq]
def chop(seq, size):
"""Chop a sequence into chunks of the given size."""
return [seq[i:i+size] for i in xrange(0,len(seq),size)]
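# Illustrative examples (not part of the original IPython source):
#
#     uniq_stable([3, 1, 3, 2, 1])     # -> [3, 1, 2]
#     flatten([[1, 2], [3], [4, 5]])   # -> [1, 2, 3, 4, 5]
#     chop(range(7), 3)                # -> [[0, 1, 2], [3, 4, 5], [6]]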
| apache-2.0 | -2,935,835,936,047,431,700 | 32.285714 | 78 | 0.593991 | false |