repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
pozytywnie/webapp-health-monitor | webapp_health_monitor/management/commands/verify.py | 1 | 1307 |
import importlib
import sys
from django.apps import apps
from django.core.management.base import BaseCommand
from webapp_health_monitor.verification_suit import VerificationSuit
class Command(BaseCommand):
SUBMODULE_NAME = 'verificators'
def add_arguments(self, parser):
parser.add_argument('--tag', type=str, default=[], action='append', dest='tags')
def handle(self, tags, **kwargs):
submodules = self._get_verificator_modules()
for submodule in submodules:
try:
importlib.import_module(submodule)
except ImportError as e:
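# Only swallow the ImportError when the optional 'verificators' submodule itself is missing; any other import failure is re-raised.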
if not self._import_error_concerns_verificator(submodule, e):
raise e
result = VerificationSuit(tags).run()
self.stdout.write('{}\n'.format(result.report()))
sys.exit(result.has_failed())
def _get_verificator_modules(self):
for app in apps.get_app_configs():
yield '.'.join([app.module.__name__, self.SUBMODULE_NAME])
def _import_error_concerns_verificator(self, submodule, error):
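# Python 3 quotes the missing module name in its ImportError message while Python 2 does not, hence the version-specific comparison.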
if sys.version_info >= (3, 0):
return str(error) == "No module named '{}'".format(submodule)
else:
return error.message == "No module named {}".format(
self.SUBMODULE_NAME)
| mit | -4,889,517,402,081,881,000 | 35.305556 | 88 | 0.625096 | false |
cbears/octoform | test/forms/studentaid.py | 1 | 4935 |
# -*- coding: utf-8 -*-
from django.core.management import setup_environ
import forms.form_models.bfsql as bfsql
import forms.form_models.bform as bforms
from forms.form_models.bfbdb import *
bf = bforms.BerkeleyForm()
bf.baseFont='palatino'
bf.theme=('demos',)
"""
Don't just write documents, create an information workflow. Berkeley Forms
is the solution to collecting, managing, storing, and analyzing data.
Berkeley forms makes IT easy!
"""
qdb = [
{'type': 'Info',
'label':'|bold||large||larger|THANK YOU|/larger| for visiting the Student Aid office. By answering the questions below, you can help us improve our services to students.\n \ \n\n |/large||/bold|',
'style': {'font': 'buffy'}},
{'type': 'Section', 'key': 12345, 'style': {'start': True, 'layout': 'list', 'columns': 1}, 'label': ' ' },
{'type': 'Info',
'label':'Your input is valued, regardless of how positive or negative your experience was today.',
'style': {'layout': 'list'}},
{'type':'Info','label':'The questionnaire should take you less than 5 minutes.',
'style': {'layout': 'list'}},
{'type':'Info','label':'Your responses will be kept confidential and will not be associated with your name, ever.',
'style': {'layout': 'list'}},
{'type':'Info','label':'When you are finished, you may fold this form and place it in the box at the Student Aid desk, or drop it in campus mail if folded so the campus mail address is exposed.',
'style': {'layout': 'list'}},
{'type': 'Section', 'key': 12345, 'style': {'end': True, 'layout': 'list'}, 'label': ' ' },
{'type':'Info',
'label':'|invert||bold|Why did you visit the student aid office today?|/bold||br||indent||italic|Please Check all that apply |/italic||OCR||/invert|' },
{'type': 'CheckMany',
'style': {'layout': 'column', 'columns': 2},
'choices':(
'To resolve a problem',
'To ask a question specific to my financial aid',
'To ask a general question about student aid',
'To get general information',
'Other: |ul|'
)},
{'type':'Info','label':"""
|invert||bold||small|
For the statements below, strike the choice to the right that indicates how much you agree or disagree
|/small||/bold||br||italic||indent|
If the statement does not apply to your experience, choose ``NA.''
|/italic||/invert|
"""},
{'type': 'Section', 'key': 12346, 'style': {
'start': True,
'layout': 'grid',
'dimension': (10,4,1),
'label': ' '
}
},
{'type': 'Info',
'label': '',
'style': {'layout': 'grid', 'rotate': 315, 'shift': -.325},
'choices': (
'|small|Strongly Disagree|/small|',
'|small|Disagree|/small|',
'|small|Neither Agree nor Disagree|/small|',
'|small|Agree|/small|',
'|small|Strongly Agree|/small|',
'|small|NA|/small|'
)},
{'type' : 'CheckOne',
'label': 'I am satisfied with the outcome of my visit',
'style': {'layout': 'grid'},
'choices': ( 'SD', 'D', 'N', 'A', 'SA', 'NA') },
{'type' : 'CheckOne',
'label': 'I felt welcome during my visit',
'style': {'layout': 'grid'},
'choices': ( 'SD', 'D', 'N', 'A', 'SA', 'NA') },
{'type' : 'CheckOne',
'label': 'I was able to accomplish the task I visited for',
'style': {'layout': 'grid'},
'choices': ( 'SD', 'D', 'N', 'A', 'SA', 'NA') },
{'type' : 'CheckOne',
'label': 'I understand any future steps I must take to resolve my problem/question',
'style': {'layout': 'grid'},
'choices': ( 'SD', 'D', 'N', 'A', 'SA', 'NA') },
{'type' : 'CheckOne',
'label': 'I am confident that future visits to this office will be productive',
'style': {'layout': 'grid'},
'choices': ( 'SD', 'D', 'N', 'A', 'SA', 'NA') },
{'type': 'Section', 'key': 12346, 'style': {'end': True, 'columns': 1, 'layout': 'grid'}, 'label': ' ' },
{'type':'Info','label':"""
|invert||bold|
The following questions help us understand why your experiences may differ from other students.
|/bold||br||italic||indent|
Please select an answer to the right.
|/italic||/invert|
""" },
{'type': 'Section', 'key': 12347, 'style': {
'start': True,
'layout': 'grid',
'dimension': (10,4,1),
'label': ' ' }
},
{'type' : 'CheckOne',
'label': 'How many times have you visited the student aid office for |bold|this specific reason|/bold|? ',
'style': {'layout': 'grid'},
'choices': ( '0', '1', '2', '3', '4', '5+') },
{'type' : 'CheckOne',
'label': 'How many times have you visited the student aid office for |bold|other reasons|/bold| in the past? ',
'style': {'layout': 'grid'},
'choices': ( '0', '1', '2', '3', '4', '5+') },
{'type' : 'CheckOne',
'label': 'Were you referred to another office at Eastern University during your visit? ',
'style': {'layout': 'grid'},
'choices': ( 'Yes', 'No' ) },
{'type': 'Section', 'key': 12347, 'style': {'end': True, 'layout': 'grid', 'dimension': (10,4,1), 'label': ' ' }},
{'type':'Info','label':'Completed On: '},
]
bforms.createQuestions(bf,qdb)
| gpl-3.0 | 6,844,074,033,647,561,000 | 34.76087 | 198 | 0.599189 | false |
MeGotsThis/BotGotsThis | bot/_config.py | 1 | 7551 |
import configparser
import os
import aiofiles
from typing import Dict, List, Optional, Union # noqa: F401
class BotConfig:
def __init__(self) -> None:
self.development: bool = False
self.botnick: str = ''
self.password: str = ''
self.owner: str = ''
self.awsServer: str = ''
self.awsPort: int = 0
self.messageLimit: int = 500
self.modLimit: int = 100
self.modSpamLimit: int = 100
self.publicLimit: float = 20
self.publicDelay: float = 1
self.messageSpan: float = 30
self.whiperLimit: float = 100
self.whiperSpan: float = 30
self.customMessageCooldown: float = 5
self.customMessageUserCooldown: float = 20
self.customMessageUrlTimeout: float = 5
self.spamModeratorCooldown: float = 20
self.warningDuration: float = 20
self.moderatorDefaultTimeout: List[int] = [0, 0, 0]
self.httpTimeout: float = 60
self.joinLimit: int = 50
self.joinPerSecond: float = 15
self.database: Dict[str, str] = {
'main': '',
'oauth': '',
'timeout': '',
'timezone': '',
}
self.connections: Dict[str, int] = {
'main': 10,
'oauth': 10,
'timeout': 10,
'timezone': 10,
}
self.redis: Dict[str, Optional[Union[str, int]]] = {
'host': 'localhost',
'port': 6379,
'db': None,
'password': None,
'connections': 10,
}
self.pkgs: List[str] = ['botgotsthis']
self.twitchClientId: str = ''
self.ircLogFolder: str = ''
self.exceptionLog: str = ''
async def read_config(self) -> None:
ini: configparser.ConfigParser
if os.path.isfile('twitch.ini'):
ini = configparser.ConfigParser()
async with aiofiles.open('twitch.ini', 'r',
encoding='utf-8') as file:
ini.read_string(await file.read(None))
self.botnick = str(ini['BOT']['botnick']).lower()
self.password = str(ini['BOT']['password'])
self.owner = str(ini['BOT']['owner']).lower()
if os.path.isfile('config.ini'):
ini = configparser.ConfigParser()
async with aiofiles.open('config.ini', 'r',
encoding='utf-8') as file:
ini.read_string(await file.read(None))
self.awsServer = str(ini['TWITCH']['awsServer'])
self.awsPort = int(ini['TWITCH']['awsPort'])
self.development = bool(int(ini['BOT']['development']))
self.messageLimit = int(ini['BOT']['messageLimit'])
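# The caps of 100 and 20 below appear to mirror Twitch's per-30-second message limits for moderators and regular users (assumption).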
self.modLimit = min(int(ini['BOT']['modLimit']), 100)
self.modSpamLimit = min(int(ini['BOT']['modSpamLimit']), 100)
self.publicLimit = min(int(ini['BOT']['publicLimit']), 20)
self.publicDelay = float(ini['BOT']['publicDelay'])
self.messageSpan = float(ini['BOT']['messageSpan'])
self.whiperLimit = float(ini['BOT']['whiperLimit'])
self.whiperSpan = float(ini['BOT']['whiperSpan'])
f: float
i: int
f = float(ini['BOT']['customMessageCooldown'])
self.customMessageCooldown = f
if self.customMessageCooldown <= 0:
self.customMessageCooldown = 20.0
f = float(ini['BOT']['customMessageUserCooldown'])
self.customMessageUserCooldown = f
if self.customMessageUserCooldown <= 0:
self.customMessageUserCooldown = 20.0
f = float(ini['BOT']['customMessageUrlTimeout'])
self.customMessageUrlTimeout = f
if self.customMessageUrlTimeout <= 0:
self.customMessageUrlTimeout = 5.0
f = float(ini['BOT']['spamModeratorCooldown'])
self.spamModeratorCooldown = f
if self.spamModeratorCooldown <= 0:
self.spamModeratorCooldown = 20.0
self.warningDuration = float(ini['BOT']['warningDuration'])
if self.warningDuration <= 0:
self.warningDuration = 20.0
self.moderatorDefaultTimeout = [0, 0, 0]
i = int(ini['BOT']['moderatorDefaultTimeout0'])
self.moderatorDefaultTimeout[0] = i
if self.moderatorDefaultTimeout[0] <= 0:
self.moderatorDefaultTimeout[0] = 0
i = int(ini['BOT']['moderatorDefaultTimeout1'])
self.moderatorDefaultTimeout[1] = i
if self.moderatorDefaultTimeout[1] <= 0:
self.moderatorDefaultTimeout[1] = 0
i = int(ini['BOT']['moderatorDefaultTimeout2'])
self.moderatorDefaultTimeout[2] = i
if self.moderatorDefaultTimeout[2] <= 0:
self.moderatorDefaultTimeout[2] = 0
self.joinLimit = min(int(ini['BOT']['joinLimit']), 50)
self.joinPerSecond = float(ini['BOT']['joinPerSecond'])
if self.joinPerSecond <= 0:
self.joinPerSecond = 20.0
self.httpTimeout = float(ini['BOT']['httpTimeout'])
self.ircLogFolder = str(ini['BOT']['ircLogFolder'])
self.exceptionLog = str(ini['BOT']['exceptionLog'])
if self.ircLogFolder:
if not os.path.isdir(self.ircLogFolder):
os.mkdir(self.ircLogFolder)
if os.path.isfile('pkg.ini'):
ini = configparser.ConfigParser()
async with aiofiles.open('pkg.ini', 'r',
encoding='utf-8') as file:
ini.read_string(await file.read(None))
self.pkgs.clear()
option: str
_value: str
for option, _value in ini.items('PKG'):
if ini.getboolean('PKG', option):
self.pkgs.append(option)
if 'botgotsthis' not in self.pkgs:
self.pkgs.append('botgotsthis')
if os.path.isfile('database.ini'):
ini = configparser.ConfigParser()
async with aiofiles.open('database.ini', 'r',
encoding='utf-8') as file:
ini.read_string(await file.read(None))
for s in ['main', 'oauth', 'timeout', 'timezone']:
self.database[s] = str(ini['DATABASE'][s])
if ini['CONNECTIONS'][s]:
i = int(ini['CONNECTIONS'][s])
if i:
self.connections[s] = i
if ini['REDIS']['host'] and ini['REDIS']['port']:
self.redis['host'] = ini['REDIS']['host']
self.redis['port'] = ini['REDIS']['port']
if 'db' in ini['REDIS']:
self.redis['db'] = int(ini['REDIS']['db'])
if 'password' in ini['REDIS']:
self.redis['password'] = ini['REDIS']['password']
if ini['REDIS']['connections']:
i = int(ini['REDIS']['connections'])
self.redis['connections'] = i
if os.path.isfile('twitchApi.ini'):
ini = configparser.ConfigParser()
async with aiofiles.open('twitchApi.ini', 'r',
encoding='utf-8') as file:
ini.read_string(await file.read(None))
self.twitchClientId = str(ini['twitch']['twitchClientID'])
| gpl-3.0 | 39,929,747,576,081,740 | 36.197044 | 73 | 0.521388 | false |
michalfita/nis2ldap | tests/test_ldap_group.py | 1 | 4491 |
# nis2ldap - Smarter NIS to LDAP synchronization tool
# Copyright (C) 2015 Michal Fita
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
import ldap
import ldap_access.group as lag
import records.group
import toolz.ldif2dict
from mockldap import MockLdap
# This is the content of our mock LDAP directory. We use LDIF as a more readable form, closer to JSON.
# It is then converted to the dictionary form used by the mock.
test_ldif = """
dn: ou=Group,dc=test,dc=opensource,dc=io
objectClass: top
ou: Group
dn: cn=foobar,ou=Group,dc=test,dc=opensource,dc=io
cn: foobar
gidNumber: 266
memberUid: foobar
objectClass: posixGroup
objectClass: top
userPassword: x
dn: cn=admin,dc=test,dc=opensource,dc=io
cn: admin
objectClass: posixUser
objectClass: inetOrgPerson
objectClass: top
uid: admin
uidNumber: 666
userPassword: neptune
"""
class TestGroupLdapExport(unittest.TestCase):
"""
Test export group records to LDAP.
"""
base_dn = 'dc=test,dc=opensource,dc=io'
directory = toolz.ldif2dict(test_ldif)
@classmethod
def setUpClass(cls):
# We only need to create the MockLdap instance once. The content we
# pass in will be used for all LDAP connections.
cls.mockldap = MockLdap(cls.directory)
@classmethod
def tearDownClass(cls):
del cls.mockldap
def setUp(self):
# Patch ldap.initialize
self.mockldap.start()
self.ldapobj = self.mockldap['ldap://localhost/']
def tearDown(self):
# Stop patching ldap.initialize and reset state.
self.mockldap.stop()
del self.ldapobj
def test_check_entry_non_existing(self):
# self.ldapobj.simple_bind_s('cn=admin,' + self.base_dn, 'neptune')
result = lag.check_entry(self.ldapobj, self.base_dn, 'spongebob')
self.assertEquals(self.ldapobj.methods_called(), ['search_s'])
self.assertEquals(result, False)
def test_check_entry_existing(self):
# self.ldapobj.simple_bind_s('cn=admin,' + self.base_dn, 'neptune')
result = lag.check_entry(self.ldapobj, self.base_dn, 'foobar')
self.assertEquals(self.ldapobj.methods_called(), ['search_s'])
self.assertEquals(result, True)
def test_add_entry_non_existing(self):
rec = records.group.produce("emperors:x:4044:bunny,duffy,roadrunner")
result = lag.add_entry(self.ldapobj, self.base_dn, rec)
self.assertEquals(self.ldapobj.methods_called(), ['add_s'])
self.assertEquals(result, None)
self.assertEquals(self.ldapobj.directory['cn=emperors,ou=Group,' + self.base_dn],
{'objectClass': ['posixGroup', 'top'],
'cn': ['emperors'],
'userPassword': ['x'],
'gidNumber': [4044],
'memberUid': ['bunny', 'duffy', 'roadrunner']})
def test_add_entry_existing(self):
rec = records.group.produce("foobar:x:266:foobar")
with self.assertRaises(ldap.ALREADY_EXISTS):
lag.add_entry(self.ldapobj, self.base_dn, rec)
self.assertListEqual(self.ldapobj.methods_called(), ['add_s'])
def test_update_entry_existing(self):
rec = records.group.produce("foobar:x:266:foobar")
lag.update_entry(self.ldapobj, self.base_dn, rec)
def xxx_test_update_entry_non_existing(self):
"""
This test cause me trouble as my experience with real LDAP is different in this
case as I'm not getting ldap.NO_SUCH_OBJECT exception.
"""
rec = records.group.produce("emperors:x:4044:bunny,duffy,roadrunner")
with self.assertRaises(SystemExit) as cm:
lag.update_entry(self.ldapobj, self.base_dn, rec)
self.assertEquals(cm.exception.code, 200)
self.assertListEqual(self.ldapobj.methods_called(), ['search_s'])
| gpl-3.0 | 1,420,281,667,251,838,500 | 35.217742 | 99 | 0.670452 | false |
fastcoinproject/fastcoin | contrib/spendfrom/spendfrom.py | 1 | 10059 |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
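# bitcoin.conf has no [section] headers, so wrap the file object and prepend a fake [all] section that SafeConfigParser can parse.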
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
fee = total_in - total_out # the actual fee is whatever the outputs do not account for
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit | 3,699,015,106,062,077,400 | 36.394052 | 111 | 0.619445 | false |
andrei987/services | src/releng_tooltool/releng_tooltool/aws.py | 1 | 4871 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import boto
import boto.s3
import boto.sqs
import importlib
import json
from backend_common import log
import threading
import time
logger = log.get_logger()
class StopListening(Exception):
pass
class AWS(object):
def __init__(self, config):
self.config = config
self._connections = {}
self._queues = {}
self._listeners = []
def connect_to(self, service_name, region_name):
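# Connections are cached per (service, region) pair so repeated calls reuse the same boto connection.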
key = service_name, region_name
if key in self._connections:
return self._connections[key]
# handle special cases
try:
fn = getattr(self, 'connect_to_' + service_name)
except AttributeError:
fn = self.connect_to_default
conn = fn(service_name, region_name)
self._connections[key] = conn
return conn
def connect_to_default(self, service_name, region_name):
# for the service, import 'boto.$service'
service = importlib.import_module('boto.' + service_name)
for region in service.regions():
if region.name == region_name:
break
else:
raise RuntimeError("invalid region %r" % (region_name,))
connect_fn = getattr(boto, 'connect_' + service_name)
return connect_fn(
region=region,
aws_access_key_id=self.config.get('access_key_id'),
aws_secret_access_key=self.config.get('secret_access_key'),
)
def connect_to_s3(self, service_name, region_name):
# special case for S3, which boto does differently than
# the other services
return boto.s3.connect_to_region(
region_name=region_name,
aws_access_key_id=self.config.get('access_key_id'),
aws_secret_access_key=self.config.get('secret_access_key'),
)
def get_sqs_queue(self, region_name, queue_name):
key = (region_name, queue_name)
if key in self._queues:
return self._queues[key]
sqs = self.connect_to('sqs', region_name)
queue = sqs.get_queue(queue_name)
if not queue:
raise RuntimeError("no such queue %r in %s" %
(queue_name, region_name))
self._queues[key] = queue
return queue
def sqs_write(self, region_name, queue_name, body):
queue = self.get_sqs_queue(region_name, queue_name)
m = boto.sqs.message.Message(body=json.dumps(body))
queue.write(m)
def sqs_listen(self, region_name, queue_name, read_args=None):
def decorate(func):
self._listeners.append(
(region_name, queue_name, read_args or {}, func))
return func
return decorate
def _listen_thd(self, region_name, queue_name, read_args, listener):
logger.info(
"Listening to SQS queue %r in region %s", queue_name, region_name)
try:
queue = self.get_sqs_queue(region_name, queue_name)
except Exception:
logger.exception(
"While getting queue %r in region %s; listening cancelled",
queue_name, region_name,
)
return
while True:
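# wait_time_seconds=20 enables SQS long polling, so polling an empty queue does not busy-spin.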
msg = queue.read(wait_time_seconds=20, **read_args)
if msg:
try:
listener(msg)
except StopListening: # for tests
break
except Exception:
logger.exception("while invoking %r", listener)
# note that we do nothing with the message; it will
# remain invisible for a while, then reappear and maybe
# cause another exception
continue
msg.delete()
def _spawn_sqs_listeners(self, _testing=False):
# launch a listening thread for each SQS queue
threads = []
for region_name, queue_name, read_args, listener in self._listeners:
thd = threading.Thread(
name="%s/%r -> %r" % (region_name, queue_name, listener),
target=self._listen_thd,
args=(region_name, queue_name, read_args, listener))
# set the thread to daemon so that SIGINT will kill the process
thd.daemon = True
thd.start()
threads.append(thd)
# sleep forever, or until we get a SIGINT, at which point the remaining
# threads will be killed during process shutdown
if not _testing: # pragma: no cover
while True:
time.sleep(2 ** 31)
return threads
| mpl-2.0 | -1,744,845,335,647,182,600 | 33.062937 | 79 | 0.571546 | false |
MoRgUiJu/morguiju.repo | plugin.video.pelisalacarta/channels/pelisadicto.py | 1 | 10054 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for cuevana
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
logger.info("[pelisadicto.py] mainlist")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Últimas agregadas" , action="agregadas", url="http://pelisadicto.com", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel, title="Listado por género" , action="porGenero", url="http://pelisadicto.com"))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search", url="http://pelisadicto.com") )
return itemlist
def porGenero(item):
logger.info("[pelisadicto.py] porGenero")
itemlist = []
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Acción",url="http://pelisadicto.com/genero/Acción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Adulto",url="http://pelisadicto.com/genero/Adulto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Animación",url="http://pelisadicto.com/genero/Animación/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Aventura",url="http://pelisadicto.com/genero/Aventura/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Biográfico",url="http://pelisadicto.com/genero/Biográfico/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Ciencia Ficción",url="http://pelisadicto.com/genero/Ciencia Ficción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Cine Negro",url="http://pelisadicto.com/genero/Cine Negro/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Comedia",url="http://pelisadicto.com/genero/Comedia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Corto",url="http://pelisadicto.com/genero/Corto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Crimen",url="http://pelisadicto.com/genero/Crimen/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Deporte",url="http://pelisadicto.com/genero/Deporte/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Documental",url="http://pelisadicto.com/genero/Documental/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Drama",url="http://pelisadicto.com/genero/Drama/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Familiar",url="http://pelisadicto.com/genero/Familiar/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Fantasía",url="http://pelisadicto.com/genero/Fantasía/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Guerra",url="http://pelisadicto.com/genero/Guerra/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Historia",url="http://pelisadicto.com/genero/Historia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Misterio",url="http://pelisadicto.com/genero/Misterio/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Música",url="http://pelisadicto.com/genero/Música/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Musical",url="http://pelisadicto.com/genero/Musical/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Romance",url="http://pelisadicto.com/genero/Romance/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Terror",url="http://pelisadicto.com/genero/Terror/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Thriller",url="http://pelisadicto.com/genero/Thriller/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Western",url="http://pelisadicto.com/genero/Western/1", viewmode="movie_with_plot"))
return itemlist
def search(item,texto):
logger.info("[pelisadicto.py] search")
'''
texto_get = texto.replace(" ","%20")
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post)
'''
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s" % texto
try:
return agregadas(item)
# The exception is caught so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def agregadas(item):
logger.info("[pelisadicto.py] agregadas")
itemlist = []
'''
# Descarga la pagina
if "?search=" in item.url:
url_search = item.url.split("?search=")
data = scrapertools.cache_page(url_search[0], url_search[1])
else:
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
'''
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
# Extract the entries
fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<ul class="thumbnails">(.*?)</ul>'))
#<li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" title="Ver 101 dálmatas Online" class="thumbnail thumbnail-artist-grid"><img class="poster" style="width: 180px; height: 210px;" src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/><div class="calidad">DVD</div><div class="idiomas"><img src="/img/1.png" height="20" width="30" /></div><div class="thumbnail-artist-grid-name-container-1"><div class="thumbnail-artist-grid-name-container-2"><span class="thumbnail-artist-grid-name">101 dálmatas</span></div></div></a></li>
patron = 'href="([^"]+)".*?' # url
patron+= 'src="([^"]+)" ' # thumbnail
patron+= 'alt="([^"]+)' # title
matches = re.compile(patron,re.DOTALL).findall(fichas)
for url,thumbnail,title in matches:
url=urlparse.urljoin(item.url,url)
thumbnail = urlparse.urljoin(url,thumbnail)
itemlist.append( Item(channel=item.channel, action="findvideos", title=title+" ", fulltitle=title , url=url , thumbnail=thumbnail , show=title) )
# Pagination
try:
#<ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul>
current_page_number = int(scrapertools.get_match(item.url,'/(\d+)$'))
item.url = re.sub(r"\d+$","%s",item.url)
next_page_number = current_page_number + 1
next_page = item.url % (next_page_number)
itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page, viewmode="movie_with_plot") )
except: pass
return itemlist
def findvideos(item):
logger.info("[pelisadicto.py] findvideos")
itemlist = []
data = re.sub(r"\n|\s{2}","",scrapertools.cache_page(item.url))
#<!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita, los dálmatas protagonistas, son una feliz pareja canina que vive rodeada de sus cachorros y con sus amos Roger y Anita. Pero su felicidad está amenazada. Cruella de Ville, una pérfida mujer que vive en una gran mansión y adora los abrigos de pieles, se entera de que los protagonistas tienen quince cachorros dálmatas. Entonces, la idea de secuestrarlos para hacerse un exclusivo abrigo de pieles se convierte en una obsesión enfermiza. Para hacer realidad su sueño contrata a dos ladrones.</p>
patron = "<!-- SINOPSIS --> "
patron += "<h2>[^<]+</h2> "
patron += "<p>([^<]+)</p>"
matches = re.compile(patron,re.DOTALL).findall(data)
plot = matches[0]
# Download the page
data = scrapertools.cache_page(item.url)
patron = '<tr>.*?'
patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
idioma =""
if "/img/1.png" in scrapedidioma: idioma="Castellano"
if "/img/2.png" in scrapedidioma: idioma="Latino"
if "/img/3.png" in scrapedidioma: idioma="Subtitulado"
title = item.title + " ["+scrapedcalidad+"][" + idioma + "][" + scrapedserver + "]"
itemlist.append( Item(channel=item.channel, action="play", title=title, fulltitle=title , url=scrapedurl , thumbnail="" , plot=plot , show = item.show) )
return itemlist
def play(item):
logger.info("[pelisadicto.py] play")
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
| gpl-2.0 | 6,785,634,815,207,113,000 | 57.22093 | 579 | 0.67156 | false |
Inspq/ansible | lib/ansible/plugins/action/fetch.py | 1 | 8462 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import base64
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for fetch operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = 'check mode not (yet) supported for this module'
return result
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5', True)))
if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
result['msg'] = "validate_checksum and validate_md5 cannot both be specified"
if source is None or dest is None:
result['msg'] = "src and dest are required"
if result.get('msg'):
result['failed'] = True
return result
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source)
remote_checksum = None
if not self._play_context.become:
# calculate checksum for the remote file, don't bother if using become as slurp will be used
# Force remote_checksum to follow symlinks because fetch always follows symlinks
remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)
# use slurp if permissions are lacking or privilege escalation is needed
remote_data = None
if remote_checksum in ('1', '2', None):
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('failed'):
if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
result['msg'] = "the remote file does not exist, not transferring, ignored"
result['file'] = source
result['changed'] = False
else:
result.update(slurpres)
return result
else:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
# calculate the destination name
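# join_path('a', '') exposes the remote shell's path separator; when it differs from the local os.sep (e.g. a Windows target), the source needs unquoting and backslash normalisation first.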
if os.path.sep not in self._connection._shell.join_path('a', ''):
source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._play_context.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
if remote_checksum in ('0', '1', '2', '3', '4'):
# these don't fail because you may want to transfer a log file that
# possibly MAY exist but keep going to fetch other log files
result['changed'] = False
result['file'] = source
if remote_checksum == '0':
result['msg'] = "unable to calculate the checksum of the remote file"
elif remote_checksum == '1':
if fail_on_missing:
result['failed'] = True
del result['changed']
result['msg'] = "the remote file does not exist"
else:
result['msg'] = "the remote file does not exist, not transferring, ignored"
elif remote_checksum == '2':
result['msg'] = "no read permission on remote file, not transferring, ignored"
elif remote_checksum == '3':
result['msg'] = "remote file is a directory, fetch cannot work on directories"
elif remote_checksum == '4':
result['msg'] = "python isn't present on the system. Unable to compute checksum"
return result
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
try:
f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
f.write(remote_data)
f.close()
except (IOError, OSError) as e:
raise AnsibleError("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
result.update(dict(failed=True, md5sum=new_md5,
msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
checksum=new_checksum, remote_checksum=remote_checksum))
else:
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
else:
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
return result
| gpl-3.0 | -1,681,434,668,187,707,000 | 44.010638 | 152 | 0.589931 | false |
MikeAmy/django-webtest | setup.py | 1 | 1559 |
#!/usr/bin/env python
from distutils.core import setup
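# Fall back to setuptools only for the commands plain distutils does not provide (egg_info, develop).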
for cmd in ('egg_info', 'develop'):
import sys
if cmd in sys.argv:
from setuptools import setup
version='1.7.4'
setup(
name='django-webtest',
version=version,
author='Mikhail Korobov',
author_email='[email protected]',
packages=['django_webtest'],
url='https://bitbucket.org/kmike/django-webtest/',
license = 'MIT license',
description = """ Instant integration of Ian Bicking's WebTest
(http://webtest.pythonpaste.org/) with django's testing framework.""",
long_description = open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
requires = ['webtest (>= 1.3.3)', 'django (>= 1.2.7)'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
],
)
| mit | 169,190,286,577,443,620 | 33.644444 | 87 | 0.610006 | false |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_compiled_operation.py | 1 | 21129 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1CompiledOperation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'version': 'float',
'kind': 'str',
'name': 'str',
'description': 'str',
'tags': 'list[str]',
'presets': 'list[str]',
'queue': 'str',
'cache': 'V1Cache',
'termination': 'V1Termination',
'plugins': 'V1Plugins',
'schedule': 'object',
'events': 'list[V1EventTrigger]',
'build': 'V1Build',
'hooks': 'list[V1Hook]',
'dependencies': 'list[str]',
'trigger': 'V1TriggerPolicy',
'conditions': 'str',
'skip_on_upstream_skip': 'bool',
'matrix': 'object',
'joins': 'dict(str, V1Join)',
'inputs': 'list[V1IO]',
'outputs': 'list[V1IO]',
'contexts': 'list[V1IO]',
'is_approved': 'bool',
'cost': 'float',
'run': 'object'
}
attribute_map = {
'version': 'version',
'kind': 'kind',
'name': 'name',
'description': 'description',
'tags': 'tags',
'presets': 'presets',
'queue': 'queue',
'cache': 'cache',
'termination': 'termination',
'plugins': 'plugins',
'schedule': 'schedule',
'events': 'events',
'build': 'build',
'hooks': 'hooks',
'dependencies': 'dependencies',
'trigger': 'trigger',
'conditions': 'conditions',
'skip_on_upstream_skip': 'skipOnUpstreamSkip',
'matrix': 'matrix',
'joins': 'joins',
'inputs': 'inputs',
'outputs': 'outputs',
'contexts': 'contexts',
'is_approved': 'isApproved',
'cost': 'cost',
'run': 'run'
}
def __init__(self, version=None, kind=None, name=None, description=None, tags=None, presets=None, queue=None, cache=None, termination=None, plugins=None, schedule=None, events=None, build=None, hooks=None, dependencies=None, trigger=None, conditions=None, skip_on_upstream_skip=None, matrix=None, joins=None, inputs=None, outputs=None, contexts=None, is_approved=None, cost=None, run=None, local_vars_configuration=None): # noqa: E501
"""V1CompiledOperation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._version = None
self._kind = None
self._name = None
self._description = None
self._tags = None
self._presets = None
self._queue = None
self._cache = None
self._termination = None
self._plugins = None
self._schedule = None
self._events = None
self._build = None
self._hooks = None
self._dependencies = None
self._trigger = None
self._conditions = None
self._skip_on_upstream_skip = None
self._matrix = None
self._joins = None
self._inputs = None
self._outputs = None
self._contexts = None
self._is_approved = None
self._cost = None
self._run = None
self.discriminator = None
if version is not None:
self.version = version
if kind is not None:
self.kind = kind
if name is not None:
self.name = name
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if presets is not None:
self.presets = presets
if queue is not None:
self.queue = queue
if cache is not None:
self.cache = cache
if termination is not None:
self.termination = termination
if plugins is not None:
self.plugins = plugins
if schedule is not None:
self.schedule = schedule
if events is not None:
self.events = events
if build is not None:
self.build = build
if hooks is not None:
self.hooks = hooks
if dependencies is not None:
self.dependencies = dependencies
if trigger is not None:
self.trigger = trigger
if conditions is not None:
self.conditions = conditions
if skip_on_upstream_skip is not None:
self.skip_on_upstream_skip = skip_on_upstream_skip
if matrix is not None:
self.matrix = matrix
if joins is not None:
self.joins = joins
if inputs is not None:
self.inputs = inputs
if outputs is not None:
self.outputs = outputs
if contexts is not None:
self.contexts = contexts
if is_approved is not None:
self.is_approved = is_approved
if cost is not None:
self.cost = cost
if run is not None:
self.run = run
@property
def version(self):
"""Gets the version of this V1CompiledOperation. # noqa: E501
:return: The version of this V1CompiledOperation. # noqa: E501
:rtype: float
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1CompiledOperation.
:param version: The version of this V1CompiledOperation. # noqa: E501
:type: float
"""
self._version = version
@property
def kind(self):
"""Gets the kind of this V1CompiledOperation. # noqa: E501
:return: The kind of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CompiledOperation.
:param kind: The kind of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._kind = kind
@property
def name(self):
"""Gets the name of this V1CompiledOperation. # noqa: E501
:return: The name of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1CompiledOperation.
:param name: The name of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this V1CompiledOperation. # noqa: E501
:return: The description of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1CompiledOperation.
:param description: The description of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._description = description
@property
def tags(self):
"""Gets the tags of this V1CompiledOperation. # noqa: E501
:return: The tags of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this V1CompiledOperation.
:param tags: The tags of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def presets(self):
"""Gets the presets of this V1CompiledOperation. # noqa: E501
:return: The presets of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._presets
@presets.setter
def presets(self, presets):
"""Sets the presets of this V1CompiledOperation.
:param presets: The presets of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._presets = presets
@property
def queue(self):
"""Gets the queue of this V1CompiledOperation. # noqa: E501
:return: The queue of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._queue
@queue.setter
def queue(self, queue):
"""Sets the queue of this V1CompiledOperation.
:param queue: The queue of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._queue = queue
@property
def cache(self):
"""Gets the cache of this V1CompiledOperation. # noqa: E501
:return: The cache of this V1CompiledOperation. # noqa: E501
:rtype: V1Cache
"""
return self._cache
@cache.setter
def cache(self, cache):
"""Sets the cache of this V1CompiledOperation.
:param cache: The cache of this V1CompiledOperation. # noqa: E501
:type: V1Cache
"""
self._cache = cache
@property
def termination(self):
"""Gets the termination of this V1CompiledOperation. # noqa: E501
:return: The termination of this V1CompiledOperation. # noqa: E501
:rtype: V1Termination
"""
return self._termination
@termination.setter
def termination(self, termination):
"""Sets the termination of this V1CompiledOperation.
:param termination: The termination of this V1CompiledOperation. # noqa: E501
:type: V1Termination
"""
self._termination = termination
@property
def plugins(self):
"""Gets the plugins of this V1CompiledOperation. # noqa: E501
:return: The plugins of this V1CompiledOperation. # noqa: E501
:rtype: V1Plugins
"""
return self._plugins
@plugins.setter
def plugins(self, plugins):
"""Sets the plugins of this V1CompiledOperation.
:param plugins: The plugins of this V1CompiledOperation. # noqa: E501
:type: V1Plugins
"""
self._plugins = plugins
@property
def schedule(self):
"""Gets the schedule of this V1CompiledOperation. # noqa: E501
:return: The schedule of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this V1CompiledOperation.
:param schedule: The schedule of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._schedule = schedule
@property
def events(self):
"""Gets the events of this V1CompiledOperation. # noqa: E501
:return: The events of this V1CompiledOperation. # noqa: E501
:rtype: list[V1EventTrigger]
"""
return self._events
@events.setter
def events(self, events):
"""Sets the events of this V1CompiledOperation.
:param events: The events of this V1CompiledOperation. # noqa: E501
:type: list[V1EventTrigger]
"""
self._events = events
@property
def build(self):
"""Gets the build of this V1CompiledOperation. # noqa: E501
:return: The build of this V1CompiledOperation. # noqa: E501
:rtype: V1Build
"""
return self._build
@build.setter
def build(self, build):
"""Sets the build of this V1CompiledOperation.
:param build: The build of this V1CompiledOperation. # noqa: E501
:type: V1Build
"""
self._build = build
@property
def hooks(self):
"""Gets the hooks of this V1CompiledOperation. # noqa: E501
:return: The hooks of this V1CompiledOperation. # noqa: E501
:rtype: list[V1Hook]
"""
return self._hooks
@hooks.setter
def hooks(self, hooks):
"""Sets the hooks of this V1CompiledOperation.
:param hooks: The hooks of this V1CompiledOperation. # noqa: E501
:type: list[V1Hook]
"""
self._hooks = hooks
@property
def dependencies(self):
"""Gets the dependencies of this V1CompiledOperation. # noqa: E501
:return: The dependencies of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""Sets the dependencies of this V1CompiledOperation.
:param dependencies: The dependencies of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._dependencies = dependencies
@property
def trigger(self):
"""Gets the trigger of this V1CompiledOperation. # noqa: E501
:return: The trigger of this V1CompiledOperation. # noqa: E501
:rtype: V1TriggerPolicy
"""
return self._trigger
@trigger.setter
def trigger(self, trigger):
"""Sets the trigger of this V1CompiledOperation.
:param trigger: The trigger of this V1CompiledOperation. # noqa: E501
:type: V1TriggerPolicy
"""
self._trigger = trigger
@property
def conditions(self):
"""Gets the conditions of this V1CompiledOperation. # noqa: E501
:return: The conditions of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1CompiledOperation.
:param conditions: The conditions of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._conditions = conditions
@property
def skip_on_upstream_skip(self):
"""Gets the skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:return: The skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:rtype: bool
"""
return self._skip_on_upstream_skip
@skip_on_upstream_skip.setter
def skip_on_upstream_skip(self, skip_on_upstream_skip):
"""Sets the skip_on_upstream_skip of this V1CompiledOperation.
:param skip_on_upstream_skip: The skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:type: bool
"""
self._skip_on_upstream_skip = skip_on_upstream_skip
@property
def matrix(self):
"""Gets the matrix of this V1CompiledOperation. # noqa: E501
:return: The matrix of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._matrix
@matrix.setter
def matrix(self, matrix):
"""Sets the matrix of this V1CompiledOperation.
:param matrix: The matrix of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._matrix = matrix
@property
def joins(self):
"""Gets the joins of this V1CompiledOperation. # noqa: E501
:return: The joins of this V1CompiledOperation. # noqa: E501
:rtype: dict(str, V1Join)
"""
return self._joins
@joins.setter
def joins(self, joins):
"""Sets the joins of this V1CompiledOperation.
:param joins: The joins of this V1CompiledOperation. # noqa: E501
:type: dict(str, V1Join)
"""
self._joins = joins
@property
def inputs(self):
"""Gets the inputs of this V1CompiledOperation. # noqa: E501
:return: The inputs of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._inputs
@inputs.setter
def inputs(self, inputs):
"""Sets the inputs of this V1CompiledOperation.
:param inputs: The inputs of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._inputs = inputs
@property
def outputs(self):
"""Gets the outputs of this V1CompiledOperation. # noqa: E501
:return: The outputs of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._outputs
@outputs.setter
def outputs(self, outputs):
"""Sets the outputs of this V1CompiledOperation.
:param outputs: The outputs of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._outputs = outputs
@property
def contexts(self):
"""Gets the contexts of this V1CompiledOperation. # noqa: E501
:return: The contexts of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._contexts
@contexts.setter
def contexts(self, contexts):
"""Sets the contexts of this V1CompiledOperation.
:param contexts: The contexts of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._contexts = contexts
@property
def is_approved(self):
"""Gets the is_approved of this V1CompiledOperation. # noqa: E501
:return: The is_approved of this V1CompiledOperation. # noqa: E501
:rtype: bool
"""
return self._is_approved
@is_approved.setter
def is_approved(self, is_approved):
"""Sets the is_approved of this V1CompiledOperation.
:param is_approved: The is_approved of this V1CompiledOperation. # noqa: E501
:type: bool
"""
self._is_approved = is_approved
@property
def cost(self):
"""Gets the cost of this V1CompiledOperation. # noqa: E501
:return: The cost of this V1CompiledOperation. # noqa: E501
:rtype: float
"""
return self._cost
@cost.setter
def cost(self, cost):
"""Sets the cost of this V1CompiledOperation.
:param cost: The cost of this V1CompiledOperation. # noqa: E501
:type: float
"""
self._cost = cost
@property
def run(self):
"""Gets the run of this V1CompiledOperation. # noqa: E501
:return: The run of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._run
@run.setter
def run(self, run):
"""Sets the run of this V1CompiledOperation.
:param run: The run of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._run = run
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CompiledOperation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CompiledOperation):
return True
return self.to_dict() != other.to_dict()
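# Illustrative usage sketch (not part of the generated SDK). OpenAPI-generated
# models like this one usually accept their fields as keyword arguments; the
# field values below are made up for demonstration.
#
#   op = V1CompiledOperation(queue="default", presets=["gpu"], tags=["exp-1"])
#   same = V1CompiledOperation(queue="default", presets=["gpu"], tags=["exp-1"])
#   assert op == same              # __eq__ compares the to_dict() output
#   print(op.to_str())             # pretty-printed dictionary representation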
| apache-2.0 | 1,406,717,811,419,982,800 | 25.847522 | 439 | 0.582328 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/troubleshooting_details.py | 1 | 1830 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingDetails(Model):
"""Information gained from troubleshooting of specified resource.
:param id: The id of the get troubleshoot operation.
:type id: str
:param reason_type: Reason type of failure.
:type reason_type: str
:param summary: A summary of troubleshooting.
:type summary: str
:param detail: Details on troubleshooting results.
:type detail: str
:param recommended_actions: List of recommended actions.
:type recommended_actions:
list[~azure.mgmt.network.v2017_08_01.models.TroubleshootingRecommendedActions]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'reason_type': {'key': 'reasonType', 'type': 'str'},
'summary': {'key': 'summary', 'type': 'str'},
'detail': {'key': 'detail', 'type': 'str'},
'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'},
}
def __init__(self, **kwargs):
super(TroubleshootingDetails, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.reason_type = kwargs.get('reason_type', None)
self.summary = kwargs.get('summary', None)
self.detail = kwargs.get('detail', None)
self.recommended_actions = kwargs.get('recommended_actions', None)
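# Illustrative usage sketch (not part of the generated SDK). The model is
# normally built by msrest deserialization, but the kwargs-based constructor
# above can also be called directly; all values here are hypothetical.
#
#   details = TroubleshootingDetails(
#       id='troubleshoot-1',
#       reason_type='Configuration',
#       summary='Gateway misconfigured',
#       detail='See recommended actions',
#       recommended_actions=[])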
| mit | 1,079,771,110,420,915,600 | 39.666667 | 108 | 0.610929 | false |
Mathew/psychoanalysis | psychoanalysis/apps/pa/aggregation.py | 1 | 6791 | from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from itertools import chain
def make_query(query_dict):
query_type = 0
if 'result type' in query_dict:
if query_dict['result type'] == 'percent':
query_type = 1
data_set = 0
if 'data set' in query_dict:
data_set = 2
the_list = get_data_set(data_set)
return the_list
def get_data_set(num=0):
if num == 0:
return get_data_set_0()
if num == 1:
return get_data_set_1()
if num == 2:
return get_data_set_2()
return get_data_set_3()
def get_data_set_0():
data_list = []
data_list.append(['Activity', 'Consultant Clinical Psychologist', 'Clinical Psychologist 1', 'Clinical Psychologist 2', 'Clinical Psychologist 3', 'CAAP', 'CAAP Trainee', 'Clinical Psychology Trainee'])
data_list.append(['Assessment', 60, 120, 0, 240, 0, 0, 120])
data_list.append(['Individual Follow up', 990, 1140, 180, 120, 315, 495, 330])
data_list.append(['Low Intensity', 0, 0, 0, 0, 0, 60, 0])
data_list.append(['High Intensity', 60, 0, 0, 0, 315, 435, 0])
data_list.append(['High Intensity - Specialist', 375, 660, 0, 0, 0, 0, 330])
data_list.append(['Highly Specialist', 555, 480, 180, 240, 0, 0, 0])
data_list.append(['Group Therapy', 0, 0, 270, 285, 90, 0, 0])
data_list.append(['Case review (with patient)', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other Treatment', 0, 0, 0, 0, 30, 0, 0])
data_list.append(['Clinical Administration', 750, 1230, 315, 660, 645, 990, 465])
data_list.append(['Telephone', 0, 30, 30, 0, 0, 0, 105])
data_list.append(['Clinical meeting', 195, 300, 0, 60, 75, 90, 15])
data_list.append(['Supervision - giving', 60, 360, 0, 120, 75, 0, 60])
data_list.append(['Supervision - receiving', 0, 90, 0, 0, 180, 60, 60])
data_list.append(['Other Supervision', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Admin tasks', 165, 255, 15, 75, 0, 225, 75])
data_list.append(['Dealing with emails', 525, 420, 0, 60, 90, 75, 105])
data_list.append(['Travel', 270, 525, 75, 180, 210, 120, 135])
data_list.append(['Meetings (non-clinical)', 1050, 330, 30, 135, 0, 0, 0])
data_list.append(['Research', 30, 75, 0, 45, 30, 0, 0])
data_list.append(['Training/ CPD (Delivering)', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Training/ CPD (Receiving)', 0, 15, 0, 0, 0, 450, 0])
data_list.append(['Annual Leave', 0, 0, 0, 0, 480, 540, 0])
data_list.append(['Sick Leave', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other leave', 0, 0, 0, 0, 240, 0, 540])
data_list.append(['Breaks', 195, 15, 45, 90, 45, 150, 90])
data_list.append(['Management', 735, 15, 0, 0, 30, 30, 0])
data_list.append(['Other Management', 0, 345, 0, 0, 0, 0, 30])
return data_list
def get_data_set_1():
the_list = get_data_set_0()
percent_list = ['Time Recorded', 5025, 5265, 960, 2070, 2535, 3225, 2130]
num = 0
ret_list = []
for item in the_list:
if num == 0:
ret_list.append(item)
else:
ret_list.append(percent_item(item, percent_list))
num += 1
return ret_list
def get_data_set_2():
main_list = get_category_data_set()
data_list = []
data_list.append(['Category', 'Consultant Clinical Psychologist', 'Clinical Psychologist 1', 'Clinical Psychologist 2', 'Clinical Psychologist 3', 'CAAP', 'CAAP Trainee', 'Clinical Psychology Trainee'])
direct_list = get_one_catergory_data('Direct', main_list)
indirect_list = get_one_catergory_data('Indirect', main_list)
other_list = get_one_catergory_data('Other', main_list)
data_list.append(direct_list)
data_list.append(indirect_list)
data_list.append(other_list)
return data_list
def get_data_set_3():
the_list = get_data_set_2()
percent_list = ['Time Recorded', 5025, 5265, 960, 2070, 2535, 3225, 2130]
num = 0
ret_list = []
for item in the_list:
if num == 0:
ret_list.append(item)
else:
ret_list.append(percent_item(item, percent_list))
num += 1
return ret_list
def get_category_data_set():
data_list = []
data_list.append(['Direct', 60, 120, 0, 240, 0, 0, 120])
data_list.append(['Direct', 990, 1140, 180, 120, 315, 495, 330])
data_list.append(['Direct', 0, 0, 0, 0, 0, 60, 0])
data_list.append(['Direct', 60, 0, 0, 0, 315, 435, 0])
data_list.append(['Direct', 375, 660, 0, 0, 0, 0, 330])
data_list.append(['Direct', 555, 480, 180, 240, 0, 0, 0])
data_list.append(['Direct', 0, 0, 270, 285, 90, 0, 0])
data_list.append(['Direct', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Direct', 0, 0, 0, 0, 30, 0, 0])
data_list.append(['Indirect', 750, 1230, 315, 660, 645, 990, 465])
data_list.append(['Indirect', 0, 30, 30, 0, 0, 0, 105])
data_list.append(['Indirect', 195, 300, 0, 60, 75, 90, 15])
data_list.append(['Indirect', 60, 360, 0, 120, 75, 0, 60])
data_list.append(['Indirect', 0, 90, 0, 0, 180, 60, 60])
data_list.append(['Indirect', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 165, 255, 15, 75, 0, 225, 75])
data_list.append(['Other', 525, 420, 0, 60, 90, 75, 105])
data_list.append(['Other', 270, 525, 75, 180, 210, 120, 135])
data_list.append(['Other', 1050, 330, 30, 135, 0, 0, 0])
data_list.append(['Other', 30, 75, 0, 45, 30, 0, 0])
data_list.append(['Other', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 0, 15, 0, 0, 0, 450, 0])
data_list.append(['Other', 0, 0, 0, 0, 480, 540, 0])
data_list.append(['Other', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 0, 0, 0, 0, 240, 0, 540])
data_list.append(['Other', 195, 15, 45, 90, 45, 150, 90])
data_list.append(['Other', 735, 15, 0, 0, 30, 30, 0])
data_list.append(['Other', 0, 345, 0, 0, 0, 0, 30])
return data_list
def get_one_catergory_data(category, data_list):
the_len = len(data_list[0])
ret_list = []
ret_list.append(category)
for num in range(1, the_len):
ret_list.append(0)
for sub_list in data_list:
if sub_list[0] != category:
continue
for index in range(1, the_len):
tot = sub_list[index] + ret_list[index]
ret_list[index] = tot
return ret_list
def percent_item(item, percent_list):
num = 0
ret_list = []
for val in item:
if num == 0:
ret_list.append(val)
else:
result = val * 100.0 / percent_list[num]
ret_list.append(result)
num += 1
return ret_list
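# Example flow (sketch): the helpers above are plain functions over hard-coded
# data, so they can be exercised directly.
#
#   rows = make_query({'data set': 'by category'})   # resolves to get_data_set_2()
#   # rows[0] is the header row; rows[1:] hold 'Direct'/'Indirect'/'Other' totals
#   direct = get_one_catergory_data('Direct', get_category_data_set())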
| mit | -728,023,969,507,841,200 | 38.254335 | 206 | 0.566485 | false |
chitr/neutron | neutron/cmd/netns_cleanup.py | 1 | 6192 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
LB_NS_PREFIX = 'qlbaas-'
NS_MANGLING_PATTERN = ('(%s|%s|%s|%s|%s)' % (dhcp.NS_PREFIX,
l3_agent.NS_PREFIX,
dvr.SNAT_NS_PREFIX,
dvr_fip_ns.FIP_NS_PREFIX,
LB_NS_PREFIX) +
attributes.UUID_PATTERN)
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
cli_opts = [
cfg.BoolOpt('force',
default=False,
help=_('Delete the namespace by removing all devices.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
agent_config.register_interface_driver_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(interface.OPTS)
return conf
def _get_dhcp_process_monitor(config):
return external_process.ProcessMonitor(config=config,
resource_type='dhcp')
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
process_monitor=_get_dhcp_process_monitor(conf),
network=dhcp.NetModel({'id': network_id}),
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
# filter out namespaces without UUID as the name
if not re.match(NS_MANGLING_PATTERN, namespace):
return False
ip = ip_lib.IPWrapper(namespace=namespace)
return force or ip.namespace_is_empty()
def unplug_device(conf, device):
try:
device.link.delete()
except RuntimeError:
# Maybe the device is OVS port, so try to delete
ovs = ovs_lib.BaseOVS()
bridge_name = ovs.get_bridge_for_iface(device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name)
bridge.delete_port(device.name)
else:
LOG.debug('Unable to find bridge for device: %s', device.name)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
ip = ip_lib.IPWrapper(namespace=namespace)
if force:
kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is empty,
# so a second check is required here.
if ip.netns.exists(namespace):
for device in ip.get_devices(exclude_loopback=True):
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def cleanup_network_namespaces(conf):
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces()
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
time.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
deletion (ie it is empty). The period of sleep and the 2nd pass allow
time for the namespace state to settle, so that the check prior deletion
will re-confirm the namespace is empty.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_network_namespaces(conf)
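# Typical invocation (sketch): main() is normally reached through the project's
# console script wrapper; the script name and flags below are assumptions based
# on the options registered in setup_conf().
#
#   neutron-netns-cleanup --config-file /etc/neutron/neutron.conf --force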
| apache-2.0 | -7,532,679,411,237,745,000 | 32.652174 | 79 | 0.66053 | false |
kernelci/lava-ci-staging | lib/test_configs.py | 1 | 15454 | # Copyright (C) 2018 Collabora Limited
# Author: Guillaume Tucker <[email protected]>
#
# This module is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import yaml
class YAMLObject(object):
"""Base class with helper methods to initialise objects from YAML data."""
@classmethod
def _kw_from_yaml(cls, data, args):
"""Create some keyword arguments based on a YAML dictionary
Return a dictionary suitable to be used as Python keyword arguments in
an object constructor using values from some YAML *data*. The *args*
is a list of keys to look up from the *data* and convert to a
dictionary. Keys that are not in the YAML data are simply omitted from
the returned keywords, relying on default values in object
constructors.
"""
return {
k: v for k, v in ((k, data.get(k)) for k in args) if v
}
class Filter(object):
"""Base class to implement arbitrary configuration filters."""
def __init__(self, items):
"""The *items* can be any data used to filter configurations."""
self._items = items
def match(self, **kw):
"""Return True if the given *kw* keywords match the filter."""
raise NotImplementedError("Filter.match() is not implemented")
class Blacklist(Filter):
"""Blacklist filter to discard certain configurations.
Blacklist *items* are a dictionary associating keys with lists of values.
Any configuration with a key-value pair present in these lists will be
rejected.
"""
def match(self, **kw):
for k, v in kw.iteritems():
bl = self._items.get(k)
if not bl:
continue
if any(x in v for x in bl):
return False
return True
class Whitelist(Filter):
"""Whitelist filter to only accept certain configurations.
Whitelist *items* are a dictionary associating keys with lists of values.
For a configuration to be accepted, there must be a value found in each of
these lists.
"""
def match(self, **kw):
for k, wl in self._items.iteritems():
v = kw.get(k)
if not v:
return False
if not any(x in v for x in wl):
return False
return True
class Combination(Filter):
"""Combination filter to only accept some combined configurations.
Combination *items* are a dictionary with 'keys' and 'values'. The 'keys'
are a list of keywords to look for, and 'values' are a list of combined
values for the given keys. The length of each 'values' item must therefore
match the length of the 'keys' list, and the order of the values must match
the order of the keys.
"""
def __init__(self, items):
self._keys = tuple(items['keys'])
self._values = list(tuple(values) for values in items['values'])
def match(self, **kw):
filter_values = tuple(kw.get(k) for k in self._keys)
return filter_values in self._values
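# Example (sketch): how the three filter types behave on one configuration; the
# keys and values below are made up, only the mechanics matter.
#
#   bl = Blacklist({'lab': ['lab-broken']})
#   wl = Whitelist({'tree': ['mainline', 'next']})
#   combo = Combination({'keys': ['tree', 'defconfig'],
#                        'values': [['mainline', 'defconfig']]})
#   config = {'tree': 'mainline', 'defconfig': 'defconfig', 'lab': 'lab-1'}
#   all(f.match(**config) for f in (bl, wl, combo))   # -> True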
class FilterFactory(YAMLObject):
"""Factory to create filters from YAML data."""
_classes = {
'blacklist': Blacklist,
'whitelist': Whitelist,
'combination': Combination,
}
@classmethod
def from_yaml(cls, filter_params):
"""Iterate through the YAML filters and return Filter objects."""
filter_list = []
for f in filter_params:
for filter_type, items in f.iteritems():
filter_cls = cls._classes[filter_type]
filter_list.append(filter_cls(items))
return filter_list
@classmethod
def from_data(cls, data, default_filters=[]):
"""Look for filters in YAML *data* or return *default_filters*.
Look for a *filters* element in the YAML *data* dictionary. If there
is one, iterate over each item to return a list of Filter objects.
Otherwise, return *default_filters*.
"""
params = data.get('filters')
return cls.from_yaml(params) if params else default_filters
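# The YAML shape handled by from_yaml()/from_data() looks like this (sketch,
# the key names and values are hypothetical):
#
#   filters:
#     - blacklist: {defconfig: ['allmodconfig']}
#     - whitelist: {lab: ['lab-1']}
#     - combination:
#         keys: ['arch', 'defconfig']
#         values:
#           - ['arm', 'multi_v7_defconfig']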
class DeviceType(YAMLObject):
"""Device type model."""
def __init__(self, name, mach, arch, boot_method, dtb=None,
flags=[], filters=[], context={}):
"""A device type describes a category of equivalent hardware devices.
*name* is unique for the device type, typically as used by LAVA.
*mach* is the name of the SoC manufacturer.
*arch* is the CPU architecture following the Linux kernel convention.
*boot_method* is the name of the boot method to use.
*dtb* is an optional name for a device tree binary.
*flags* is a list of optional arbitrary strings.
*filters* is a list of Filter objects associated with this device type.
        *context* is an arbitrary dictionary used when scheduling tests.
"""
self._name = name
self._mach = mach
self._arch = arch
self._boot_method = boot_method
self._dtb = dtb
self._flags = flags
self._filters = filters
self._context = context
def __repr__(self):
return self.name
@property
def name(self):
return self._name
@property
def mach(self):
return self._mach
@property
def arch(self):
return self._arch
@property
def boot_method(self):
return self._boot_method
@property
def dtb(self):
return self._dtb
@property
def context(self):
return self._context
def get_flag(self, name):
return name in self._flags
def match(self, flags, config):
"""Checks if the given *flags* and *config* match this device type."""
return (
all(not v or self.get_flag(k) for k, v in flags.iteritems()) and
all(f.match(**config) for f in self._filters)
)
class DeviceType_arm(DeviceType):
def __init__(self, name, mach, arch='arm', *args, **kw):
"""arm device type with a device tree."""
kw.setdefault('dtb', '{}.dtb'.format(name))
super(DeviceType_arm, self).__init__(name, mach, arch, *args, **kw)
class DeviceType_arm64(DeviceType):
def __init__(self, name, mach, arch='arm64', *args, **kw):
"""arm64 device type with a device tree."""
kw.setdefault('dtb', '{}/{}.dtb'.format(mach, name))
super(DeviceType_arm64, self).__init__(name, mach, arch, *args, **kw)
class DeviceTypeFactory(YAMLObject):
"""Factory to create device types from YAML data."""
_classes = {
'arm-dtb': DeviceType_arm,
'arm64-dtb': DeviceType_arm64,
}
@classmethod
def from_yaml(cls, name, device_type, default_filters=[]):
kw = cls._kw_from_yaml(device_type, [
'mach', 'arch', 'boot_method', 'dtb', 'flags', 'context'])
kw.update({
'name': device_type.get('name', name),
'filters': FilterFactory.from_data(device_type, default_filters),
})
cls_name = device_type.get('class')
device_cls = cls._classes[cls_name] if cls_name else DeviceType
return device_cls(**kw)
class RootFS(YAMLObject):
"""Root file system model."""
_arch_dict = {}
def __init__(self, url_formats, boot_protocol='tftp', root_type=None,
prompt="/ #"):
"""A root file system is any user-space that can be used in test jobs.
*url_formats* are a dictionary with a format string for each type of
file system available (ramdisk, nfs...). There is
typically only one entry here for the main *root_type*,
but multiple entries are possible in particular to boot
with first a ramdisk and then pivot to nfs root.
*boot_protocol* is how the file system is made available to the kernel,
by default `tftp` typically to download a ramdisk.
*root_type* is the name of the file system type (ramdisk, ...) as used
in the job template naming scheme.
*prompt* is a string used in the job definition to tell when the
user-space is available to run some commands.
"""
self._url_format = url_formats
self._root_type = root_type or url_formats.keys()[0]
self._boot_protocol = boot_protocol
self._prompt = prompt
@classmethod
def from_yaml(cls, file_system_types, rootfs):
kw = cls._kw_from_yaml(rootfs, [
'boot_protocol', 'root_type', 'prompt'])
fstype = file_system_types[rootfs['type']]
base_url = fstype['url']
kw['url_formats'] = {
fs: '/'.join([base_url, url]) for fs, url in (
(fs, rootfs.get(fs)) for fs in ['ramdisk', 'nfs'])
if url
}
obj = cls(**kw)
arch_map = fstype.get('arch_map')
if arch_map:
obj._arch_dict = {
tuple(v.values()): k for k, v in arch_map.iteritems()
}
return obj
@property
def prompt(self):
return self._prompt
@property
def boot_protocol(self):
return self._boot_protocol
@property
def root_type(self):
return self._root_type
def get_url(self, fs_type, arch, endianness):
"""Get the URL of the file system for the given variant and arch.
The *fs_type* should match one of the URL patterns known to this root
file system.
"""
fmt = self._url_format.get(fs_type)
if not fmt:
return None
arch_name = self._arch_dict.get((arch, endianness))
if not arch_name:
arch_name = self._arch_dict.get((arch,), arch)
return fmt.format(arch=arch_name)
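# Example (sketch): a RootFS with only a ramdisk URL pattern; the URL is
# hypothetical.
#
#   fs = RootFS({'ramdisk': 'http://storage.example.com/{arch}/rootfs.cpio.gz'})
#   fs.get_url('ramdisk', 'arm64', 'little')  # -> '.../arm64/rootfs.cpio.gz'
#   fs.get_url('nfs', 'arm64', 'little')      # -> None, no nfs pattern defined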
class TestPlan(YAMLObject):
"""Test plan model."""
_pattern = '{plan}/{category}-{method}-{protocol}-{rootfs}-{plan}-template.jinja2'
def __init__(self, name, rootfs, category='generic', filters=[],
pattern=None):
"""A test plan is an arbitrary group of test cases to be run.
*name* is the overall arbitrary test plan name, used when looking for
job template files.
*rootfs* is a RootFS object to be used to run this test plan.
*category* is to classify the type of job to be run, used when looking
for job template files.
*filters* is a list of Filter objects associated with this test plan.
*pattern* is a string pattern to create the path to the job template
file, see TestPlan._pattern for the default value with the
regular template file naming scheme.
"""
self._name = name
self._rootfs = rootfs
self._category = category
self._filters = filters
if pattern:
self._pattern = pattern
@classmethod
def from_yaml(cls, name, test_plan, file_systems, default_filters=[]):
kw = {
'name': name,
'rootfs': file_systems[test_plan['rootfs']],
'filters': FilterFactory.from_data(test_plan, default_filters),
}
kw.update(cls._kw_from_yaml(test_plan, ['name', 'category', 'pattern']))
return cls(**kw)
@property
def name(self):
return self._name
@property
def rootfs(self):
return self._rootfs
def get_template_path(self, boot_method):
"""Get the path to the template file for the given *boot_method*
As different device types use different boot methods (u-boot, grub...),
        each test plan can have several template variants to accommodate
these. All the other parameters are attributes of the test plan.
"""
return self._pattern.format(
category=self._category,
method=boot_method,
protocol=self.rootfs.boot_protocol,
rootfs=self.rootfs.root_type,
plan=self.name)
def match(self, config):
return all(f.match(**config) for f in self._filters)
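# Example (sketch): with the default _pattern, a 'boot' plan on a tftp/ramdisk
# rootfs and a u-boot device (boot_method 'uboot' is assumed) resolves to
#   'boot/generic-uboot-tftp-ramdisk-boot-template.jinja2'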
class TestConfig(YAMLObject):
"""Test configuration model."""
def __init__(self, device_type, test_plans, filters=[]):
"""A test configuration has a *device_type* and a list of *test_plans*.
*device_type* is a DeviceType object.
*test_plans* is a list of TestPlan objects to run on the device type.
"""
self._device_type = device_type
self._test_plans = {
t.name: t for t in test_plans
}
self._filters = filters
@classmethod
def from_yaml(cls, test_config, device_types, test_plans,
default_filters=[]):
kw = {
'device_type': device_types[test_config['device_type']],
'test_plans': [test_plans[test]
for test in test_config['test_plans']],
'filters': FilterFactory.from_data(test_config, default_filters),
}
return cls(**kw)
@property
def device_type(self):
return self._device_type
@property
def test_plans(self):
return self._test_plans
def match(self, arch, plan, flags, config):
return (
plan in self._test_plans and
self._test_plans[plan].match(config) and
self.device_type.arch == arch and
self.device_type.match(flags, config) and
all(f.match(**config) for f in self._filters)
)
def get_template_path(self, plan):
test_plan = self._test_plans[plan]
return test_plan.get_template_path(self._device_type.boot_method)
def load_from_yaml(yaml_path="test-configs.yaml"):
with open(yaml_path) as f:
data = yaml.load(f)
fs_types = data['file_system_types']
file_systems = {
name: RootFS.from_yaml(fs_types, rootfs)
for name, rootfs in data['file_systems'].iteritems()
}
plan_filters = FilterFactory.from_yaml(data['test_plan_default_filters'])
test_plans = {
name: TestPlan.from_yaml(name, test_plan, file_systems, plan_filters)
for name, test_plan in data['test_plans'].iteritems()
}
device_filters = FilterFactory.from_yaml(data['device_default_filters'])
device_types = {
name: DeviceTypeFactory.from_yaml(name, device_type, device_filters)
for name, device_type in data['device_types'].iteritems()
}
test_configs = [
TestConfig.from_yaml(test_config, device_types, test_plans)
for test_config in data['test_configs']
]
data = {
'file_systems': file_systems,
'test_plans': test_plans,
'device_types': device_types,
'test_configs': test_configs,
}
return data
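# Example usage (sketch, assuming a test-configs.yaml next to the caller; the
# arch, plan and config values are hypothetical):
#
#   configs = load_from_yaml()
#   for test_config in configs['test_configs']:
#       if test_config.match('arm64', 'boot', {}, {'tree': 'mainline'}):
#           print(test_config.get_template_path('boot'))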
| lgpl-2.1 | -5,836,779,994,696,674,000 | 32.092077 | 86 | 0.603015 | false |
gregpuzzles1/Sandbox | Example Programs/Ch_07_Student_Files/pilimages.py | 1 | 4982 | """
images.py
This module, writtn by Kenneth Lambert, supports simple image processing.
The Image class represents either an image loaded from a GIF file or a
blank image.
To instantiate an image from a file, enter
image = Image(aGifFileName)
To instantiate a blank image, enter
image = Image(aWidth, aHeight)
Image methods:
draw() Displays the image in a window
getWidth() -> anInt The width in pixels
getHeight() -> anInt The height in pixels
getPixel(x, y) -> (r, g, b) The RGB values of pixel at x, y
setPixel(x, y, (r, g, b)) Resets pixel at x, y to (r, g, b)
save() Saves the image to the current file name
save(aFileName) Saves the image to fileName
LICENSE: This is open-source software released under the terms of the
GPL (http://www.gnu.org/licenses/gpl.html).
"""
import Tkinter
import os, os.path
tk = Tkinter
import PIL.Image
import PIL.ImageTk
Pimg = PIL.Image
Pimgtk = PIL.ImageTk
import exceptions
_root = None
class ImageView(tk.Canvas):
def __init__(self, image,
title = "New Image",
autoflush=False):
master = tk.Toplevel(_root)
master.protocol("WM_DELETE_WINDOW", self.close)
tk.Canvas.__init__(self, master,
width = image.getWidth(),
height = image.getHeight())
self.master.title(title)
self.pack()
master.resizable(0,0)
self.image = image
self.height = image.getHeight()
self.width = image.getWidth()
self.autoflush = autoflush
self.closed = False
def close(self):
"""Close the window"""
self.closed = True
self.master.destroy()
self.image.canvas = None
_root.quit()
def isClosed(self):
return self.closed
def getHeight(self):
"""Return the height of the window"""
return self.height
def getWidth(self):
"""Return the width of the window"""
return self.width
class Image:
def __init__(self, *args):
self.canvas = None
if len(args) == 1:
name = args[0]
if type(name) != str:
raise Exception, 'Must be a file name'
if not os.path.exists(args[0]):
raise Exception, 'File not in current directory'
self.image = Pimg.open(args[0])
self.filename = args[0]
box = self.image.getbbox()
self.width = box[2]
self.height = box[3]
else: # arguments are width and height
self.width, self.height = args
self.image = Pimg.new(mode = "RGB",
size = (self.width, self.height))
self.filename = ""
def getWidth(self):
"""Returns the width of the image in pixels"""
return self.width
def getHeight(self):
"""Returns the height of the image in pixels"""
return self.height
def getPixel(self, x, y):
"""Returns a tuple (r,g,b) with the RGB color values for pixel (x,y)
r,g,b are in range(256)
"""
return self.image.getpixel((x, y))
def setPixel(self, x, y, color):
"""Sets pixel (x,y) to the color given by RGB values r, g, and b.
r,g,b should be in range(256)
"""
self.image.putpixel((x, y), color)
def draw(self):
"""Creates and opens a window on an image.
The user must close the window to return control to
the caller."""
if not self.canvas:
self.canvas = ImageView(self,
self.filename)
self.photoImage = Pimgtk.PhotoImage(self.image)
self.canvas.create_image(self.width / 2,
self.height / 2,
image = self.photoImage)
_root.mainloop()
def save(self, filename = ""):
"""Saves the image to filename. If no file name
is provided, uses the image's file name if there
is one; otherwise, simply returns.
If the .gif extension is not present, it is added.
"""
if filename == "":
return
else:
self.filename = filename
#path, name = os.path.split(filename)
#ext = name.split(".")[-1]
#if ext != "gif":
#filename += ".gif"
self.filename = filename
self.image.save(self.filename)
def clone(self):
new = Image(self.width, self.height)
new.image = self.image.copy()
return new
def __str__(self):
rep = ""
if self.filename:
rep += ("File name: " + self.filename + "\n")
rep += ("Width: " + str(self.width) + \
"\nHeight: " + str(self.height))
return rep
_root = tk.Tk()
_root.withdraw()
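# Example session (sketch): mirrors the module docstring above; "smokey.gif" is
# a hypothetical file in the current directory.
#
#   image = Image("smokey.gif")
#   w, h = image.getWidth(), image.getHeight()
#   image.setPixel(0, 0, (255, 0, 0))
#   image.draw()                      # close the window to continue
#   image.save("smokey-red.gif")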
| gpl-3.0 | -1,244,779,245,137,367,800 | 27.965116 | 76 | 0.540145 | false |
suqinhuang/virt-test | qemu/tests/block_mirror.py | 1 | 5705 | import re, os, logging, time
from autotest.client.shared import utils, error
from virttest import qemu_monitor, storage, env_process, data_dir, utils_misc
@error.context_aware
def run_block_mirror(test, params, env):
"""
Test block mirroring functionality
Test consists of two subtests:
1) Mirror the guest and switch to the mirrored one
2) Synchronize disk and then do continuous backup
"qemu-img compare" is used to verify disk is mirrored successfully.
"""
image_name = params.get("image_name", "image")
image_format = params.get("image_format", "qcow2")
image_orig = "%s.%s" % (image_name, image_format)
image_mirror = utils_misc.get_path(data_dir.get_data_dir(),
"%s-mirror.%s" % (image_name, image_format))
drive_format = params["drive_format"]
block_mirror_cmd = params.get("block_mirror_cmd", "drive-mirror")
device_id = "None"
qemu_img = params["qemu_img_binary"]
def check_block_jobs_info():
"""
Verify block-jobs status reported by monitor command info block-jobs.
@return: parsed output of info block-jobs
"""
fail = 0
try:
output = vm.monitor.info("block-jobs")
except qemu_monitor.MonitorError, e:
logging.error(e)
fail += 1
return None, None
return (re.match("[\w ]+", str(output)), re.findall("\d+", str(output)))
def run_mirroring(vm, cmd, device, dest, complete = True):
"""
Run block mirroring.
@param vm: Virtual machine object
@param cmd: Command for start mirroring
@param device: Guest device that has to be mirrored
@param dest: Location image has to be mirrored into
@param complete: If True, mirroring will complete (switch to mirror),
If False, finish image synchronization and keep
mirroring running (any changes will be mirrored)
"""
vm.monitor.cmd("%s %s %s" % (cmd, device, dest))
while True:
blkjobout, blkjobstatus = check_block_jobs_info()
if 'mirror' in blkjobout.group(0):
logging.info("[(Completed bytes): %s (Total bytes): %s "
"(Speed limit in bytes/s): %s]", blkjobstatus[-3],
blkjobstatus[-2], blkjobstatus[-1])
if int(blkjobstatus[-3]) != int(blkjobstatus[-2]):
time.sleep(10)
continue
else:
logging.info("Target synchronized with source")
if complete:
logging.info("Start mirroring completing")
vm.monitor.cmd("stop")
vm.monitor.cmd("block_job_complete %s" % device)
time.sleep(5)
else:
break
elif 'No' in blkjobout.group(0):
logging.info("Block job completed")
break
def compare_images(cmd, img1, img2):
"""
Check if images are equal. Raise error.TestFail if images not equal.
@param cmd: qemu-img executable
@param img1: First image to compare
@param img2: Second image to compare
"""
logging.info("Comparing images")
compare_cmd = "%s compare %s %s" % (cmd, img1, img2)
rv = utils.run(compare_cmd, ignore_status=True)
if rv.exit_status == 0:
logging.info("Images are equal")
elif rv.exit_status == 1:
raise error.TestFail("Images differ - test failed")
else:
raise error.TestError("Error during image comparison")
try:
# Setup phase
vm_name = params['main_vm']
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.create()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
img_path = storage.get_image_filename(params, data_dir.get_data_dir())
if 'ide' in drive_format:
device_id = " id0-hd0"
elif 'virtio' in drive_format:
device_id = " virtio0"
else:
raise error.TestNAError("Drive format %s is not supported" %
drive_format)
# Subtest 1 - Complete mirroring
error.context("Testing complete mirroring")
run_mirroring(vm, block_mirror_cmd, device_id, image_mirror)
output = vm.monitor.info("block")
if image_orig in output or image_mirror not in output:
raise error.TestError("Mirrored image not being used by guest")
error.context("Compare fully mirrored images")
compare_images(qemu_img, img_path, image_mirror)
vm.destroy()
# Subtest 2 - Continuous backup
error.context("Testing continuous backup")
vm.create()
session = vm.wait_for_login(timeout=timeout)
        run_mirroring(vm, block_mirror_cmd, device_id, image_mirror, False)
if image_orig in output or image_mirror not in output:
raise error.TestError("Mirrored image not used by guest")
for fn in range(0,128):
session.cmd("dd bs=1024 count=1024 if=/dev/urandom of=tmp%d.file"
% fn)
time.sleep(10)
vm.monitor.cmd("stop")
time.sleep(5)
error.context("Compare original and backup images")
compare_images(qemu_img, img_path, image_mirror)
vm.destroy()
finally:
if os.path.isfile(image_mirror):
os.remove(image_mirror)
| gpl-2.0 | 8,536,157,792,847,916,000 | 36.781457 | 83 | 0.568975 | false |
ernstblecha/gen.sh | waitforstring.py | 1 | 1698 | #!/usr/bin/python3 -u
# using -u here to make stdin unbuffered
# This file is part of gen.sh.
#
# gen.sh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gen.sh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gen.sh. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
VERSION = 2
if os.environ.get("GET_VERSION") == "1":
print(VERSION)
sys.exit(0)
w = 0
n = ""
s = ""
if len(sys.argv) > 1: # get the needle and its length
w = len(sys.argv[1])
n = sys.argv[1]
while w > 0: # "endless" loop if we have a needle
c = sys.stdin.read(1)
if len(c) == 0:
sys.exit(1) # stream ended, needle not found
s += c
    s = s[-w:] # store the last w characters for comparison
if s == n:
sys.exit(0) # needle was found
# usage message if needle is missing
print(os.path.basename(sys.argv[0])+""" needle
blocks until the string passed in the first argument (\"needle\") is found on
stdin or the stream ends; additional parameters are ignored
returns 0 if string is found
returns 1 if string is not found
returns 2 if no string is given
This message is shown if no string is given
Version of """+os.path.basename(sys.argv[0])+": " + str(VERSION))
sys.exit(2) # errorcode for missing needle
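# Shell usage sketch (assumed invocation, not part of the script):
#
#   tail -f server.log | ./waitforstring.py "Server started" && echo "up"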
| gpl-3.0 | -159,925,529,795,135,940 | 29.321429 | 77 | 0.691402 | false |
klmitch/nova | nova/tests/functional/notification_sample_tests/test_compute_task.py | 1 | 5997 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestComputeTaskNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def setUp(self):
super(TestComputeTaskNotificationSample, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
def test_build_instances_fault(self):
# Force down the compute node
service_id = self.api.get_service_id('nova-compute')
self.admin_api.put_service_force_down(service_id, True)
server = self._boot_a_server(
expected_status='ERROR',
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
additional_extra_specs={'hw:numa_nodes': 1,
'hw:numa_cpus.0': '0',
'hw:numa_mem.0': 512})
self._wait_for_notification('compute_task.build_instances.error')
# 0. scheduler.select_destinations.start
# 1. compute_task.rebuild_server.error
self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
self._verify_notification(
'compute_task-build_instances-error',
replacements={
'instance_uuid': server['id'],
'request_spec.instance_uuid': server['id'],
'request_spec.security_groups': [],
'request_spec.numa_topology.instance_uuid': server['id'],
'request_spec.pci_requests.instance_uuid': server['id'],
'reason.function_name': self.ANY,
'reason.module_name': self.ANY,
'reason.traceback': self.ANY
},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
additional_extra_specs={'hw:numa_nodes': 1,
'hw:numa_cpus.0': '0',
'hw:numa_mem.0': 512})
self._wait_for_notification('instance.create.end')
# Force down the compute node
service_id = self.api.get_service_id('nova-compute')
self.admin_api.put_service_force_down(service_id, True)
fake_notifier.reset()
        # NOTE(takashin): The rebuild action and the evacuate action share the
        # same code path. So the 'evacuate' action is used for this test.
self._evacuate_server(
server, expected_state='ERROR', expected_migration_status='error')
self._wait_for_notification('compute_task.rebuild_server.error')
# 0. instance.evacuate
# 1. scheduler.select_destinations.start
# 2. compute_task.rebuild_server.error
self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
self._verify_notification(
'compute_task-rebuild_server-error',
replacements={
'instance_uuid': server['id'],
'request_spec.instance_uuid': server['id'],
'request_spec.security_groups': [],
'request_spec.numa_topology.instance_uuid': server['id'],
'request_spec.pci_requests.instance_uuid': server['id'],
'reason.function_name': self.ANY,
'reason.module_name': self.ANY,
'reason.traceback': self.ANY
},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
def test_migrate_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
additional_extra_specs={'hw:numa_nodes': 1,
'hw:numa_cpus.0': '0',
'hw:numa_mem.0': 512})
self._wait_for_notification('instance.create.end')
# Disable the compute node
service_id = self.api.get_service_id('nova-compute')
self.admin_api.put_service(service_id, {'status': 'disabled'})
fake_notifier.reset()
# Note that the operation will return a 202 response but fail with
# NoValidHost asynchronously.
self.admin_api.post_server_action(server['id'], {'migrate': None})
self._wait_for_notification('compute_task.migrate_server.error')
self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS),
fake_notifier.VERSIONED_NOTIFICATIONS)
self._verify_notification(
'compute_task-migrate_server-error',
replacements={
'instance_uuid': server['id'],
'request_spec.instance_uuid': server['id'],
'request_spec.security_groups': [],
'request_spec.numa_topology.instance_uuid': server['id'],
'request_spec.pci_requests.instance_uuid': server['id'],
'reason.exception_message': 'No valid host was found. ',
'reason.function_name': self.ANY,
'reason.module_name': self.ANY,
'reason.traceback': self.ANY
},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
| apache-2.0 | 3,927,317,950,531,222,000 | 46.220472 | 78 | 0.591462 | false |
Charley-fan/metaArray | metaFunc.py | 1 | 10584 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from decimal import Decimal
# import numpy as np
from numpy import bincount
from numpy import linspace
from numpy import round
from numpy import zeros, ones
from numpy.fft import rfft as np_rfft
from scipy.signal import firwin
from core import metaArray
from misc import spline_resize
from misc import quantise
from misc import filtfilt
from misc import engUnit
####################
# Helper functions #
####################
def padding_calc(metAry, min_freq = 0, max_freq = 1e6, resolution = 2048, \
debug = False):
"""
For a given 1D metaArray, work out the overall length of array necessary
for the specified resolution between the frequency limits
Padding ratio is always >= len(metAry)
Example:
rfft(ary, n = padding_calc(ary))
"""
n = len(metAry)
t0 = metAry.get_range(0, 'begin')
t1 = metAry.get_range(0, 'end')
f = n / float(t1-t0) # Sampling freq
# f = abs(f) / 2 # Nyquist
N = n * abs(max_freq - min_freq) / abs(f) # Unpadded resolution
if N < resolution:
return int(round((resolution / N) * n)) # Scale up accordingly
else: # Already at or better resolution
return int(round(n))
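# Example (sketch): pad an rfft so that 0..1 MHz spans roughly 2048 points;
# 'ary' stands for any 1D metaArray.
#
#   spectrum = np_rfft(ary.data, n=padding_calc(ary, 0, 1e6, resolution=2048))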
def meta_fir_len(metAry, length = 0.005):
"""
    Simple helper function to work out the appropriate number of taps for a
    type I FIR filter for a given metaArray.
Default to 0.5% of the input metAry duration, minimum 3.
Input:
        metAry        Target metaArray
        length        Desired length/duration of the filter as a ratio to len(metAry)
    Output:
        length        Length of the desired FIR filter (Int)
"""
length = int(round(len(metAry) * length)) # Round to nearest ratio
if length < 3: length = 3
# l must be odd for Type I filter
if length%2 == 0: length += 1
return length
def meta_lowpass(metAry, freq, length = 0.005, window='hann', copy = True):
"""
Perform a two pass Type I FIR filter of cut-off freq(uency) on the given
1D metaArray, once forward and once backward.
Inputs:
metAry Target metaArray
freq Cut-off frequency (float, in metAry unit)
length Length of the FIR filter (See notes below)
window Window function for the FIR filter
copy Whether to return a copy or modify inplace
Length
If given as float type, it will be interpreted as percentage length
(duration) of the input metaArray.
If given as int type, it will be interpreted as the desire number of
taps for FIR filter.
The default FIR length is 0.5% of that in the input metaArray, mimimum 3.
The exact number of taps is rounded to the next odd number, in order to
meet the type I conditions.
Scipy.signal.firwin support the following window options:
boxcar
triang
blackman
hamming
hann
bartlett
flattop
parzen
bohman
blackmanharris
nuttall
barthann
kaiser (needs beta)
gaussian (needs std)
general_gaussian (needs power, width)
slepian (needs width)
chebwin (needs attenuation)
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
if copy: ary = metAry.copy()
else: ary = metAry
# Work out the Nyquist frequency
Nyquist = ary.get_smp_rate() / 2
# Normalise frequency
name_str = 'Low pass filtered at ' + engUnit(freq, unit = 'Hz', sigfig=3)
freq = float(freq) / Nyquist
# Number of taps
if type(length) is float:
length = meta_fir_len(ary, length = length)
elif type(length) is int:
pass
else:
raise ValueError('Unexpected variable type for length: ' + str(type(length)))
# a = [1.]
b = firwin(length, freq, window=window)
ary.data = filtfilt(b, [1.], ary.data)
if type(ary['name']) is str:
ary['name'] += ' (' + name_str + ')'
else:
ary['name'] = name_str
if copy: return ary
else: return
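# Example (sketch): 'trace' stands for a 1D time-domain metaArray.
#
#   filtered = meta_lowpass(trace, 1e6)        # 1 MHz cut-off, returns a copy
#   meta_lowpass(trace, 1e6, copy=False)       # same filter, modifies in place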
def meta_highpass(metAry, freq, length = 0.005, window='hann', copy=True):
"""
Perform a two pass Type I FIR filter of cut-off freq(uency) on the given
1D metaArray, once forward and once backward.
meta_highpass(metAry) === metAry - meta_lowpass(metAry)
Inputs:
metAry Target metaArray
freq Cut-off frequency (float, in metAry unit)
length Length of the FIR filter (See notes below)
window Window function for the FIR filter
copy Whether to return a copy or modify inplace
See meta_lowpass for details
"""
loary = meta_lowpass(metAry, freq, length=length, window=window, copy=True)
name_str = 'High pass filtered at ' + engUnit(freq, unit = 'Hz', sigfig=3)
if copy: ary = metAry.copy()
else: ary = metAry
ary.data -= loary.data
if type(metAry['name']) is str:
ary['name'] = metAry['name'] + ' (' + name_str + ')'
else:
ary['name'] = name_str
if copy: return ary
else: return
def meta_resample(metAry, rate=False, l=0.005, window='hamming', order = 5):
"""
Resample 1D metaArray data into the given sampling rate, this is
implemented using misc.spline_resize()
This function distinct from the scipy.signal.resample function that, it
uses spline for resampling, instead of FFT based method. Periodicity of the
metAry content is not implied, or required.
Inputs:
metAry Input metaArray
rate Sampling rate (float, in metaArray unit)
        l               Length of the FIR filter, default to 0.5% of len(metAry), minimum 3
window Window method to generate the FIR filter
order Order of spline polynomial, default to 5
Output:
metaArray A resampled copy of the input metAry
If upsampling, quintic spline interpolation will be used.
If downsampling, two pass anti-aliasing FIR filter will be applied, once
forward and once reverse to null the group delay, then quintic spline
interpolation will be used.
If target sampling rate is not given, it will try to find the next highest
sampling rate by default. The resampled data will always align at time 0,
and never exceed the duration of the given data.
    The sampling rate will come in multiples of 1, 2, or 5 Hz. The input
    metaArray is left untouched; a resampled copy is returned.
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
ary = metAry.copy()
if rate is False:
# Target sampling rate is not specified
r = len(ary) / float(abs(ary.get_range(0, 'end') - ary.get_range(0, 'begin')))
# Find out the exponent of the current sampling rate
exponent = Decimal(str(r)).adjusted()
# Remove the exponent
scale = r * 10**(0 - exponent)
        # make the standard scale slightly larger (1e-5) so numerical
        # error (rounding error) does not come into play and force it up
        # to the next sampling scale
if scale > 5.00005:
scale = 10
elif scale > 2.00002:
scale = 5
elif scale > 1.00001:
scale = 2
else:
            # This really shouldn't happen, but just in case the Decimal
            # function returns numbers like 0.123e+45 instead of 1.23e+45
            scale = 1
            print "Warning!! Unexpected values for scale evaluation! " + \
                'scale variable (' + str(scale) + ') should be greater than 1.'
# This is what the sampling rate should be
rate = scale * 10**exponent
# Target size of the ary
n = float(abs(ary.get_range(0, 'end') - ary.get_range(0, 'begin'))) * rate
if type(l) is float: l = meta_fir_len(ary, l)
# resize the data
ary.data = spline_resize(ary.data, n, l=l, window=window, order = order)
# Update the meta info
ary.update_range()
return ary
metaResample = meta_resample
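# Example (sketch): 'trace' stands for a 1D metaArray.
#
#   resampled = meta_resample(trace)           # next 1/2/5-style rate up
#   resampled = meta_resample(trace, rate=5e6) # explicit 5 MHz sampling rate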
def meta_histogram(metAry, bins = False):
"""
Compute a histogram of the given 1D metaArray.
It will try to work out the maximum number of bins (i.e. minimum
quantisation from the data) by default.
    Will raise QuantsationError if unable to determine the number of bins.
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
# Flatten the data to 1D array
data = metAry.data.ravel()
if bins is not False:
quanter = data.ptp() / bins
else:
# Try to quantise the array data
quanter = quantise(data)
# Quantise the data, and offset to the +ve side of value, bincount requires +ve
# int arrays
quantum = round(data / quanter).astype(int)
quantum -= quantum.min()
# Do the bincount for histogram
hist = bincount(quantum)
# Update the metaInfo
hist = metaArray(hist)
hist.set_range(0, 'begin', metAry.min())
hist.set_range(0, 'end', metAry.max())
hist.set_range(0, 'unit', metAry['unit'])
hist.set_range(0, 'label', metAry['label'])
hist['name'] = 'Histogram of ' + metAry['name']
hist['unit'] = ''
hist['label'] = 'Counts'
return hist
histogram = meta_histogram
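# Example (sketch): 'trace' stands for a 1D metaArray.
#
#   hist = meta_histogram(trace, bins=256)
#   # hist spans trace.min()..trace.max() on axis 0, with counts in hist.data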
| gpl-3.0 | -2,785,566,513,698,070,000 | 31.268293 | 115 | 0.606293 | false |
mveitas/metrics | docs/source/conf.py | 1 | 9611 | # -*- coding: utf-8 -*-
#
# Dropwizard documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 13 11:29:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['ytemplates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Metrics'
copyright = u'2010-2014, Coda Hale, Yammer Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '@parsedVersion.majorVersion@[email protected]@'
# The full version, including alpha/beta/rc tags.
release = '@project.version@'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'metrics'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'tagline': u'Mind the gap.',
'gradient_start': u'#ff684b',
'gradient_end': u'#cf2c0f',
'gradient_text': u'#fff',
'gradient_bg': u'#ED4A2D',
'gradient_shadow': u'#CF2C0F',
'landing_logo': u'metrics-hat.png',
'landing_logo_width': u'200px',
'github_page': u'https://github.com/dropwizard/metrics',
'mailing_list': u'https://groups.google.com/forum/#!forum/metrics-user',
'apidocs': u'https://dropwizard.github.io/metrics/' + release + '/apidocs/'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u'Metrics'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = u'metrics-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
html_add_permalinks = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Metricsdoc'
todo_include_todos = True
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Metrics.tex', u'Metrics Documentation',
u'Coda Hale', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'metrics', u'Metrics Documentation',
[u'Coda Hale'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Metrics', u'Metrics Documentation',
u'Coda Hale', 'Metrics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Metrics'
epub_author = u'Coda Hale'
epub_publisher = u'Coda Hale'
epub_copyright = u'2012, Coda Hale'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| apache-2.0 | 7,791,638,484,242,115,000 | 30.930233 | 80 | 0.699927 | false |
alenickwork/python_training | fixture/orm.py | 1 | 4159 | from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import encoders, decoders, convert_mysql_timestamp
conv = encoders
conv.update(decoders)
conv[datetime] = convert_mysql_timestamp
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column = 'group_id')
name = Optional(str, column= 'group_name')
header = Optional(str, column = 'group_header')
footer = Optional(str, column = 'group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table = 'address_in_groups',
column = "id", reverse = "groups", lazy = True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column = 'id')
firstname = Optional(str, column= 'firstname')
lastname = Optional(str, column= 'lastname')
deprecated = Optional(datetime, column= 'deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table = 'address_in_groups',
column = "group_id", reverse = "contacts", lazy = True)
email_prior = Optional(str, column= 'email')
email_2 = Optional(str, column= 'email2')
email_3 = Optional(str, column= 'email3')
home_phone = Optional(str, column= 'home')
mobile_phone = Optional(str, column= 'mobile')
work_phone = Optional(str, column= 'work')
phone_secondary = Optional(str, column= 'phone2')
def __init__(self,host, name, user, password):
        self.db.bind('mysql', host=host, database=name, user=user, password=password, autocommit=True, conv=conv)
self.db.generate_mapping()
#sql_debug(True)
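    # Illustrative usage sketch (not part of the original file); the connection
    # settings are hypothetical.
    #
    #     db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    #     groups = db.get_group_list()
    #     contacts = db.get_contacts_in_group(groups[0])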
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id = str(group.id),
name = group.name,
header = group.header,
footer = group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(list(select(g for g in ORMFixture.ORMGroup)))
def convert_contacts_to_model(self, contacts, full = False):
def convert(cont):
return Contact(id = str(cont.id),
firstname = cont.firstname,
lastname = cont.lastname)
def convert_full(cont):
return Contact(id = str(cont.id),
firstname = cont.firstname,
lastname = cont.lastname,
email_prior = cont.email_prior,
email_2 = cont.email_2,
email_3 = cont.email_3,
home_phone = cont.home_phone,
mobile_phone = cont.mobile_phone,
work_phone = cont.work_phone,
phone_secondary = cont.phone_secondary
)
if not full:
return list(map(convert, contacts))
else:
return list(map(convert_full, contacts))
@db_session
def get_contact_list(self, full = False):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None), full)
@db_session
def get_contact_by_id(self, id, full = False):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None and c.id == id), full)[0]
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
conts = select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups)
return self.convert_contacts_to_model(conts)
| apache-2.0 | -884,994,360,897,504,300 | 39.378641 | 135 | 0.591248 | false |
Den1al/JSShell | web/content/routes.py | 1 | 1400 | from flask import render_template, url_for, Response
from common.config import read_config
from common.utils import concat_url_path
from web.content import content
@content.route('/jquery', methods=['GET'])
def get_jquery():
""" Returns the jQuery.js file """
return render_template('javascript/jquery.js')
@content.route('/prune', methods=['GET'])
def get_prune():
""" Returns the prune.js file """
return render_template('javascript/prune.js')
@content.route('/js', methods=['GET'])
def get_javascript():
""" The view that returns the actual javascript shell
to the client. It takes in consideration configuration
from the `config.json` file. It also appends the dependencies
which are `jQuery` and `JSON.prune`. """
config = read_config()
url = config.get('URL', '//')
shell_javascript = render_template(
'javascript/shell.js',
post_back_url=concat_url_path(url, url_for('api.post_back')),
poll_url=concat_url_path(url, url_for('api.poll_new_commands')),
register_url=concat_url_path(url, url_for('api.register'))
)
script_content = '\n\n'.join([
render_template('javascript/jquery.js'),
        'var JJ = $.noConflict(true);',
render_template('javascript/prune.js'),
shell_javascript
])
return Response(script_content, mimetype='application/javascript')
| mit | 1,664,180,753,557,208,800 | 29.434783 | 72 | 0.656429 | false |
buzzer/tams_pr2 | robot_motion_param/nodes/get_motion_param.py | 1 | 1163 | #!/usr/bin/env python
#
# 2014-08-04 Sebastian Rockel ([email protected])
# Provides a service interface to read the robot motion parameters
# uses the dynamic_reconfigure client
#
import roslib; roslib.load_manifest('robot_motion_param')
from robot_motion_param.srv import *
import rospy
import dynamic_reconfigure.client
def get_params(node, timeout=None):
client = dynamic_reconfigure.client.Client(node, timeout=timeout)
return client.get_configuration(timeout=timeout)
def handle_request(req):
config = get_params('/move_base_node/DWAPlannerROS', 1.0)
max_vel = config['max_trans_vel']
max_acc = config['acc_lim_x']
max_jerk = 0
print "Reading motion parameters (vel, acc, jerk).. {0:4.2f} {1:4.2f} {2:4.2f}".format(max_vel, max_acc, max_jerk)
#print config
return GetMotionParamResponse(max_vel, max_acc, max_jerk)
def server():
rospy.init_node('get_robot_motion_param')
s = rospy.Service('get_motion_param', GetMotionParam, handle_request)
print "Ready to read motion parameters.."
rospy.spin()
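# Illustrative usage sketch (not part of the original node): calling the service
# from another node, assuming the GetMotionParam request carries no fields.
#
#     rospy.wait_for_service('get_motion_param')
#     get_motion_param = rospy.ServiceProxy('get_motion_param', GetMotionParam)
#     response = get_motion_param()   # max velocity, acceleration and jerk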
if __name__ == "__main__":
client = dynamic_reconfigure.client.Client('/move_base_node/DWAPlannerROS')
server()
| bsd-2-clause | -6,549,244,026,538,426,000 | 34.242424 | 116 | 0.729149 | false |
linted/Skip-Trace | common/STcommon.py | 1 | 1245 | try:
    import argparse
    import logging
    from Crypto.PublicKey import RSA
except ImportError as e:
print("[-] {}, exiting".format(e))
exit(1)
def configDebugLog(logFileName):
log_file = logging.FileHandler(logFileName,mode='w')
log_file.setLevel(logging.DEBUG)
log_file.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
# ERROR level or higher should be output to console as well
log_console = logging.StreamHandler()
log_console.setLevel(logging.ERROR)
log_console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger = logging.getLogger('main_logger')
logger.addHandler(log_console)
logger.addHandler(log_file)
return logger
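# Illustrative usage sketch (not part of the original file); the log file name
# is hypothetical.
#
#     logger = configDebugLog("skiptrace.log")
#     logger.debug("recorded in the log file only")
#     logger.error("recorded in the log file and echoed to the console")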
def keyGen(path):
key = RSA.generate(2048)
with open(path +'/python.pem','wb') as privateKey:
privateKey.write(key.exportKey('PEM'))
with open(path+ '/python.pub', 'wb') as publicKey:
publicKey.write(key.publickey().exportKey('PEM'))
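# Illustrative note (not part of the original file): keyGen("./keys") would write
# ./keys/python.pem (private key) and ./keys/python.pub (public key), assuming
# the target directory already exists.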
def parseArgs():
'''Parses args using the argparse lib'''
parser = argparse.ArgumentParser(description='Location logging server')
parser.add_argument('-g', '--generate-keys', metavar='PATH', type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parseArgs()
if args.generate_keys:
keyGen(args.generate_keys) | mit | -4,271,112,123,629,025,000 | 28.666667 | 86 | 0.724498 | false |
klen/simpletree | docs/conf.py | 1 | 1144 | # -*- coding: utf-8 -*-
import os
import sys
from simpletree import __version__ as release
project = 'SimpleTree'
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
copyright = u'2012, Kirill Klenov'
version = '.'.join(release.split('.')[:2])
exclude_patterns = ['_build']
html_use_modindex = False
html_show_sphinx = False
htmlhelp_basename = '{0}doc'.format(project)
latex_documents = [
('index', '{0}.tex'.format(project), u'{0} Documentation'.format(project),
u'Kirill Klenov', 'manual'),
]
latex_use_modindex = False
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'\usepackage{flaskstyle}'
}
latex_use_parts = True
latex_additional_files = ['flaskstyle.sty', 'logo.pdf']
man_pages = [
('index', project.lower(), u'{0} Documentation'.format(project),
[u'Kirill Klenov'], 1)
]
pygments_style = 'tango'
html_theme = 'default'
html_theme_options = {}
| bsd-3-clause | -5,772,293,872,016,142,000 | 28.333333 | 82 | 0.653846 | false |
MaxTyutyunnikov/lino | lino/projects/min2/settings.py | 1 | 2156 | # -*- coding: UTF-8 -*-
## Copyright 2012-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import os
import lino
from lino.projects.std.settings import *
#~ from lino.modlib import cal
#~ class Site(Site,cal.SiteMixin):
class Site(Site):
title = "Lino/MinimalApp 2"
#~ help_url = "http://lino.saffre-rumma.net/az/index.html"
#~ migration_module = 'lino.projects.az.migrate'
#~ project_model = 'contacts.Person'
#~ project_model = 'contacts.Person'
project_model = 'projects.Project'
user_model = "users.User"
#~ languages = ('de', 'fr')
languages = 'en et'
#~ index_view_action = "dsbe.Home"
#~ remote_user_header = "REMOTE_USER"
#~ remote_user_header = None
#~ def setup_quicklinks(self,ui,user,tb):
#~ tb.add_action(self.modules.contacts.Persons.detail_action)
#~ tb.add_action(self.modules.contacts.Companies.detail_action)
def get_installed_apps(self):
for a in super(Site,self).get_installed_apps():
yield a
yield 'django.contrib.contenttypes'
yield 'lino.modlib.system'
yield 'lino.modlib.users'
yield 'lino.modlib.countries'
yield 'lino.modlib.contacts'
yield 'lino.modlib.projects'
yield 'lino.modlib.uploads'
yield 'lino.modlib.cal'
yield 'lino.modlib.outbox'
yield 'lino.modlib.pages'
#~ yield 'lino.projects.min2'
SITE = Site(globals())
| gpl-3.0 | 804,034,290,664,175,200 | 32.774194 | 71 | 0.641466 | false |
vlegoff/tsunami | src/primaires/scripting/actions/equiper.py | 1 | 5397 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action equiper."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Fait équiper un personnage."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.equiper_prototype, "Personnage", "str")
cls.ajouter_types(cls.equiper_objet, "Personnage", "Objet")
@staticmethod
def equiper_prototype(personnage, cle_prototype):
"""Fait équiper un objet à un personnage.
Paramètres à préciser :
* personnage : le personnage qui doit s'équiper
* cle_prototype : la clé du prototype d'objet à équiper
Exemple d'utilisation :
equiper personnage "sabre_bois"
Le personnage n'a pas besoin d'avoir l'objet indiqué dans
son inventaire : il sera dans tous les cas créé. En outre,
cette action ne vérifie pas que le joueur peut s'équiper
à cet emplacement (utilisez la fonction 'peut_equiper' pour
vérifier cela).
"""
if not cle_prototype in importeur.objet.prototypes:
raise ErreurExecution("prototype d'objet {} introuvable".format(
repr(cle_prototype)))
prototype = importeur.objet.prototypes[cle_prototype]
objet = importeur.objet.creer_objet(prototype)
for membre in personnage.equipement.membres:
if membre.peut_equiper(objet):
membre.equiper(objet)
return
raise ErreurExecution("le personnage {} ne peut équiper {}".format(
repr(personnage), repr(objet.cle)))
@staticmethod
def equiper_objet(personnage, objet):
"""Force un personnage à équiper l'objet précisé.
Cette syntaxe de l'action se rapproche davantage de la commande
**porter/wear**. Elle demande à un personnage d'équiper un
objet qu'il possède (dans ses mains, ou dans un sac qu'il équipe).
Paramètres à préciser :
* personnage : le personnage que l'on souhaite équiper
* objet : l'objet que l'on souhaite équiper.
Cette action est susceptible de faire des erreurs, par exemple,
si l'objet n'est pas possédé par le personnage ou si il ne
peut être équipé par le personnage. Il est de bonne politique
de tester avant d'équiper le personnage, sauf si on est dans
une situation extrêmement limitée en aléatoire.
Exemple d'utilisation :
# On cherche à faire équiper un sabre de bois au personnage
# Le personnage possède le sabre de bois dans son inventaire
sabre = possede(personnage, "sabre_bois")
si sabre:
# On vérifié qu'il n'a rien dans la main gauche
si !equipe(personnage, "*main gauche"):
equiper personnage sabre
finsi
finsi
"""
if not any(o for o in personnage.equipement.inventaire if o is objet):
raise ErreurExecution("{} ne possède visiblement pas {}".format(
personnage.nom_unique, objet.identifiant))
        # If 'objet' is already equipped, do nothing
if objet.contenu is personnage.equipement.equipes:
return
        # Try to equip the object on one of the character's limbs
for membre in personnage.equipement.membres:
if membre.peut_equiper(objet):
objet.contenu.retirer(objet)
membre.equiper(objet)
objet.script["porte"].executer(objet=objet,
personnage=personnage)
return
raise ErreurExecution("{} ne peut équiper {}, aucun emplacement " \
"libre".format(personnage.nom_unique, objet.identifiant))
| bsd-3-clause | 5,963,677,848,703,850,000 | 40.757813 | 79 | 0.674275 | false |
fiduswriter/fiduswriter | fiduswriter/base/tests/test_prelogin.py | 1 | 2855 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from testing.testcases import LiveTornadoTestCase
from testing.selenium_helper import SeleniumHelper
class PreloginTest(LiveTornadoTestCase, SeleniumHelper):
fixtures = [
'initial_terms.json',
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.base_url = cls.live_server_url
driver_data = cls.get_drivers(1)
cls.driver = driver_data["drivers"][0]
cls.client = driver_data["clients"][0]
cls.driver.implicitly_wait(driver_data["wait_time"])
cls.wait_time = driver_data["wait_time"]
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_flatpage(self):
self.driver.get(self.base_url + "/")
self.driver.find_element(
By.CSS_SELECTOR,
"a[href='/pages/terms/']"
).click()
h3 = WebDriverWait(self.driver, self.wait_time).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'h3'))
)
self.assertEqual(
h3.text,
'Your Account and Documents on the Website'
)
self.driver.find_element(
By.CSS_SELECTOR,
"a[href='/pages/privacy/']"
).click()
h3 = WebDriverWait(self.driver, self.wait_time).until(
EC.presence_of_element_located(
(By.CSS_SELECTOR, 'h3:nth-child(4)')
)
)
self.assertEqual(
h3.text,
'B. Collecting personal information'
)
def test_language_switch(self):
driver = self.driver
driver.get(self.base_url + "/")
self.driver.find_element(
By.ID,
"lang-selection"
).click()
self.driver.find_element(
By.CSS_SELECTOR,
"#lang-selection option[value=es]"
).click()
self.assertEqual(
self.driver.find_element(
By.CSS_SELECTOR,
"html[lang=es] h1.fw-login-title"
).text,
'INICIAR SESIÓN'
)
self.assertEqual(
self.driver.find_element(
By.ID,
"lang-selection"
).get_attribute('value'),
'es'
)
self.driver.find_element(
By.ID,
"lang-selection"
).click()
self.driver.find_element(
By.CSS_SELECTOR,
"#lang-selection option[value=en]"
).click()
self.assertEqual(
self.driver.find_element(
By.CSS_SELECTOR,
"html[lang=en] h1.fw-login-title"
).text,
'LOG IN'
)
| agpl-3.0 | -6,557,665,526,337,172,000 | 29.361702 | 67 | 0.539944 | false |
TobleMiner/fahrschulcard | solver.py | 1 | 2470 | import sqlite3
from question import Question, Answer
conn = sqlite3.connect('license.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS "answers" (\
`id` INTEGER PRIMARY KEY AUTOINCREMENT,\
`answer` TEXT,\
`question` INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS "questions" (\
`id` INTEGER PRIMARY KEY AUTOINCREMENT,\
`question` TEXT,\
`media` TEXT)')
conn.commit()
def find_answers(question):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT answers.answer FROM questions, answers WHERE \
questions.question = ? AND questions.id = answers.question AND \
questions.media = ? ORDER BY answers.id ASC', t)
else:
t = (question.question, )
c.execute('SELECT answers.answer FROM questions, answers WHERE \
questions.question = ? AND questions.id = answers.question \
ORDER BY answers.id ASC', t)
answers = []
row = c.fetchone()
aid = 1
while(row):
if(question.type == Question.Type.multiple_choice):
for answer in question.answers:
if(answer.answer == row[0]):
answers.append(answer)
elif(question.type == Question.Type.text):
answer = Answer(aid)
answer.answer = row[0]
answers.append(answer)
aid += 1
row = c.fetchone()
return answers
def add_question(question):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT * FROM questions WHERE question = ? AND media = ?', t)
else:
t = (question.question,)
c.execute('SELECT * FROM questions WHERE question = ?', t)
if(not c.fetchone()):
t = (question.question, question.media)
c.execute('INSERT INTO questions (question, media) VALUES (?, ?)', t)
conn.commit();
def add_answer(question, answer):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT id FROM questions WHERE question = ? AND media = ?', t)
else:
t = (question.question,)
c.execute('SELECT id FROM questions WHERE question = ?', t)
qid = c.fetchone()[0]
t = (answer.answer, qid)
c.execute('SELECT * FROM answers WHERE answer = ? AND question = ?', t)
if(not c.fetchone()):
t = (answer.answer, qid)
c.execute('INSERT INTO answers (answer, question) VALUES (?, ?)', t)
conn.commit();
| mit | 8,390,763,817,388,021,000 | 34.285714 | 81 | 0.604453 | false |
r-martin-/Code_College | PythonProgramming/poker_hands_v2.py | 1 | 1983 | """Count poker hands
Sample program to count poker hands and thus estimate the probability of a given hand occurring .
The file contains 1 million records randomly distributed and is, therefore, statistically valid.
The data looks like this:
1,1,1,13,2,4,2,3,1,12,0
3,12,3,2,3,11,4,5,2,5,1
1,9,4,6,1,4,3,2,3,9,1
1,4,3,13,2,13,2,1,3,6,1
A hand in poker consists of five cards. Each pair of numbers represents a card giving its suit and value.
Suits are 1-spades, 2-hearts, 3-diamonds, 4-clubs
Values go from Ace (13) highest to 2 (shown as 1) lowest.
Ranks are 0-nothing, 1-pair, 2-two pair, 3-three of a kind, 4-straight, 5-flush, 6-full house, 7-four of a kind,
8-straight flush, 9-royal flush (matching the order of rank_list below).
In our example above the first line represents the hand 2 of spades, ace of spades, 5 of hearts, 4 of hearts,
king of spades. The last column is the rank.
"""
# 1. Open file for reading
try:
poker_file = open("poker-hand-testing.data", 'r')
except IOError as e:
print(e)
quit()
# 2. Create and initialize variables to hold the counts
total_count = 0
rank_counts = {}
rank_list = ['nothing', 'pair', 'two pair', 'three of a kind', 'straight', 'flush', 'full house', 'four of a kind',
'straight flush', 'royal flush']
# 3. Loop through each line of the file
for line in poker_file:
# At each line increment the counter
total_count += 1
# Get hand rank: split on comma, get last item as int
try:
handRank = int(line.split(',')[-1])
except ValueError as e:
print(e)
continue
# If rank already in dictionary, increment it otherwise add it and set to 1
if handRank in rank_counts:
rank_counts[handRank] += 1
else:
rank_counts[handRank] = 1
# 4. Print the results
print("Total hands in file: {}".format(total_count))
print("Count and probability of hands:")
for i in range(10):
print(" {:18s}:{:10,d}{:10.4%}".format(rank_list[i], rank_counts[i], rank_counts[i] / total_count))
| mit | -5,603,430,153,374,674,000 | 30.47619 | 115 | 0.672718 | false |
tvuillemin/nicetypes | setup.py | 1 | 2325 | import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "nicetypes"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "nicetypes", "__init__.py")
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = []
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
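# Illustrative note (not part of the original file): find_meta("version") returns
# the string assigned to __version__ in src/nicetypes/__init__.py, e.g. "1.0.0"
# (hypothetical value).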
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
packages=PACKAGES,
package_dir={"": "src"},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| mit | -7,027,364,177,655,694,000 | 28.43038 | 77 | 0.569892 | false |
martynovp/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 1 | 62345 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import enrolled_students_features, list_may_enroll
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
from util.query import use_read_replica_if_available
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
"""
Base task class for use with InstructorTask models.
Permits updating information about task in corresponding InstructorTask for monitoring purposes.
Assumes that the entry_id of the InstructorTask model is the first argument to the task.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
updates the entry on success and failure of the task it wraps. It is setting the entry's value
for task_state based on what Celery would set it to once the task returns to Celery:
FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to perform_module_state_update, and documented there.
"""
abstract = True
def on_success(self, task_progress, task_id, args, kwargs):
"""
Update InstructorTask object corresponding to this task with info about success.
Updates task_output and task_state. But it shouldn't actually do anything
if the task is only creating subtasks to actually do the work.
Assumes `task_progress` is a dict containing the task's result, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
# is the first value passed to all such args, so we'll use that.
# And we assume that it exists, else we would already have had a failure.
entry_id = args[0]
entry = InstructorTask.objects.get(pk=entry_id)
# Check to see if any subtasks had been defined as part of this task.
# If not, then we know that we're done. (If so, let the subtasks
# handle updating task_state themselves.)
if len(entry.subtasks) == 0:
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Update InstructorTask object corresponding to this task with info about failure.
Fetches and updates exception and traceback information on failure.
If an exception is raised internal to the task, it is caught by celery and provided here.
The information is recorded in the InstructorTask object as a JSON-serialized dict
stored in the task_output column. It contains the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
class TaskProgress(object):
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
'action_name', and 'duration_ms' values.
"""
def __init__(self, action_name, total, start_time):
self.action_name = action_name
self.total = total
self.start_time = start_time
self.attempted = 0
self.succeeded = 0
self.skipped = 0
self.failed = 0
def update_task_state(self, extra_meta=None):
"""
Update the current celery task's state to the progress state
specified by the current object. Returns the progress
dictionary for use by `run_main_task` and
`BaseInstructorTask.on_success`.
Arguments:
extra_meta (dict): Extra metadata to pass to `update_state`
Returns:
dict: The current task's progress dict
"""
progress_dict = {
'action_name': self.action_name,
'attempted': self.attempted,
'succeeded': self.succeeded,
'skipped': self.skipped,
'failed': self.failed,
'total': self.total,
'duration_ms': int((time() - self.start_time) * 1000),
}
if extra_meta is not None:
progress_dict.update(extra_meta)
_get_current_task().update_state(state=PROGRESS, meta=progress_dict)
return progress_dict
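# Illustrative usage sketch (not part of the original module): how a task
# function typically drives TaskProgress (names and totals are hypothetical).
#
#     progress = TaskProgress(action_name, total=enrolled_students.count(), start_time=time())
#     for student in enrolled_students:
#         progress.attempted += 1
#         # ... do the per-student work, bumping succeeded/failed/skipped ...
#         progress.update_task_state()
#     return progress.update_task_state({'step': 'done'})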
def run_main_task(entry_id, task_fcn, action_name):
"""
Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.
Arguments passed to `task_fcn` are:
`entry_id` : the primary key for the InstructorTask entry representing the task.
`course_id` : the id for the course.
`task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
`action_name` : past-tense verb to use for constructing status messages.
If no exceptions are raised, the `task_fcn` should return a dict containing
the task's result with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages.
Should be past-tense. Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
"""
# Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
entry.task_state = PROGRESS
entry.save_now()
# Get inputs to use in this task from the entry
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed two arguments: the module_descriptor for the module pointed to by the
    module_state_key and the particular StudentModule to update. It should return one of
    UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED, or UPDATE_STATUS_SKIPPED to indicate
    whether the update succeeded, failed, or was skipped for that student module.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in question
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
        else:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
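# Illustrative usage sketch (not part of the original module): callers normally
# bind the per-module handler and filter with functools.partial and hand the
# result to run_main_task (the exact wiring lives in the calling task code).
#
#     from functools import partial
#     update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
#     visit_fcn = partial(perform_module_state_update, update_fcn, None)
#     return run_main_task(entry_id, visit_fcn, action_name)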
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
"""
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
"""
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None, course=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(
user=student,
descriptor=module_descriptor,
field_data_cache=field_data_cache,
course_id=course_id,
track_function=make_track_function(),
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type,
# This module isn't being used for front-end rendering
request_token=None,
# pass in a loaded course for override enabling
course=course
)
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if problem was successfully rescored for the given student, and False
if problem encountered some kind of error in rescoring.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should be already checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
instance.save()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: unexpected response %(msg)s",
dict(
msg=result,
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
else:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Resets problem attempts to zero for specified `student_module`.
Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
"""
update_status = UPDATE_STATUS_SKIPPED
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
track_function('problem_reset_attempts', event_info)
update_status = UPDATE_STATUS_SUCCEEDED
return update_status
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Delete the StudentModule entry.
Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
track_function('problem_delete_state', {})
return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
"""
Upload data as a CSV using ReportStore.
Arguments:
        rows: CSV data in the following format (first row may be a
            header):
[
[row1_colum1, row1_colum2, ...],
...
]
csv_name: Name of the resulting CSV
        course_id: ID of the course
        timestamp: datetime used to build the stored file name
        config_name: settings key identifying which report store configuration to use
"""
report_store = ReportStore.from_config(config_name)
report_store.store_rows(
course_id,
u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
),
rows
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
"""
Upload Executive Summary Html file using ReportStore.
Arguments:
data_dict: containing executive report data.
report_name: Name of the resulting Html File.
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
# Use the data dict and html template to generate the output buffer
output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
report_store.store(
course_id,
u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
course_prefix=course_filename_prefix_generator(course_id),
report_name=report_name,
timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
),
output_buffer,
config={
'content_type': 'text/html',
'content_encoding': None,
}
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a grades CSV file for all students that
are enrolled, and store using a `ReportStore`. Once created, the files can
be accessed by instantiating another `ReportStore` (via
`ReportStore.from_config()`) and calling `link_for()` on it. Writes are
buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
that are visible in ReportStore will be complete ones.
As we start to add more CSV downloads, it will probably be worthwhile to
make a more general CSVDoc class instead of building out the rows like we
do here.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
course = get_course_by_id(course_id)
course_is_cohorted = is_course_cohorted(course.id)
cohorts_header = ['Cohort Name'] if course_is_cohorted else []
experiment_partitions = get_split_user_partitions(course.user_partitions)
group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}
total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after each student is graded to get a sense
# of the task's progress
student_counter += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')
group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)
# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
# If there are any error rows (don't count the header), write them out as well
if len(err_rows) > 1:
upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
grading_type = settings.GRADING_TYPE
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == grading_type:
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the block with type == grading_type
while blocks[current]['block_type'] != grading_type:
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
# This struct encapsulates both the display names of each static item in the
# header row as values as well as the django User field names of those items
# as the keys. It is structured in this way to keep the values related.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if 'percent' not in gradeset or 'raw_scores' not in gradeset:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating Profile Info'}
task_progress.update_task_state(extra_meta=current_step)
# compute the student features table and format it
query_features = task_input.get('features')
student_data = enrolled_students_features(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
# Loop over all our students and build our CSV lists in memory
rows = []
header = None
current_step = {'step': 'Gathering Profile Information'}
enrollment_report_provider = PaidCourseEnrollmentReportProvider()
total_students = students_in_course.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
task_info_string,
action_name,
current_step,
total_students
)
for student in students_in_course:
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after certain intervals to get a hint that task is in progress
student_counter += 1
if student_counter % 100 == 0:
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
user_data = enrollment_report_provider.get_user_profile(student.id)
course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
payment_data = enrollment_report_provider.get_payment_info(student, course_id)
# display name map for the column headers
enrollment_report_headers = {
'User ID': _('User ID'),
'Username': _('Username'),
'Full Name': _('Full Name'),
'First Name': _('First Name'),
'Last Name': _('Last Name'),
'Company Name': _('Company Name'),
'Title': _('Title'),
'Language': _('Language'),
'Year of Birth': _('Year of Birth'),
'Gender': _('Gender'),
'Level of Education': _('Level of Education'),
'Mailing Address': _('Mailing Address'),
'Goals': _('Goals'),
'City': _('City'),
'Country': _('Country'),
'Enrollment Date': _('Enrollment Date'),
'Currently Enrolled': _('Currently Enrolled'),
'Enrollment Source': _('Enrollment Source'),
'Enrollment Role': _('Enrollment Role'),
'List Price': _('List Price'),
'Payment Amount': _('Payment Amount'),
'Coupon Codes Used': _('Coupon Codes Used'),
'Registration Code Used': _('Registration Code Used'),
'Payment Status': _('Payment Status'),
'Transaction Reference Number': _('Transaction Reference Number')
}
if not header:
header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
display_headers = []
for header_element in header:
# translate header into a localizable display string
display_headers.append(enrollment_report_headers.get(header_element, header_element))
rows.append(display_headers)
rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
task_progress.succeeded += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
information about students who may enroll but have not done so
yet, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about students who may enroll'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = task_input.get('features')
student_data = list_may_enroll(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
"""
Returns dict containing information about the course executive summary.
"""
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
course_id, status='refunded'
)
refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
course_id, status='refunded'
)
total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
course_id,
status='refunded'
)
bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
unused_registration_codes = 0
for registration_code in bulk_purchased_codes:
if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
unused_registration_codes += 1
self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
self_purchases_percentage = 0.0
bulk_purchases_percentage = 0.0
invoice_purchases_percentage = 0.0
avg_price_paid = 0.0
if total_seats != 0:
self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
avg_price_paid = gross_revenue / total_seats
course = get_course_by_id(course_id, depth=0)
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
return {
'display_name': course.display_name,
'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
'total_seats': total_seats,
'currency': currency,
'gross_revenue': float(gross_revenue),
'gross_paid_revenue': float(gross_paid_revenue),
'gross_pending_revenue': gross_pending_revenue,
'total_seats_refunded': total_seats_refunded,
'total_amount_refunded': float(total_amount_refunded),
'average_paid_price': float(avg_price_paid),
'discount_codes_data': top_discounted_codes,
'total_seats_using_discount_codes': total_coupon_codes_purchases,
'total_self_purchase_seats': self_purchased_seat_count,
'total_bulk_purchase_seats': bulk_purchased_seat_count,
'total_invoiced_seats': total_invoiced_seats,
'unused_bulk_purchase_code_count': unused_registration_codes,
'self_purchases_percentage': self_purchases_percentage,
'bulk_purchases_percentage': bulk_purchases_percentage,
'invoice_purchases_percentage': invoice_purchases_percentage,
}
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a html report containing information,
which provides a snapshot of how the course is doing.
"""
start_time = time()
report_generation_date = datetime.now(UTC)
status_interval = 100
enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
true_enrollment_count = 0
for user in enrolled_users:
if not user.is_staff and not CourseAccessRole.objects.filter(
user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
).exists():
true_enrollment_count += 1
task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
current_step = {'step': 'Gathering executive summary report information'}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating executive summary report',
task_info_string,
action_name,
current_step
)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# get the course executive summary report information.
data_dict = get_executive_report(course_id)
data_dict.update(
{
'total_enrollments': true_enrollment_count,
'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
}
)
# By this point, we've got the data that we need to generate html report.
current_step = {'step': 'Uploading executive summary report HTML file'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
task_progress.succeeded += 1
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
_xmodule_instance_args, _entry_id, course_id, task_input, action_name): # pylint: disable=unused-argument
"""
For a given `course_id`, generate certificates for all students
that are enrolled.
"""
start_time = time()
enrolled_students = use_read_replica_if_available(CourseEnrollment.objects.users_enrolled_in(course_id))
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating students already have certificates'}
task_progress.update_task_state(extra_meta=current_step)
students_require_certs = students_require_certificate(course_id, enrolled_students)
task_progress.skipped = task_progress.total - len(students_require_certs)
current_step = {'step': 'Generating Certificates'}
task_progress.update_task_state(extra_meta=current_step)
course = modulestore().get_course(course_id, depth=0)
# Generate certificate for each student
for student in students_require_certs:
task_progress.attempted += 1
status = generate_user_certificates(
student,
course_id,
course=course
)
if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
task_progress.succeeded += 1
else:
task_progress.failed += 1
return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
Within a given course, cohort students in bulk, then upload the results
using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
total_assignments += 1
task_progress = TaskProgress(action_name, total_assignments, start_time)
current_step = {'step': 'Cohorting Students'}
task_progress.update_task_state(extra_meta=current_step)
# cohorts_status is a mapping from cohort_name to metadata about
# that cohort. The metadata will include information about users
# successfully added to the cohort, users not found, and a cached
# reference to the corresponding cohort object to prevent
# redundant cohort queries.
cohorts_status = {}
with DefaultStorage().open(task_input['file_name']) as f:
for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
username_or_email = row.get('email') or row.get('username')
cohort_name = row.get('cohort') or ''
task_progress.attempted += 1
if not cohorts_status.get(cohort_name):
cohorts_status[cohort_name] = {
'Cohort Name': cohort_name,
'Students Added': 0,
'Students Not Found': set()
}
try:
cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=cohort_name
)
cohorts_status[cohort_name]["Exists"] = True
except CourseUserGroup.DoesNotExist:
cohorts_status[cohort_name]["Exists"] = False
if not cohorts_status[cohort_name]['Exists']:
task_progress.failed += 1
continue
try:
with transaction.commit_on_success():
add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
cohorts_status[cohort_name]['Students Added'] += 1
task_progress.succeeded += 1
except User.DoesNotExist:
cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
task_progress.failed += 1
except ValueError:
# Raised when the user is already in the given cohort
task_progress.skipped += 1
task_progress.update_task_state(extra_meta=current_step)
current_step['step'] = 'Uploading CSV'
task_progress.update_task_state(extra_meta=current_step)
# Filter the output of `add_users_to_cohorts` in order to upload the result.
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
[
','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in cohorts_status.iteritems()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
""" Returns list of students where certificates needs to be generated.
Removing those students who have their certificate already generated
from total enrolled students for given course.
:param course_id:
:param enrolled_students:
"""
# compute those students where certificates already generated
students_already_have_certs = use_read_replica_if_available(User.objects.filter(
~Q(generatedcertificate__status=CertificateStatuses.unavailable),
generatedcertificate__course_id=course_id))
return list(set(enrolled_students) - set(students_already_have_certs))
| agpl-3.0 | -3,188,295,569,391,423,000 | 43.820273 | 142 | 0.663117 | false |
aubzen/sheltermanager | src/waitinglist.py | 1 | 14773 | #!/usr/bin/python
import additional
import al
import animal
import audit
import configuration
import db
import diary
import log
import media
import utils
from i18n import _, after, now, python2display, subtract_years, add_days, date_diff
def get_waitinglist_query():
"""
Returns the SELECT and JOIN commands necessary for selecting
waiting list rows with resolved lookups.
"""
return "SELECT * FROM v_animalwaitinglist a"
def get_waitinglist_by_id(dbo, wid):
"""
Returns a single waitinglist record for the ID given
"""
l = dbo.locale
sql = get_waitinglist_query() + " WHERE a.ID = %d" % int(wid)
rows = db.query(dbo, sql)
if len(rows) == 0: return None
r = rows[0]
ranks = get_waitinglist_ranks(dbo)
if ranks.has_key(r["WLID"]):
r["RANK"] = ranks[r["WLID"]]
else:
r["RANK"] = ""
r["TIMEONLIST"] = date_diff(l, r["DATEPUTONLIST"], now(dbo.timezone))
return r
def get_person_name(dbo, wid):
"""
    Returns the contact name for the waiting list entry with the given ID
"""
return db.query_string(dbo, "SELECT o.OwnerName FROM animalwaitinglist a INNER JOIN owner o ON a.OwnerID = o.ID WHERE a.ID = %d" % int(wid))
def get_waitinglist_ranks(dbo):
"""
Returns a dictionary of waiting list IDs with their current ranks.
"""
byspecies = configuration.waiting_list_rank_by_species(dbo)
if not byspecies:
rows = db.query(dbo, "SELECT a.ID, a.SpeciesID FROM animalwaitinglist a " \
"INNER JOIN owner o ON a.OwnerID = o.ID " \
"WHERE a.DateRemovedFromList Is Null " \
"ORDER BY a.Urgency, a.DatePutOnList")
else:
rows = db.query(dbo, "SELECT a.ID, a.SpeciesID FROM animalwaitinglist a " \
"INNER JOIN owner o ON a.OwnerID = o.ID " \
"WHERE a.DateRemovedFromList Is Null " \
"ORDER BY a.SpeciesID, a.Urgency, a.DatePutOnList")
ranks = {}
lastspecies = 0
rank = 1
for r in rows:
if byspecies:
if not lastspecies == r["SPECIESID"]:
lastspecies = r["SPECIESID"]
rank = 1
ranks[r["ID"]] = rank
rank += 1
return ranks
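# Illustrative usage sketch (not part of the original module): the mapping
# returned by get_waitinglist_ranks is keyed by waiting list ID, and is what
# get_waitinglist/get_waitinglist_by_id use to fill in the RANK column.
# "dbo" is assumed to be an open database connection object.
#
#   ranks = get_waitinglist_ranks(dbo)
#   for wlid, rank in ranks.items():
#       print("waiting list entry %s is currently rank %s" % (wlid, rank))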
def get_waitinglist(dbo, priorityfloor = 5, species = -1, addresscontains = "", includeremoved = 0, namecontains = "", descriptioncontains = ""):
"""
Retrieves the waiting list
priorityfloor: The lowest urgency to show (1 = urgent, 5 = lowest)
species: A species filter or -1 for all
addresscontains: A partial address
includeremoved: Whether or not to include removed entries
namecontains: A partial name
descriptioncontains: A partial description
"""
l = dbo.locale
ranks = get_waitinglist_ranks(dbo)
sql = get_waitinglist_query() + " WHERE a.Urgency <= " + str(priorityfloor)
if includeremoved == 0: sql += " AND a.DateRemovedFromList Is Null"
if species != -1: sql += " AND a.SpeciesID = " + str(species)
if addresscontains != "": sql += " AND UPPER(OwnerAddress) Like '%" + str(addresscontains).upper().replace("'", "`") + "%'"
if namecontains != "": sql += " AND UPPER(OwnerName) Like '%" + str(namecontains).upper().replace("'", "`") + "%'"
if descriptioncontains != "": sql += " AND UPPER(AnimalDescription) Like '%" + str(descriptioncontains).upper().replace("'", "`") + "%'"
sql += " ORDER BY a.Urgency, a.DatePutOnList"
rows = db.query(dbo, sql)
wlh = configuration.waiting_list_highlights(dbo).split(" ")
for r in rows:
r["HIGHLIGHT"] = ""
for hi in wlh:
if hi != "":
if hi.find("|") == -1:
wid = hi
h = "1"
else:
wid, h = hi.split("|")
if wid == str(r["WLID"]).strip():
r["HIGHLIGHT"] = h
break
if ranks.has_key(r["WLID"]):
r["RANK"] = ranks[r["WLID"]]
else:
r["RANK"] = ""
r["TIMEONLIST"] = date_diff(l, r["DATEPUTONLIST"], now(dbo.timezone))
return rows
def get_waitinglist_find_simple(dbo, query = "", limit = 0, onlyindexed = False):
"""
Returns rows for simple waiting list searches.
query: The search criteria
"""
# If no query has been given, do a current waitinglist search
if query == "":
return get_waitinglist(dbo)
ors = []
add = lambda f: "LOWER(%s) LIKE '%%%s%%'" % (f, query.lower())
if utils.is_numeric(query):
ors.append("a.ID = " + str(utils.cint(query)))
ors.append(add("a.OwnerName"))
ors.append(add("a.AnimalDescription"))
if not onlyindexed:
ors.append(add("a.ReasonForWantingToPart"))
ors.append(add("a.ReasonForRemoval"))
sql = get_waitinglist_query() + " WHERE " + " OR ".join(ors)
if limit > 0: sql += " LIMIT " + str(limit)
return db.query(dbo, sql)
def get_satellite_counts(dbo, wlid):
"""
Returns a resultset containing the number of each type of satellite
record that a waitinglist entry has.
"""
sql = "SELECT a.ID, " \
"(SELECT COUNT(*) FROM media me WHERE me.LinkID = a.ID AND me.LinkTypeID = %d) AS media, " \
"(SELECT COUNT(*) FROM diary di WHERE di.LinkID = a.ID AND di.LinkType = %d) AS diary, " \
"(SELECT COUNT(*) FROM log WHERE log.LinkID = a.ID AND log.LinkType = %d) AS logs " \
"FROM animalwaitinglist a WHERE a.ID = %d" \
% (media.WAITINGLIST, diary.WAITINGLIST, log.WAITINGLIST, int(wlid))
return db.query(dbo, sql)
def delete_waitinglist(dbo, username, wid):
"""
Deletes a waiting list record
"""
audit.delete(dbo, username, "animalwaitinglist", str(db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID=%d" % wid)))
db.execute(dbo, "DELETE FROM animalwaitinglist WHERE ID = %d" % wid)
def update_waitinglist_remove(dbo, username, wid):
"""
Marks a waiting list record as removed
"""
db.execute(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s WHERE ID = %d" % ( db.dd(now(dbo.timezone)), int(wid) ))
audit.edit(dbo, username, "animalwaitinglist", "%s: DateRemovedFromList ==> %s" % ( str(wid), python2display(dbo.locale, now(dbo.timezone))))
def update_waitinglist_highlight(dbo, wlid, himode):
"""
Toggles a waiting list ID record as highlighted.
wlid: The waiting list id to toggle
himode: a highlight value from 1 to 5 for a colour
"""
hl = list(configuration.waiting_list_highlights(dbo).split(" "))
wlid = str(wlid).strip()
# Create a new highlight list that doesn't have our id in it
nl = []
removed = False
for hi in hl:
if hi != "":
if hi.find("|") != -1:
wid, h = hi.split("|")
else:
wid = hi
h = "1"
if wlid == wid:
removed = True
else:
nl.append(wid + "|" + h)
# If our id wasn't present in the list, add it (so we're
# effectively toggling the id on and off)
if not removed:
nl.append(wlid + "|" + himode)
configuration.waiting_list_highlights(dbo, " ".join(nl))
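# Worked example (an illustrative sketch, not from the original source):
# highlights are stored in configuration as a space-separated list of
# "wlid|colour" tokens, so calling the toggle twice adds and then removes
# the entry. get_waitinglist() copies the colour into each row's HIGHLIGHT.
#
#   update_waitinglist_highlight(dbo, 12, "3")   # list now contains "12|3"
#   update_waitinglist_highlight(dbo, 12, "3")   # entry 12 is removed again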
def auto_remove_waitinglist(dbo):
"""
    Finds waiting list entries whose last owner contact date is more than
    AutoRemovePolicy weeks in the past and automatically marks them as
    removed from the list.
"""
l = dbo.locale
rows = db.query(dbo, "SELECT a.ID, a.DateOfLastOwnerContact, " \
"a.AutoRemovePolicy " \
"FROM animalwaitinglist a WHERE a.DateRemovedFromList Is Null " \
"AND AutoRemovePolicy > 0 AND DateOfLastOwnerContact Is Not Null")
updates = []
for r in rows:
xdate = add_days(r["DATEOFLASTOWNERCONTACT"], 7 * r["AUTOREMOVEPOLICY"])
if after(now(dbo.timezone), xdate):
al.debug("auto removing waitinglist entry %d due to policy" % int(r["ID"]), "waitinglist.auto_remove_waitinglist", dbo)
updates.append((now(dbo.timezone), _("Auto removed due to lack of owner contact.", l), r["ID"]))
if len(updates) > 0:
db.execute_many(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s, " \
"ReasonForRemoval=%s WHERE ID=%s", updates)
def auto_update_urgencies(dbo):
"""
    Finds all waiting list entries whose UrgencyUpdateDate has been reached
    (is on or before today) and whose urgency is lower priority than High
    (a value greater than 2), then raises their urgency by one level.
    Because only entries below High are selected, an entry can never be
    automatically promoted to Urgent.
"""
update_period_days = configuration.waiting_list_urgency_update_period(dbo)
if update_period_days == 0:
al.debug("urgency update period is 0, not updating waiting list entries", "waitinglist.auto_update_urgencies", dbo)
return
rows = db.query(dbo, "SELECT a.* " \
"FROM animalwaitinglist a WHERE UrgencyUpdateDate <= %s " \
"AND Urgency > 2" % db.dd(now(dbo.timezone)))
updates = []
for r in rows:
al.debug("increasing urgency of waitinglist entry %d" % int(r["ID"]), "waitinglist.auto_update_urgencies", dbo)
updates.append((now(dbo.timezone), add_days(r["URGENCYUPDATEDATE"], update_period_days), r["URGENCY"] - 1, r["ID"]))
if len(updates) > 0:
db.execute_many(dbo, "UPDATE animalwaitinglist SET " \
"UrgencyLastUpdatedDate=%s, " \
"UrgencyUpdateDate=%s, " \
"Urgency=%s " \
"WHERE ID=%s ", updates)
def update_waitinglist_from_form(dbo, data, username):
"""
Updates a waiting list record from the screen
data: The webpy data object containing form parameters
"""
l = dbo.locale
wlid = utils.df_ki(data, "id")
if utils.df_ks(data, "description") == "":
raise utils.ASMValidationError(_("Description cannot be blank", l))
if utils.df_ki(data, "owner") == "0":
raise utils.ASMValidationError(_("Waiting list entries must have a contact", l))
if utils.df_ks(data, "dateputon") == "":
raise utils.ASMValidationError(_("Date put on cannot be blank", l))
preaudit = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)
db.execute(dbo, db.make_update_user_sql(dbo, "animalwaitinglist", username, "ID=%d" % wlid, (
( "SpeciesID", utils.df_s(data, "species")),
( "DatePutOnList", utils.df_d(data, "dateputon", l)),
( "OwnerID", utils.df_s(data, "owner")),
( "AnimalDescription", utils.df_t(data, "description")),
( "ReasonForWantingToPart", utils.df_t(data, "reasonforwantingtopart")),
( "CanAffordDonation", utils.df_c(data, "canafforddonation")),
( "Urgency", utils.df_s(data, "urgency")),
( "DateRemovedFromList", utils.df_d(data, "dateremoved", l)),
( "AutoRemovePolicy", utils.df_s(data, "autoremovepolicy")),
( "DateOfLastOwnerContact", utils.df_d(data, "dateoflastownercontact", l)),
( "ReasonForRemoval", utils.df_t(data, "reasonforremoval")),
( "Comments", utils.df_t(data, "comments"))
)))
additional.save_values_for_link(dbo, data, wlid, "waitinglist")
postaudit = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)
audit.edit(dbo, username, "animalwaitinglist", audit.map_diff(preaudit, postaudit))
def insert_waitinglist_from_form(dbo, data, username):
"""
Creates a waiting list record from the screen
data: The webpy data object containing form parameters
"""
l = dbo.locale
if utils.df_ks(data, "description") == "":
raise utils.ASMValidationError(_("Description cannot be blank", l))
if utils.df_ki(data, "owner") == "0":
raise utils.ASMValidationError(_("Waiting list entries must have a contact", l))
if utils.df_ks(data, "dateputon") == "":
raise utils.ASMValidationError(_("Date put on cannot be blank", l))
nwlid = db.get_id(dbo, "animalwaitinglist")
db.execute(dbo, db.make_insert_user_sql(dbo, "animalwaitinglist", username, (
( "ID", db.di(nwlid)),
( "SpeciesID", utils.df_s(data, "species")),
( "DatePutOnList", utils.df_d(data, "dateputon", l)),
( "OwnerID", utils.df_s(data, "owner")),
( "AnimalDescription", utils.df_t(data, "description")),
( "ReasonForWantingToPart", utils.df_t(data, "reasonforwantingtopart")),
( "CanAffordDonation", utils.df_c(data, "canafforddonation")),
( "Urgency", utils.df_s(data, "urgency")),
( "DateRemovedFromList", utils.df_d(data, "dateremoved", l)),
( "AutoRemovePolicy", utils.df_s(data, "autoremovepolicy")),
( "DateOfLastOwnerContact", db.dd(now(dbo.timezone))),
( "ReasonForRemoval", utils.df_t(data, "reasonforremoval")),
( "Comments", utils.df_t(data, "comments")),
( "UrgencyLastUpdatedDate", db.dd(now(dbo.timezone))),
( "UrgencyUpdateDate", db.dd(add_days(now(dbo.timezone), configuration.waiting_list_urgency_update_period(dbo))))
)))
audit.create(dbo, username, "animalwaitinglist", str(nwlid))
return nwlid
def create_animal(dbo, username, wlid):
"""
Creates an animal record from a waiting list entry with the id given
"""
a = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)[0]
l = dbo.locale
data = {
"animalname": _("Waiting List {0}", l).format(wlid),
"markings": str(a["ANIMALDESCRIPTION"]),
"reasonforentry": str(a["REASONFORWANTINGTOPART"]),
"species": str(a["SPECIESID"]),
"comments": str(a["COMMENTS"]),
"broughtinby": str(a["OWNERID"]),
"originalowner": str(a["OWNERID"]),
"animaltype": configuration.default_type(dbo),
"breed1": configuration.default_breed(dbo),
"breed2": configuration.default_breed(dbo),
"basecolour": configuration.default_colour(dbo),
"size": configuration.default_size(dbo),
"internallocation": configuration.default_location(dbo),
"dateofbirth": python2display(l, subtract_years(now(dbo.timezone))),
"estimateddob": "1"
}
# If we're creating shelter codes manually, we need to put something unique
# in there for now. Use the id
if configuration.manual_codes(dbo):
data["sheltercode"] = "WL" + str(wlid)
data["shortcode"] = "WL" + str(wlid)
nextid, code = animal.insert_animal_from_form(dbo, data, username)
# Now that we've created our animal, we should remove this entry from the waiting list
db.execute(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s, ReasonForRemoval = %s " \
"WHERE ID = %d" % (
db.dd(now(dbo.timezone)),
db.ds(_("Moved to animal record {0}", l).format(code)),
wlid))
return nextid
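# Illustrative flow (a sketch, not part of the original module): promoting a
# waiting list entry to a full animal record. The returned ID is the new
# animal record; the waiting list entry itself is marked as removed with the
# new shelter code recorded in the reason for removal.
#
#   new_animal_id = create_animal(dbo, "username", 42)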
| gpl-3.0 | -4,002,365,165,038,267,000 | 42.967262 | 145 | 0.6057 | false |
RedbackThomson/LoLShadow | sleekxmpp/test/sleektest.py | 1 | 31939 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import unittest
from xml.parsers.expat import ExpatError
from sleekxmpp import ClientXMPP, ComponentXMPP
from sleekxmpp.util import Queue
from sleekxmpp.stanza import Message, Iq, Presence
from sleekxmpp.test import TestSocket, TestLiveSocket
from sleekxmpp.xmlstream import ET
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.xmlstream.tostring import tostring
from sleekxmpp.xmlstream.matcher import StanzaPath, MatcherId
from sleekxmpp.xmlstream.matcher import MatchXMLMask, MatchXPath
class SleekTest(unittest.TestCase):
"""
A SleekXMPP specific TestCase class that provides
methods for comparing message, iq, and presence stanzas.
Methods:
Message -- Create a Message stanza object.
Iq -- Create an Iq stanza object.
Presence -- Create a Presence stanza object.
check_jid -- Check a JID and its component parts.
check -- Compare a stanza against an XML string.
stream_start -- Initialize a dummy XMPP client.
stream_close -- Disconnect the XMPP client.
make_header -- Create a stream header.
send_header -- Check that the given header has been sent.
send_feature -- Send a raw XML element.
send -- Check that the XMPP client sent the given
generic stanza.
recv -- Queue data for XMPP client to receive, or
verify the data that was received from a
live connection.
recv_header -- Check that a given stream header
was received.
recv_feature -- Check that a given, raw XML element
                          was received.
fix_namespaces -- Add top-level namespace to an XML object.
compare -- Compare XML objects against each other.
"""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.xmpp = None
def parse_xml(self, xml_string):
try:
xml = ET.fromstring(xml_string)
return xml
except (SyntaxError, ExpatError) as e:
msg = e.msg if hasattr(e, 'msg') else e.message
if 'unbound' in msg:
known_prefixes = {
'stream': 'http://etherx.jabber.org/streams'}
prefix = xml_string.split('<')[1].split(':')[0]
if prefix in known_prefixes:
xml_string = '<fixns xmlns:%s="%s">%s</fixns>' % (
prefix,
known_prefixes[prefix],
xml_string)
xml = self.parse_xml(xml_string)
xml = list(xml)[0]
return xml
else:
self.fail("XML data was mal-formed:\n%s" % xml_string)
# ------------------------------------------------------------------
# Shortcut methods for creating stanza objects
def Message(self, *args, **kwargs):
"""
Create a Message stanza.
Uses same arguments as StanzaBase.__init__
Arguments:
xml -- An XML object to use for the Message's values.
"""
return Message(self.xmpp, *args, **kwargs)
def Iq(self, *args, **kwargs):
"""
Create an Iq stanza.
Uses same arguments as StanzaBase.__init__
Arguments:
xml -- An XML object to use for the Iq's values.
"""
return Iq(self.xmpp, *args, **kwargs)
def Presence(self, *args, **kwargs):
"""
Create a Presence stanza.
Uses same arguments as StanzaBase.__init__
Arguments:
            xml -- An XML object to use for the Presence's values.
"""
return Presence(self.xmpp, *args, **kwargs)
def check_jid(self, jid, user=None, domain=None, resource=None,
bare=None, full=None, string=None):
"""
Verify the components of a JID.
Arguments:
jid -- The JID object to test.
user -- Optional. The user name portion of the JID.
domain -- Optional. The domain name portion of the JID.
resource -- Optional. The resource portion of the JID.
bare -- Optional. The bare JID.
full -- Optional. The full JID.
string -- Optional. The string version of the JID.
"""
if user is not None:
self.assertEqual(jid.user, user,
"User does not match: %s" % jid.user)
if domain is not None:
self.assertEqual(jid.domain, domain,
"Domain does not match: %s" % jid.domain)
if resource is not None:
self.assertEqual(jid.resource, resource,
"Resource does not match: %s" % jid.resource)
if bare is not None:
self.assertEqual(jid.bare, bare,
"Bare JID does not match: %s" % jid.bare)
if full is not None:
self.assertEqual(jid.full, full,
"Full JID does not match: %s" % jid.full)
if string is not None:
self.assertEqual(str(jid), string,
"String does not match: %s" % str(jid))
def check_roster(self, owner, jid, name=None, subscription=None,
afrom=None, ato=None, pending_out=None, pending_in=None,
groups=None):
roster = self.xmpp.roster[owner][jid]
if name is not None:
self.assertEqual(roster['name'], name,
"Incorrect name value: %s" % roster['name'])
if subscription is not None:
self.assertEqual(roster['subscription'], subscription,
"Incorrect subscription: %s" % roster['subscription'])
if afrom is not None:
self.assertEqual(roster['from'], afrom,
"Incorrect from state: %s" % roster['from'])
if ato is not None:
self.assertEqual(roster['to'], ato,
"Incorrect to state: %s" % roster['to'])
if pending_out is not None:
self.assertEqual(roster['pending_out'], pending_out,
"Incorrect pending_out state: %s" % roster['pending_out'])
if pending_in is not None:
            self.assertEqual(roster['pending_in'], pending_in,
"Incorrect pending_in state: %s" % roster['pending_in'])
if groups is not None:
self.assertEqual(roster['groups'], groups,
"Incorrect groups: %s" % roster['groups'])
# ------------------------------------------------------------------
# Methods for comparing stanza objects to XML strings
def check(self, stanza, criteria, method='exact',
defaults=None, use_values=True):
"""
Create and compare several stanza objects to a correct XML string.
If use_values is False, tests using stanza.values will not be used.
Some stanzas provide default values for some interfaces, but
these defaults can be problematic for testing since they can easily
be forgotten when supplying the XML string. A list of interfaces that
use defaults may be provided and the generated stanzas will use the
default values for those interfaces if needed.
However, correcting the supplied XML is not possible for interfaces
that add or remove XML elements. Only interfaces that map to XML
attributes may be set using the defaults parameter. The supplied XML
must take into account any extra elements that are included by default.
Arguments:
stanza -- The stanza object to test.
criteria -- An expression the stanza must match against.
method -- The type of matching to use; one of:
'exact', 'mask', 'id', 'xpath', and 'stanzapath'.
Defaults to the value of self.match_method.
defaults -- A list of stanza interfaces that have default
values. These interfaces will be set to their
defaults for the given and generated stanzas to
prevent unexpected test failures.
use_values -- Indicates if testing using stanza.values should
be used. Defaults to True.
"""
if method is None and hasattr(self, 'match_method'):
method = getattr(self, 'match_method')
if method != 'exact':
matchers = {'stanzapath': StanzaPath,
'xpath': MatchXPath,
'mask': MatchXMLMask,
'id': MatcherId}
Matcher = matchers.get(method, None)
if Matcher is None:
raise ValueError("Unknown matching method.")
test = Matcher(criteria)
self.failUnless(test.match(stanza),
"Stanza did not match using %s method:\n" % method + \
"Criteria:\n%s\n" % str(criteria) + \
"Stanza:\n%s" % str(stanza))
else:
stanza_class = stanza.__class__
if not isinstance(criteria, ElementBase):
xml = self.parse_xml(criteria)
else:
xml = criteria.xml
# Ensure that top level namespaces are used, even if they
# were not provided.
self.fix_namespaces(stanza.xml, 'jabber:client')
self.fix_namespaces(xml, 'jabber:client')
stanza2 = stanza_class(xml=xml)
if use_values:
# Using stanza.values will add XML for any interface that
# has a default value. We need to set those defaults on
# the existing stanzas and XML so that they will compare
# correctly.
default_stanza = stanza_class()
if defaults is None:
known_defaults = {
Message: ['type'],
Presence: ['priority']
}
defaults = known_defaults.get(stanza_class, [])
for interface in defaults:
stanza[interface] = stanza[interface]
stanza2[interface] = stanza2[interface]
# Can really only automatically add defaults for top
# level attribute values. Anything else must be accounted
# for in the provided XML string.
if interface not in xml.attrib:
if interface in default_stanza.xml.attrib:
value = default_stanza.xml.attrib[interface]
xml.attrib[interface] = value
values = stanza2.values
stanza3 = stanza_class()
stanza3.values = values
debug = "Three methods for creating stanzas do not match.\n"
debug += "Given XML:\n%s\n" % tostring(xml)
debug += "Given stanza:\n%s\n" % tostring(stanza.xml)
debug += "Generated stanza:\n%s\n" % tostring(stanza2.xml)
debug += "Second generated stanza:\n%s\n" % tostring(stanza3.xml)
result = self.compare(xml, stanza.xml, stanza2.xml, stanza3.xml)
else:
debug = "Two methods for creating stanzas do not match.\n"
debug += "Given XML:\n%s\n" % tostring(xml)
debug += "Given stanza:\n%s\n" % tostring(stanza.xml)
debug += "Generated stanza:\n%s\n" % tostring(stanza2.xml)
result = self.compare(xml, stanza.xml, stanza2.xml)
self.failUnless(result, debug)
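    # Example usage (a sketch, not part of the original class): a typical
    # plugin test builds a stanza, sets its interfaces and compares it
    # against the expected XML string using the default exact method.
    #
    #   msg = self.Message()
    #   msg['to'] = 'user@example.com'
    #   msg['body'] = 'Hello'
    #   self.check(msg,
    #       "<message to='user@example.com'><body>Hello</body></message>")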
# ------------------------------------------------------------------
# Methods for simulating stanza streams.
def stream_disconnect(self):
"""
Simulate a stream disconnection.
"""
if self.xmpp:
self.xmpp.socket.disconnect_error()
def stream_start(self, mode='client', skip=True, header=None,
socket='mock', jid='tester@localhost',
password='test', server='localhost',
port=5222, sasl_mech=None,
plugins=None, plugin_config={}):
"""
Initialize an XMPP client or component using a dummy XML stream.
Arguments:
mode -- Either 'client' or 'component'. Defaults to 'client'.
skip -- Indicates if the first item in the sent queue (the
stream header) should be removed. Tests that wish
to test initializing the stream should set this to
False. Otherwise, the default of True should be used.
socket -- Either 'mock' or 'live' to indicate if the socket
should be a dummy, mock socket or a live, functioning
socket. Defaults to 'mock'.
jid -- The JID to use for the connection.
Defaults to 'tester@localhost'.
password -- The password to use for the connection.
Defaults to 'test'.
server -- The name of the XMPP server. Defaults to 'localhost'.
port -- The port to use when connecting to the server.
Defaults to 5222.
plugins -- List of plugins to register. By default, all plugins
are loaded.
"""
if mode == 'client':
self.xmpp = ClientXMPP(jid, password,
sasl_mech=sasl_mech,
plugin_config=plugin_config)
elif mode == 'component':
self.xmpp = ComponentXMPP(jid, password,
server, port,
plugin_config=plugin_config)
else:
raise ValueError("Unknown XMPP connection mode.")
# Remove unique ID prefix to make it easier to test
self.xmpp._id_prefix = ''
self.xmpp._disconnect_wait_for_threads = False
self.xmpp.default_lang = None
self.xmpp.peer_default_lang = None
# We will use this to wait for the session_start event
# for live connections.
skip_queue = Queue()
if socket == 'mock':
self.xmpp.set_socket(TestSocket())
# Simulate connecting for mock sockets.
self.xmpp.auto_reconnect = False
self.xmpp.state._set_state('connected')
# Must have the stream header ready for xmpp.process() to work.
if not header:
header = self.xmpp.stream_header
self.xmpp.socket.recv_data(header)
elif socket == 'live':
self.xmpp.socket_class = TestLiveSocket
def wait_for_session(x):
self.xmpp.socket.clear()
skip_queue.put('started')
self.xmpp.add_event_handler('session_start', wait_for_session)
if server is not None:
self.xmpp.connect((server, port))
else:
self.xmpp.connect()
else:
raise ValueError("Unknown socket type.")
if plugins is None:
self.xmpp.register_plugins()
else:
for plugin in plugins:
self.xmpp.register_plugin(plugin)
# Some plugins require messages to have ID values. Set
# this to True in tests related to those plugins.
self.xmpp.use_message_ids = False
self.xmpp.process(threaded=True)
if skip:
if socket != 'live':
# Mark send queue as usable
self.xmpp.session_started_event.set()
# Clear startup stanzas
self.xmpp.socket.next_sent(timeout=1)
if mode == 'component':
self.xmpp.socket.next_sent(timeout=1)
else:
skip_queue.get(block=True, timeout=10)
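    # Example usage (a sketch, not part of the original class): tests usually
    # call stream_start with a mock socket at the start of each test (or in
    # setUp), exercise the client, and then disconnect afterwards via the
    # stream_close helper listed in the class docstring.
    #
    #   def setUp(self):
    #       self.stream_start(mode='client', plugins=['xep_0030'])
    #
    #   def tearDown(self):
    #       self.stream_close()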
def make_header(self, sto='',
sfrom='',
sid='',
stream_ns="http://etherx.jabber.org/streams",
default_ns="jabber:client",
default_lang="en",
version="1.0",
xml_header=True):
"""
Create a stream header to be received by the test XMPP agent.
The header must be saved and passed to stream_start.
Arguments:
sto -- The recipient of the stream header.
sfrom -- The agent sending the stream header.
sid -- The stream's id.
stream_ns -- The namespace of the stream's root element.
default_ns -- The default stanza namespace.
version -- The stream version.
xml_header -- Indicates if the XML version header should be
                          prepended to the stream header.
"""
header = '<stream:stream %s>'
parts = []
if xml_header:
header = '<?xml version="1.0"?>' + header
if sto:
parts.append('to="%s"' % sto)
if sfrom:
parts.append('from="%s"' % sfrom)
if sid:
parts.append('id="%s"' % sid)
if default_lang:
parts.append('xml:lang="%s"' % default_lang)
parts.append('version="%s"' % version)
parts.append('xmlns:stream="%s"' % stream_ns)
parts.append('xmlns="%s"' % default_ns)
return header % ' '.join(parts)
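    # Example (a sketch, not part of the original class): the header built for
    # a typical client stream is a single string along these lines (wrapped
    # here for readability).
    #
    #   self.make_header(sto='localhost', sid='12345')
    #   # -> '<?xml version="1.0"?><stream:stream to="localhost" id="12345"
    #   #     xml:lang="en" version="1.0"
    #   #     xmlns:stream="http://etherx.jabber.org/streams"
    #   #     xmlns="jabber:client">'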
def recv(self, data, defaults=[], method='exact',
use_values=True, timeout=1):
"""
Pass data to the dummy XMPP client as if it came from an XMPP server.
If using a live connection, verify what the server has sent.
Arguments:
data -- If a dummy socket is being used, the XML that is to
be received next. Otherwise it is the criteria used
to match against live data that is received.
defaults -- A list of stanza interfaces with default values that
may interfere with comparisons.
method -- Select the type of comparison to use for
verifying the received stanza. Options are 'exact',
'id', 'stanzapath', 'xpath', and 'mask'.
Defaults to the value of self.match_method.
use_values -- Indicates if stanza comparisons should test using
stanza.values. Defaults to True.
timeout -- Time to wait in seconds for data to be received by
a live connection.
"""
if self.xmpp.socket.is_live:
# we are working with a live connection, so we should
# verify what has been received instead of simulating
# receiving data.
recv_data = self.xmpp.socket.next_recv(timeout)
if recv_data is None:
self.fail("No stanza was received.")
xml = self.parse_xml(recv_data)
self.fix_namespaces(xml, 'jabber:client')
stanza = self.xmpp._build_stanza(xml, 'jabber:client')
self.check(stanza, data,
method=method,
defaults=defaults,
use_values=use_values)
else:
# place the data in the dummy socket receiving queue.
data = str(data)
self.xmpp.socket.recv_data(data)
def recv_header(self, sto='',
sfrom='',
sid='',
stream_ns="http://etherx.jabber.org/streams",
default_ns="jabber:client",
version="1.0",
xml_header=False,
timeout=1):
"""
Check that a given stream header was received.
Arguments:
sto -- The recipient of the stream header.
sfrom -- The agent sending the stream header.
sid -- The stream's id. Set to None to ignore.
stream_ns -- The namespace of the stream's root element.
default_ns -- The default stanza namespace.
version -- The stream version.
xml_header -- Indicates if the XML version header should be
appended before the stream header.
timeout -- Length of time to wait in seconds for a
response.
"""
header = self.make_header(sto, sfrom, sid,
stream_ns=stream_ns,
default_ns=default_ns,
version=version,
xml_header=xml_header)
recv_header = self.xmpp.socket.next_recv(timeout)
if recv_header is None:
raise ValueError("Socket did not return data.")
# Apply closing elements so that we can construct
# XML objects for comparison.
header2 = header + '</stream:stream>'
recv_header2 = recv_header + '</stream:stream>'
xml = self.parse_xml(header2)
recv_xml = self.parse_xml(recv_header2)
if sid is None:
# Ignore the id sent by the server since
# we can't know in advance what it will be.
if 'id' in recv_xml.attrib:
del recv_xml.attrib['id']
# Ignore the xml:lang attribute for now.
if 'xml:lang' in recv_xml.attrib:
del recv_xml.attrib['xml:lang']
xml_ns = 'http://www.w3.org/XML/1998/namespace'
if '{%s}lang' % xml_ns in recv_xml.attrib:
del recv_xml.attrib['{%s}lang' % xml_ns]
if list(recv_xml):
# We received more than just the header
for xml in recv_xml:
self.xmpp.socket.recv_data(tostring(xml))
attrib = recv_xml.attrib
recv_xml.clear()
recv_xml.attrib = attrib
self.failUnless(
self.compare(xml, recv_xml),
"Stream headers do not match:\nDesired:\n%s\nReceived:\n%s" % (
'%s %s' % (xml.tag, xml.attrib),
'%s %s' % (recv_xml.tag, recv_xml.attrib)))
def recv_feature(self, data, method='mask', use_values=True, timeout=1):
"""
"""
if method is None and hasattr(self, 'match_method'):
method = getattr(self, 'match_method')
if self.xmpp.socket.is_live:
# we are working with a live connection, so we should
# verify what has been received instead of simulating
# receiving data.
recv_data = self.xmpp.socket.next_recv(timeout)
xml = self.parse_xml(data)
recv_xml = self.parse_xml(recv_data)
if recv_data is None:
self.fail("No stanza was received.")
if method == 'exact':
self.failUnless(self.compare(xml, recv_xml),
"Features do not match.\nDesired:\n%s\nReceived:\n%s" % (
tostring(xml), tostring(recv_xml)))
elif method == 'mask':
matcher = MatchXMLMask(xml)
self.failUnless(matcher.match(recv_xml),
"Stanza did not match using %s method:\n" % method + \
"Criteria:\n%s\n" % tostring(xml) + \
"Stanza:\n%s" % tostring(recv_xml))
else:
raise ValueError("Uknown matching method: %s" % method)
else:
# place the data in the dummy socket receiving queue.
data = str(data)
self.xmpp.socket.recv_data(data)
def send_header(self, sto='',
sfrom='',
sid='',
stream_ns="http://etherx.jabber.org/streams",
default_ns="jabber:client",
default_lang="en",
version="1.0",
xml_header=False,
timeout=1):
"""
Check that a given stream header was sent.
Arguments:
sto -- The recipient of the stream header.
sfrom -- The agent sending the stream header.
sid -- The stream's id.
stream_ns -- The namespace of the stream's root element.
default_ns -- The default stanza namespace.
version -- The stream version.
xml_header -- Indicates if the XML version header should be
appended before the stream header.
timeout -- Length of time to wait in seconds for a
response.
"""
header = self.make_header(sto, sfrom, sid,
stream_ns=stream_ns,
default_ns=default_ns,
default_lang=default_lang,
version=version,
xml_header=xml_header)
sent_header = self.xmpp.socket.next_sent(timeout)
if sent_header is None:
raise ValueError("Socket did not return data.")
# Apply closing elements so that we can construct
# XML objects for comparison.
header2 = header + '</stream:stream>'
sent_header2 = sent_header + b'</stream:stream>'
xml = self.parse_xml(header2)
sent_xml = self.parse_xml(sent_header2)
self.failUnless(
self.compare(xml, sent_xml),
"Stream headers do not match:\nDesired:\n%s\nSent:\n%s" % (
header, sent_header))
def send_feature(self, data, method='mask', use_values=True, timeout=1):
"""
"""
sent_data = self.xmpp.socket.next_sent(timeout)
xml = self.parse_xml(data)
sent_xml = self.parse_xml(sent_data)
if sent_data is None:
self.fail("No stanza was sent.")
if method == 'exact':
self.failUnless(self.compare(xml, sent_xml),
"Features do not match.\nDesired:\n%s\nReceived:\n%s" % (
tostring(xml), tostring(sent_xml)))
elif method == 'mask':
matcher = MatchXMLMask(xml)
self.failUnless(matcher.match(sent_xml),
"Stanza did not match using %s method:\n" % method + \
"Criteria:\n%s\n" % tostring(xml) + \
"Stanza:\n%s" % tostring(sent_xml))
else:
raise ValueError("Uknown matching method: %s" % method)
def send(self, data, defaults=None, use_values=True,
timeout=.5, method='exact'):
"""
Check that the XMPP client sent the given stanza XML.
Extracts the next sent stanza and compares it with the given
XML using check.
Arguments:
            data         -- The XML string of the expected stanza,
                            or an equivalent stanza object.
use_values -- Modifies the type of tests used by check_message.
defaults -- A list of stanza interfaces that have defaults
values which may interfere with comparisons.
timeout -- Time in seconds to wait for a stanza before
failing the check.
method -- Select the type of comparison to use for
verifying the sent stanza. Options are 'exact',
'id', 'stanzapath', 'xpath', and 'mask'.
Defaults to the value of self.match_method.
"""
sent = self.xmpp.socket.next_sent(timeout)
if data is None and sent is None:
return
if data is None and sent is not None:
self.fail("Stanza data was sent: %s" % sent)
if sent is None:
self.fail("No stanza was sent.")
xml = self.parse_xml(sent)
self.fix_namespaces(xml, 'jabber:client')
sent = self.xmpp._build_stanza(xml, 'jabber:client')
self.check(sent, data,
method=method,
defaults=defaults,
use_values=use_values)
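    # Hedged usage sketch (assumed test flow, not taken from this file):
    # a typical test drives the dummy client, then asserts on the stanza it
    # produced; the stanza is pulled from the mock socket and passed to check().
    #
    #   self.xmpp.send_message(mto='user@example.com', mbody='Hi')
    #   self.send("<message to='user@example.com'><body>Hi</body></message>")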
def stream_close(self):
"""
Disconnect the dummy XMPP client.
Can be safely called even if stream_start has not been called.
Must be placed in the tearDown method of a test class to ensure
that the XMPP client is disconnected after an error.
"""
if hasattr(self, 'xmpp') and self.xmpp is not None:
self.xmpp.socket.recv_data(self.xmpp.stream_footer)
self.xmpp.disconnect()
# ------------------------------------------------------------------
# XML Comparison and Cleanup
def fix_namespaces(self, xml, ns):
"""
Assign a namespace to an element and any children that
don't have a namespace.
Arguments:
xml -- The XML object to fix.
ns -- The namespace to add to the XML object.
"""
if xml.tag.startswith('{'):
return
xml.tag = '{%s}%s' % (ns, xml.tag)
for child in xml:
self.fix_namespaces(child, ns)
def compare(self, xml, *other):
"""
Compare XML objects.
Arguments:
xml -- The XML object to compare against.
*other -- The list of XML objects to compare.
"""
if not other:
return False
# Compare multiple objects
if len(other) > 1:
for xml2 in other:
if not self.compare(xml, xml2):
return False
return True
other = other[0]
# Step 1: Check tags
if xml.tag != other.tag:
return False
# Step 2: Check attributes
if xml.attrib != other.attrib:
return False
# Step 3: Check text
if xml.text is None:
xml.text = ""
if other.text is None:
other.text = ""
xml.text = xml.text.strip()
other.text = other.text.strip()
if xml.text != other.text:
return False
# Step 4: Check children count
if len(list(xml)) != len(list(other)):
return False
# Step 5: Recursively check children
for child in xml:
child2s = other.findall("%s" % child.tag)
if child2s is None:
return False
for child2 in child2s:
if self.compare(child, child2):
break
else:
return False
# Step 6: Recursively check children the other way.
for child in other:
child2s = xml.findall("%s" % child.tag)
if child2s is None:
return False
for child2 in child2s:
if self.compare(child, child2):
break
else:
return False
# Everything matches
return True
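    # Minimal sketch (assumption, not in the original module): compare() is a
    # structural XML equality test, so reordered attributes still match, e.g.
    #
    #   a = self.parse_xml("<presence xmlns='jabber:client' type='away'/>")
    #   b = self.parse_xml("<presence type='away' xmlns='jabber:client'/>")
    #   assert self.compare(a, b)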
| mit | -1,355,327,946,575,225,300 | 40.479221 | 81 | 0.519052 | false |
pwwang/bioprocs | bioprocs/utils/meme.py | 1 | 5335 | """
Read or write MEME motif file
"""
import re
import math
from collections import OrderedDict
class MemeRecord(object):
def __init__(self,
name,
matrix,
altname = '',
mtype = 'letter-probability',
alength = None,
w = None,
nsites = 20,
E = 0,
URL = None
):
self.name = name
self.matrix = matrix
self.altname = altname
self.mtype = mtype
self.alength = alength or len(matrix[0])
self.w = w or len(matrix)
self.nsites = nsites
self.E = E
self.URL = URL
def __str__(self):
return """
MOTIF {name}{altname}
{mtype} matrix: alength= {alength} w= {w} nsites= {nsites} E= {E}
{matrix}
{URL}
""".format(
name = self.name,
altname = " " + self.altname if self.altname else "",
mtype = self.mtype,
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
matrix = "\n".join(" ".join(str(r) for r in row) for row in self.matrix),
URL = "URL {}".format(self.URL) if self.URL else ""
)
def pwm2logodds(self):
assert self.mtype == 'letter-probability'
matrix = [
            tuple(math.log(p/(1.0 - p)) for p in row)
for row in self.matrix
]
return MemeRecord(
name = self.name,
matrix = matrix,
altname = self.altname,
mtype = 'log-odds',
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
URL = self.URL
)
def pwm2prob(self):
assert self.mtype == 'log-odds'
matrix = [
            tuple(math.exp(p)/(1.0 + math.exp(p)) for p in row)
for row in self.matrix
]
return MemeRecord(
name = self.name,
matrix = matrix,
altname = self.altname,
mtype = 'letter-probability',
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
URL = self.URL
)
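    # Illustrative example (assumed motif name and values, not part of the
    # module): building a probability record and round-tripping it through
    # the two converters above.
    #
    #   rec = MemeRecord('MA0001.1', [(0.25, 0.25, 0.25, 0.25),
    #                                 (0.10, 0.40, 0.40, 0.10)])
    #   odds = rec.pwm2logodds()      # mtype becomes 'log-odds'
    #   back = odds.pwm2prob()        # back to 'letter-probability'
    #   print(str(rec))               # MEME-formatted motif block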
class MemeReader(object):
def __init__(self, memefile):
self.meta = {}
alphabet_flag = False
bgfreqs_flag = False
self.file = open(memefile)
self.tell = 0
while True:
self.tell = self.file.tell()
line = self.file.readline()
if not line:
raise ValueError('Not a valid MEME motif file.')
if line.startswith('MEME version'):
self.meta['Version'] = line[12:].strip()
elif line.startswith('ALPHABET='):
self.meta['Alphabet'] = line[9:].strip()
elif line.startswith('ALPHABET'):
self.meta['Alphabet'] = line[8:].strip()
alphabet_flag = True
elif line.startswith('END ALPHABET'):
alphabet_flag = False
elif alphabet_flag:
self.meta['Alphabet'] += '\n' + line.strip()
elif line.startswith('strands:'):
self.meta['Strands'] = line[8:].strip()
elif line.startswith('Background letter frequencies'):
bgfreqs_flag = True
source = line[30:].strip()
if source.startswith('(from '):
source = source[6:-2]
else:
source = ''
self.meta['bgfreqs'] = {'from': source, 'freqs': OrderedDict()}
elif bgfreqs_flag:
bgfreqs_flag = False
parts = line.strip().split()
self.meta['bgfreqs']['freqs'] = OrderedDict(tuple([parts[2*i], float(parts[2*i+1])] for i in range(int(len(parts)/2))))
elif line.startswith('MOTIF'):
self.file.seek(self.tell)
break
def next(self):
name = None
altname = ''
url = None
mtype = ''
matrix = []
attrs = {}
while True:
tell = self.file.tell()
line = self.file.readline()
if not line:
raise StopIteration()
if line.startswith('MOTIF'):
if name:
self.file.seek(tell)
break
parts = line[5:].strip().split()
name = parts.pop(0)
if parts:
altname = parts[0]
elif line.startswith('URL'):
url = line[3:].strip()
elif 'matrix:' in line:
matrix = [] # in case there are multiple matrices
mtype, attrs = line.strip().split('matrix:')
mtype = mtype.strip()
attrs = re.split(r'(?:\s*=\s*|\s+)', attrs.strip())
attrs = {attrs[2*i]:attrs[2*i+1] for i in range(int(len(attrs)/2))}
else:
line = line.strip()
if not line:
continue
matrix.append(tuple(float(v) for v in line.split()))
return MemeRecord(
name,
matrix,
altname = altname,
mtype = mtype,
URL = url,
**attrs
)
def __next__(self):
return self.next()
def rewind(self):
self.file.seek(self.tell)
def __iter__(self):
return self
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
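# Hedged usage sketch (the file name is an assumption): iterating a MEME motif
# file yields one MemeRecord per MOTIF block, with file-level metadata kept on
# reader.meta.
#
#   reader = MemeReader('motifs.meme')
#   print(reader.meta.get('Version'), reader.meta.get('Alphabet'))
#   for mrec in reader:
#       print(mrec.name, mrec.w, mrec.alength)
#   reader.close()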
class MemeWriter(object):
def __init__(self, outfile, meta = None):
self.meta = meta or {}
self.file = open(outfile, 'w')
def writeMeta(self):
self.file.write("MEME version {}\n\n".format(self.meta.get('Version', 4)))
alphabet = self.meta.get('Alphabet', 'ACGT')
if '\n' in alphabet:
self.file.write("ALPHABET {}\nEND ALPHABET\n\n".format(alphabet))
else:
self.file.write("ALPHABET= {}\n\n".format(alphabet))
strands = self.meta.get('Strands', '+ -')
self.file.write("strands: {}\n\n".format(strands))
bgfreqs = self.meta.get("bgfreqs", {})
if "from" in bgfreqs:
self.file.write("Background letter frequencies (from {}):\n".format(bgfreqs['from']))
if "freqs" in bgfreqs:
self.file.write(" ".join('{} {}'.format(k, v) for k, v in bgfreqs['freqs'].items()) + "\n\n")
def write(self, mrec):
self.file.write(str(mrec))
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
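# Minimal sketch (assumed paths): copying motifs from one file to another,
# reusing the reader's metadata for the output header.
#
#   reader = MemeReader('in.meme')
#   writer = MemeWriter('out.meme', meta=reader.meta)
#   writer.writeMeta()
#   for mrec in reader:
#       writer.write(mrec)
#   writer.close()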
| mit | -8,825,826,625,067,057,000 | 23.813953 | 124 | 0.597938 | false |
50wu/gpdb | src/backend/gporca/concourse/build_and_test.py | 10 | 3342 | #!/usr/bin/python
import optparse
import os
import shutil
import subprocess
import sys
def num_cpus():
# Use multiprocessing module, available in Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
pass
# Get POSIX system config value for number of processors.
posix_num_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
if posix_num_cpus != -1:
return posix_num_cpus
# Guess
return 2
def install_dependencies(dependencies, output_dir):
for dependency in dependencies:
status = install_dependency(dependency, output_dir)
if status:
return status
def install_dependency(dependency_name, output_dir):
return subprocess.call(
["tar -xzf " + dependency_name + "/*.tar.gz -C " + output_dir], shell=True)
def cmake_configure(src_dir, build_type, output_dir, cxx_compiler = None, cxxflags = None):
if os.path.exists("build"):
shutil.rmtree("build")
os.mkdir("build")
cmake_args = ["cmake",
"-D", "CMAKE_INSTALL_PREFIX=" + output_dir,
"-D", "CMAKE_BUILD_TYPE=" + build_type]
if cxx_compiler:
cmake_args.append("-D")
cmake_args.append("CMAKE_CXX_COMPILER=" + cxx_compiler)
if cxxflags:
cmake_args.append("-D")
cmake_args.append("CMAKE_CXX_FLAGS=" + cxxflags)
cmake_args.append("../" + src_dir)
cmake_command = " ".join(cmake_args)
if os.path.exists('/opt/gcc_env.sh'):
cmake_command = "source /opt/gcc_env.sh && " + cmake_command
print cmake_command
return subprocess.call(cmake_command, cwd="build", shell=True)
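# Illustrative note (assumed arguments): for build_type='RELEASE' and
# output_dir='install', the command assembled above is roughly
#   cmake -D CMAKE_INSTALL_PREFIX=install -D CMAKE_BUILD_TYPE=RELEASE ../
# optionally prefixed with 'source /opt/gcc_env.sh && ' and run inside ./build.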
def make():
return subprocess.call(["make",
"-j" + str(num_cpus()),
"-l" + str(2 * num_cpus()),
],
cwd="build",
env=ccache_env()
)
def ccache_env():
env = os.environ.copy()
env['CCACHE_DIR'] = os.getcwd() + '/.ccache'
return env
def run_tests():
return subprocess.call(["ctest",
"--output-on-failure",
"-j" + str(num_cpus()),
"--test-load", str(4 * num_cpus()),
],
cwd="build")
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
parser.add_option("--skiptests", dest="skiptests", action="store_true", default=False)
(options, args) = parser.parse_args()
# install deps for building
status = install_dependencies(args, "/usr/local")
if status:
return status
status = cmake_configure("",
options.build_type,
options.output_dir,
options.compiler,
options.cxxflags)
if status:
return status
status = make()
if status:
return status
if not options.skiptests:
status = run_tests()
if status:
return status
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -4,175,105,556,896,641,500 | 29.381818 | 91 | 0.566427 | false |
fin/froide | froide/publicbody/csv_import.py | 1 | 3665 | # -*- encoding: utf-8 -*-
import requests
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.six import StringIO, BytesIO, PY3
from taggit.utils import parse_tags
if PY3:
import csv
else:
import unicodecsv as csv
from froide.publicbody.models import (PublicBody, PublicBodyTag, Jurisdiction, FoiLaw)
User = get_user_model()
class CSVImporter(object):
topic_cache = {}
default_topic = None
jur_cache = {}
def __init__(self):
self.user = User.objects.order_by('id')[0]
self.site = Site.objects.get_current()
def import_from_url(self, url):
response = requests.get(url)
# Force requests to evaluate as UTF-8
response.encoding = 'utf-8'
csv_file = BytesIO(response.content)
self.import_from_file(csv_file)
def import_from_file(self, csv_file):
"""
csv_file should be encoded in utf-8
"""
if PY3:
csv_file = StringIO(csv_file.read().decode('utf-8'))
reader = csv.DictReader(csv_file)
for row in reader:
self.import_row(row)
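    # Hedged example (column names inferred from import_row below; the values
    # are made up): a minimal CSV accepted by this importer could look like
    #
    #   name,email,url,classification,jurisdiction__slug,tags
    #   Example Agency,info@example.org,example.org,Ministry,federal,"health, data"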
def import_row(self, row):
# generate slugs
row['name'] = row['name'].strip()
row['email'] = row['email'].lower()
if row['url'] and not row['url'].startswith(('http://', 'https://')):
row['url'] = 'http://' + row['url']
row['slug'] = slugify(row['name'])
row['classification_slug'] = slugify(row['classification'])
tags = parse_tags(row.pop('tags', ''))
# Backwards compatible handling of topic__slug
topic_slug = row.pop('topic__slug', None)
if topic_slug:
tags.append(self.get_topic(topic_slug))
# resolve foreign keys
row['jurisdiction'] = self.get_jurisdiction(row.pop('jurisdiction__slug'))
parent = row.pop('parent__name', None)
if parent:
row['parent'] = PublicBody.objects.get(slug=slugify(parent))
# get optional values
for n in ('description', 'other_names', 'request_note', 'website_dump'):
row[n] = row.get(n, '')
try:
if 'id' in row and row['id']:
pb = PublicBody.objects.get(id=row['id'])
else:
pb = PublicBody.objects.get(slug=row['slug'])
# If it exists, update it
row.pop('id', None) # Do not update id though
row['_updated_by'] = self.user
PublicBody.objects.filter(id=pb.id).update(**row)
pb.laws.clear()
pb.laws.add(*row['jurisdiction'].laws)
pb.tags.set(*tags)
return
except PublicBody.DoesNotExist:
pass
row.pop('id', None) # Remove id if present
public_body = PublicBody(**row)
public_body._created_by = self.user
public_body._updated_by = self.user
public_body.confirmed = True
public_body.site = self.site
public_body.save()
public_body.laws.add(*row['jurisdiction'].laws)
public_body.tags.set(*list(tags))
def get_jurisdiction(self, slug):
if slug not in self.jur_cache:
jur = Jurisdiction.objects.get(slug=slug)
laws = FoiLaw.objects.filter(jurisdiction=jur)
jur.laws = laws
self.jur_cache[slug] = jur
return self.jur_cache[slug]
def get_topic(self, slug):
if slug not in self.topic_cache:
self.topic_cache[slug] = PublicBodyTag.objects.get(slug=slug, is_topic=True)
return self.topic_cache[slug]
| mit | 4,057,306,953,763,655,700 | 32.623853 | 88 | 0.583356 | false |
OpenTransportDataProject/ckanext-customharvesters | ckanext/dcat/harvesters/createorg.py | 1 | 2400 | # Description: create harvested organizations from orgfile.txt
# Author: Shanshan Jiang, last modified 14.12.2016
import json
import urllib
import urllib2
import pprint
print "create organizations"
org_dict = {
'name': 'testagain',
'title': 'test again',
'image_url': ''
}
def create_org(dataset_dict):
data_string = urllib.quote(json.dumps(dataset_dict))
# replace with the correct url of CKAN server
request = urllib2.Request(
'http://127.0.0.1:5000/api/action/organization_create')
# replace with the correct APIkey
request.add_header('Authorization', '765e099f-6d07-48a8-82ba-5a79730a976f')
# Make the HTTP request.
response = urllib2.urlopen(request, data_string)
assert response.code == 200
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read())
assert response_dict['success'] is True
# package_create returns the created package as its result.
created_package = response_dict['result']
pprint.pprint(created_package)
# check if organization exists in the catalogue
def check_org_exist(org_name):
found = False
for org in org_list:
print org
if org == org_name:
print "Found the organization : " + org_name
found = True
break
return found
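# Assumed orgfile.txt layout (inferred from the parsing loop below): each
# organization is described by prefixed lines, e.g.
#
#   org_name:test-org
#   display_name:Test Organization
#   url:http://example.org/logo.png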
# get the list of organizations from the catalogue
org_url='http://127.0.0.1:5000/api/3/action/organization_list'
orglist=urllib.urlopen(org_url).read()
doc = json.loads(orglist)
org_list = doc["result"]
print 'The list of organizations: '
print org_list
with open('orgfile.txt') as f:
content = f.read().decode('utf8').splitlines()
print content
for line in content:
print line
if line.startswith('org_name:'):
org_name = line[9:]
print 'org_name: ' + org_name
org_dict.update({'name': org_name})
if line.startswith('url:'):
org_url = line[4:]
print 'image url: ' + org_url
org_dict.update({'image_url': org_url})
if line.startswith('display_name:'):
display_name = line[13:]
print 'display_name: ' + display_name
org_dict.update({'title': display_name})
print org_dict
if check_org_exist(org_name):
print 'The organization ' + org_name + ' already exists!'
else:
create_org(org_dict)
f.close()
| agpl-3.0 | -3,692,371,861,684,130,000 | 26.586207 | 80 | 0.646667 | false |
tusharmakkar08/AlphaPy | Syntax/syntaxhighlighter.py | 1 | 2865 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# syntaxhighlighter.py
#
# Copyright 2013 tusharmakkar08 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
""" Importing Modules """
import pygments
from pygments import highlight
from pygments.lexers import *
from pygments.formatters import *
from pygments.styles import STYLE_MAP
from pygments.styles import get_all_styles
import os
import webbrowser
"""Main code"""
def guess_lex(code):
return guess_lexer(code)
def guess_lex_file(name,code):
return guess_lexer_for_filename(name,code)
def Highlight(name):
k="pygmentize %s"%(name)
os.system(k)
def pref_style():
styles = list(get_all_styles())
print "Choose from one of the styles"
count=1
for i in styles:
print count,":",i
count+=1
k=input()
return styles[k-1]
def html_out(name,k):
"""HTML printed"""
styles = list(get_all_styles())
m=styles[k-1]
print m
new=""
for i in name:
if i==".":
break
new+=i
stri="pygmentize -O full,style="+m+" -o "+new+".html "+name
print stri
os.system(stri)
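# Illustrative note (assumed inputs): html_out('test.py', 5) picks the fifth
# style returned by get_all_styles() and shells out to something like
#   pygmentize -O full,style=<style> -o test.html test.py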
def show_html(name):
new=""
for i in name:
if i==".":
break
new+=i
url=new+".html"
stri="libreoffice --writer -o %s"%(url)
os.system(stri)
def open_html(name):
newt=2 # open in a new tab, if possible
new=""
for i in name:
if i==".":
break
new+=i
url=new+".html"
webbrowser.open(url,new=newt)
def rtf_out(name,k):
"""Rich text format"""
styles = list(get_all_styles())
m=styles[k-1]
new=""
for i in name:
if i==".":
break
new+=i
stri="pygmentize -O full,style="+m+" -o "+new+".rtf "+name
os.system(stri)
def open_rtf(name):
new=""
for i in name:
if i==".":
break
new+=i
url=new+".rtf"
stri="libreoffice --writer -o %s"%(url)
os.system(stri)
def copy_clipboard(name,flag):
"""For directly cutting paste to different pahes like powerpoint etc"""
new=""
for i in name:
if i==".":
break
new+=i
if flag==1:
stri="xclip -in -selection c "+new+".html"
else:
stri="xclip -in -selection c "+new+".rtf"
os.system(stri)
"""Code Testing"""
#t=raw_input("Enter filename\n")
#rtf_out("test.py",5)
#copy_clipboard(t,1)
#open_rtf(t)
#print pref_style()
| mit | 668,118,477,405,931,000 | 20.222222 | 72 | 0.669459 | false |
mjtamlyn/django-denorm | test_project/test_app/tests.py | 1 | 17380 | from djangosanetesting import cases
from django.contrib.auth.models import User,Permission
from django.contrib.contenttypes.models import ContentType
import denorm
from denorm import denorms
import models
class TestSkip(cases.DestructiveDatabaseTestCase):
"""
Tests for the skip feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
post = models.SkipPost(text='Here be ponies.')
post.save()
self.post = post
# TODO: Enable and check!
# Unsure on how to test this behaviour. It results in an endless loop:
# update -> trigger -> update -> trigger -> ...
#
#def test_without_skip(self):
    #    # This results in an infinite loop on SQLite.
# comment = SkipCommentWithoutSkip(post=self.post, text='Oh really?')
# comment.save()
#
# denorm.flush()
    # TODO: Check if an infinite loop happens and stop it.
def test_with_skip(self):
# This should not result in an endless loop.
comment = models.SkipCommentWithSkip(post=self.post, text='Oh really?')
comment.save()
denorm.flush()
def test_meta_skip(self):
"""Test a model with the attribute listed under denorm_always_skip."""
comment = models.SkipCommentWithAttributeSkip(post=self.post, text='Yup, and they have wings!')
comment.save()
denorm.flush()
class TestDenormalisation(cases.DestructiveDatabaseTestCase):
"""
Tests for the denormalisation fields.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
self.testuser = User.objects.create_user("testuser","testuser","testuser")
self.testuser.is_staff = True
ctype = ContentType.objects.get_for_model(models.Member)
Permission.objects.filter(content_type=ctype).get(name='Can change member').user_set.add(self.testuser)
self.testuser.save()
def tearDown(self):
# delete all model instances
self.testuser.delete()
models.Attachment.objects.all().delete()
models.Post.objects.all().delete()
models.Forum.objects.all().delete()
def test_depends_related(self):
"""
Test the DependsOnRelated stuff.
"""
# Make a forum, check it's got no posts
f1 = models.Forum.objects.create(title="forumone")
self.assertEqual(f1.post_count, 0)
# Check its database copy too
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Add a post
p1 = models.Post.objects.create(forum=f1)
# Has the post count updated?
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
denorm.flush()
# Check its title, in p1 and the DB
self.assertEqual(p1.forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
# Update the forum title
f1.title = "forumtwo"
f1.save()
denorm.flush()
# Has the post's title changed?
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumtwo")
# Add and remove some posts and check the post count
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
p1.delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
# Delete everything, check once more.
models.Post.objects.all().delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Make an orphaned post, see what its title is.
# Doesn't work yet - no support for null FKs
#p4 = Post.objects.create(forum=None)
#self.assertEqual(p4.forum_title, None)
def test_dependency_chains(self):
# create a forum, a member and a post
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "memberone")
# change the member's name
m1.name = "membertwo"
m1.save()
denorm.flush()
# check again
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "membertwo")
def test_trees(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo",parent_forum=f1)
f3 = models.Forum.objects.create(title="forumthree",parent_forum=f2)
denorm.flush()
self.assertEqual(f1.path,'/forumone/')
self.assertEqual(f2.path,'/forumone/forumtwo/')
self.assertEqual(f3.path,'/forumone/forumtwo/forumthree/')
f1.title = 'someothertitle'
f1.save()
denorm.flush()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
f3 = models.Forum.objects.get(id=f3.id)
self.assertEqual(f1.path,'/someothertitle/')
self.assertEqual(f2.path,'/someothertitle/forumtwo/')
self.assertEqual(f3.path,'/someothertitle/forumtwo/forumthree/')
def test_reverse_fk_null(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
models.Attachment.objects.create()
denorm.flush()
def test_bulk_update(self):
"""
Test the DependsOnRelated stuff.
"""
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
p1 = models.Post.objects.create(forum=f1)
p2 = models.Post.objects.create(forum=f2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumtwo")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.update(forum=f1)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Forum.objects.update(title="oneforall")
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "oneforall")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "oneforall")
def test_no_dependency(self):
m1 = models.Member.objects.create(first_name="first",name="last")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name,"first last")
models.Member.objects.filter(id=m1.id).update(first_name="second")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name,"second last")
def test_self_backward_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1,)
p2 = models.Post.objects.create(forum=f1,response_to=p1)
p3 = models.Post.objects.create(forum=f1,response_to=p1)
p4 = models.Post.objects.create(forum=f1,response_to=p2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).response_count, 3)
self.assertEqual(models.Post.objects.get(id=p2.id).response_count, 1)
self.assertEqual(models.Post.objects.get(id=p3.id).response_count, 0)
self.assertEqual(models.Post.objects.get(id=p4.id).response_count, 0)
def test_m2m_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1,title="post1")
m1 = models.Member.objects.create(first_name="first1",name="last1")
denorm.flush()
m1.bookmarks.add(p1)
denorm.flush()
self.assertTrue('post1' in models.Member.objects.get(id=m1.id).bookmark_titles)
p1.title = "othertitle"
p1.save()
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
p2 = models.Post.objects.create(forum=f1,title="thirdtitle")
m1.bookmarks.add(p2)
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
m1.bookmarks.remove(p1)
denorm.flush()
self.assertTrue('othertitle' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
def test_middleware(self):
# FIXME, this test currently does not work with a transactional
# database, so it's skipped for now.
return
# FIXME, set and de-set middleware values
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(first_name="first1",name="last1")
p1 = models.Post.objects.create(forum=f1,author=m1)
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last1")
self.client.login(username="testuser",password="testuser")
self.client.post("/admin/denorm_testapp/member/%s/"%(m1.pk),
{'name':'last2','first_name':'first2'})
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last2")
def test_countfield(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
p2 = models.Post.objects.create(forum=f2)
p3 = models.Post.objects.create(forum=f2)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 2)
p2.forum = f1
p2.save()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.filter(pk=p3.pk).update(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
def test_foreignkey(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
m1 = models.Member.objects.create(first_name="first1",name="last1")
p1 = models.Post.objects.create(forum=f1,author=m1)
a1 = models.Attachment.objects.create(post=p1)
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f1)
a2 = models.Attachment.objects.create()
self.assertEqual(models.Attachment.objects.get(id=a2.id).forum, None)
# Change forum
p1.forum = f2
p1.save()
denorm.flush()
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f2)
def test_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
m2 = models.Member.objects.create(name="membertwo")
p2 = models.Post.objects.create(forum=f1,author=m2)
denorm.flush()
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
self.assertTrue(m2 in models.Forum.objects.get(id=f1.id).authors.all())
p2.delete()
denorm.flush()
self.assertTrue(m2 not in models.Forum.objects.get(id=f1.id).authors.all())
def test_denorm_rebuild(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1,author=m1)
denorm.denorms.rebuildall()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.post_count, 1)
self.assertEqual(f1.authors.all()[0],m1)
def test_denorm_subclass(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1,author=m1)
self.assertEqual(f1.tags_string, '')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagone', content_object=f1)
models.Tag.objects.create(name='tagtwo', content_object=f1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagthree', content_object=p1)
t4 = models.Tag.objects.create(name='tagfour', content_object=p1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagfour, tagthree')
t4.content_object = f1
t4.save()
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagfour, tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagthree')
def test_cache_key_field_backward(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.create(forum=f1)
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1,f1.cachekey)
self.assertEqual(ck2,f2.cachekey)
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.forum = f2
p1.save()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1,f1.cachekey)
self.assertNotEqual(ck2,f2.cachekey)
def test_cache_key_field_forward(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(title='initial_title',forum=f1)
a1 = models.Attachment.objects.create(post=p1)
a2 = models.Attachment.objects.create(post=p1)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey,a2.cachekey)
ck1 = a1.cachekey
ck2 = a2.cachekey
p1.title = 'new_title'
p1.save()
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(ck1,a1.cachekey)
self.assertNotEqual(ck2,a2.cachekey)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey,a2.cachekey)
def test_cache_key_field_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(title='initial_title',forum=f1)
m1 = models.Member.objects.get(id=m1.id)
ck1 = m1.cachekey
m1.bookmarks.add(p1)
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1,m1.cachekey)
ck1 = m1.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.title = 'new_title'
p1.save()
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1,m1.cachekey)
| bsd-3-clause | -3,809,027,810,471,516,000 | 37.030635 | 111 | 0.637112 | false |
dvdotsenko/git_http_backend.py | test_git_http_backend.py | 1 | 4625 | import os
import sys
import threading
import socket
import tempfile
import shutil
import random
import time
try:
# 3.x style module
import urllib.request as urlopenlib
except:
# 2.x style module
import urllib as urlopenlib
import git_http_backend
import cherrypy as wsgiserver
import subprocess
def set_up_server(remote_base_path):
# choosing free port
s = socket.socket()
s.bind(('',0))
ip, port = s.getsockname()
s.close()
del s
print("Chosen URL is http://%s:%s/" % (ip, port))
# setting up the server.
server = wsgiserver.CherryPyWSGIServer(
(ip, port),
git_http_backend.assemble_WSGI_git_app(remote_base_path)
)
ip = 'localhost' # the IP the socket yields is '0.0.0.0' which is not useful for testing.
return ip, port, server
def test_smarthttp(url, base_path):
# this tests roundtrip -
# new repo > push up > clone down > push up > pull to original.
repo_one_path = os.path.join(base_path, 'repoone')
repo_two_path = os.path.join(base_path, 'repotwo')
line_one = 'This is a test\n'
line_two = 'Another line\n'
file_name = 'testfile.txt'
reponame = 'name%sname' % int(time.time())
large_file_name = 'largetestfile.bin'
# create local repo
print("== creating first local repo and adding content ==")
os.mkdir(repo_one_path)
os.chdir(repo_one_path)
subprocess.call('git init', shell=True)
f = open(file_name, 'w')
f.write(line_one)
f.close()
subprocess.call('git add %s' % file_name, shell=True)
subprocess.call('git commit -m "Initial import"', shell=True)
subprocess.call('git push http://%s/%s master' % (url, reponame), shell=True)
os.chdir('..')
# second local repo
print("== cloning to second local repo and verifying content, adding more ==")
subprocess.call('git clone http://%s/%s repotwo' % (url,reponame), shell=True)
assert(os.path.isdir(repo_two_path))
os.chdir(repo_two_path)
assert(file_name in os.listdir('.'))
lines = open(file_name).readlines()
print "lines are %s" % lines
assert(line_one in lines)
lines.append(line_two)
f = open(file_name, 'w')
f.writelines(lines)
f.close()
f = open(large_file_name, 'wb')
size = 1000000
while size:
f.write(chr(random.randrange(0,255)))
size -= 1
f.close()
subprocess.call('git add %s %s' % (file_name, large_file_name), shell=True)
subprocess.call('git commit -m "Changing the file"', shell=True)
subprocess.call('git push origin master', shell=True)
os.chdir('..')
# back to original local repo
print("== pulling to first local repo and verifying added content ==")
os.chdir(repo_one_path)
subprocess.call('git pull http://%s/%s master' % (url,reponame), shell=True)
assert(set([file_name,large_file_name]).issubset(os.listdir('.')))
assert(set([line_one,line_two]).issubset(open(file_name).readlines()))
print("=============\n== SUCCESS ==\n=============\n")
def server_runner(s):
try:
s.start()
except KeyboardInterrupt:
pass
finally:
s.stop()
def server_and_client(base_path):
remote_base_path = os.path.join(base_path, 'reporemote')
ip, port, server = set_up_server(remote_base_path)
t = threading.Thread(None, server_runner, None, [server])
t.daemon = True
t.start()
try:
test_smarthttp('%s:%s' % (ip, port), base_path)
except KeyboardInterrupt:
pass
finally:
server.stop()
shutil.rmtree(base_path, True)
def server_only(base_path):
remote_base_path = os.path.join(base_path, 'reporemote')
ip, port, server = set_up_server(remote_base_path)
try:
server.start()
except KeyboardInterrupt:
pass
finally:
server.stop()
shutil.rmtree(base_path, True)
def client_only(base_path, url):
try:
test_smarthttp(url, base_path)
except KeyboardInterrupt:
pass
finally:
shutil.rmtree(base_path, True)
if __name__ == "__main__":
base_path = tempfile.mkdtemp()
print("base path is %s" % base_path)
if '--client' in sys.argv:
url = sys.argv[-1]
client_only(base_path, url)
elif '--server' in sys.argv:
server_only(base_path)
elif '--help' in sys.argv:
print('Options: "--client url", "--server" Send no options for both server and client.')
else:
server_and_client(base_path) | gpl-2.0 | 5,822,722,147,818,892,000 | 30.584507 | 96 | 0.604541 | false |
novoid/tstest | tagstore-src/tsgui/tagcompleter.py | 1 | 17006 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
## this file is part of tagstore, an alternative way of storing and retrieving information
## Copyright (C) 2010 Karl Voit, Christoph Friedl, Wolfgang Wintersteller
##
## This program is free software; you can redistribute it and/or modify it under the terms
## of the GNU General Public License as published by the Free Software Foundation; either
## version 3 of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
## without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along with this program;
## if not, see <http://www.gnu.org/licenses/>.
'''
Copyright (c) 2009 John Schember
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
'''
import time
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt, QObject, SIGNAL
from PyQt4.QtGui import QLineEdit, QCompleter, QStringListModel, QWidget
from tscore.specialcharhelper import SpecialCharHelper
from tscore.tsconstants import TsConstants
class TagCompleterWidget(QObject):
"""
widget in lineEdit-style with integrated qcompleter
"""
def __init__(self, max_tags, expiry_prefix=None, tag_list=None, parent=None, separator=",", show_datestamp=False):
QWidget.__init__(self, parent)
self.__completer_active = False
self.__max_tags = max_tags
self.__tag_separator = separator
self.__tag_list = tag_list
self.__parent = parent
self.__tag_line = QLineEdit(self.__parent)
#self.__tag_line = TagLineEdit(self.__parent)
self.__show_datestamp = show_datestamp
self.__datestamp_format = TsConstants.DATESTAMP_FORMAT_DAY
self.__expiry_prefix = expiry_prefix
## flag, if the line should be checked of emptiness
self.__check_not_empty = False
self.__check_tag_limit = False
self.__restricted_vocabulary = False
## the latest activated suggestion
self.__activated_text = None
# value of the actual datestamp
self.__datestamp = None
self.__datestamp_hidden = False
self.__completer = QCompleter(self.__tag_list, self);
self.__completer.setCaseSensitivity(Qt.CaseInsensitive)
self.__completer.setWidget(self.__tag_line)
#self.__handle_datestamp()
self.connect(self.__tag_line, SIGNAL("textChanged(QString)"), self.__text_changed_by_user)
self.connect(self.__completer, SIGNAL("activated(QString)"), self.__text_activated)
self.connect(self.__completer, SIGNAL("highlighted(QString)"), self.__text_highlighted)
def __text_highlighted(self, item_name):
"""
a suggestion has been selected in the dropdownbox
"""
# set this variable to True just to know, that
# this value comes from the completer and not from the user
self.__completer_active = True
self.__text_selected(item_name)
self.__completer_active = False
def __handle_datestamp(self, is_hidden):
"""
if the show_datestamp flag is set to True, provide an automatic datestamp on the tagline
"""
if self.__show_datestamp:
self.__datestamp = time.strftime(self.__datestamp_format)
if not is_hidden:
self.__tag_line.clear()
self.__tag_line.setText(self.__datestamp)
def set_datestamp_format(self, format, is_hidden):
self.__datestamp_format = format
self.__datestamp_hidden = is_hidden
self.__handle_datestamp(is_hidden)
def show_datestamp(self, show):
self.__show_datestamp = show
self.__handle_datestamp(show)
def clear_line(self):
"""
clear the tagline ...
if auto datestamp is set to "on" a fresh stamp will be placed into the tagline
"""
self.__tag_line.clear()
if self.__show_datestamp:
self.__handle_datestamp(self.__datestamp_hidden)
def set_check_not_empty(self, check_necessary):
"""
set this to True, if there should be sent a signal that indicates
that the tagline is not empty anymore
"""
self.__check_not_empty = True
def set_restricted_vocabulary(self, is_restricted):
"""
use True/False to turn the restricted function on/off
"""
self.__restricted_vocabulary = is_restricted
def select_line(self):
"""
select the tagline ...
"""
self.__tag_line.selectAll()
self.__tag_line.setFocus(QtCore.Qt.OtherFocusReason)
def __text_changed_by_user(self, text):
# create a QByteArray in utf8
all_text = text.toUtf8()
# make a python string out of it
all_text = str(all_text)
# convert the python string tu unicode utf-8
all_text = unicode(all_text, "utf-8")
if self.__check_not_empty:
if all_text is not None and all_text != "":
self.emit(QtCore.SIGNAL("line_empty"), False)
else:
self.emit(QtCore.SIGNAL("line_empty"), True)
text = all_text[:self.__tag_line.cursorPosition()]
## remove whitespace and filter out duplicates by using a set
tag_set = set([])
for tag in all_text.split(self.__tag_separator):
strip_tag = tag.strip()
if strip_tag != "":
tag_set.add(strip_tag)
max_tags = self.__max_tags
if self.__datestamp_hidden:
max_tags = max_tags - 1;
## do not proceed if the max tag count is reached
if len(tag_set) > max_tags:
self.emit(QtCore.SIGNAL("tag_limit_reached"), True)
self.__check_tag_limit = True
return
else:
if self.__check_tag_limit:
self.emit(QtCore.SIGNAL("tag_limit_reached"), False)
self.__check_tag_limit = False
prefix = text.split(self.__tag_separator)[-1].strip()
if not self.__completer_active:
self.__update_completer(tag_set, prefix)
def __update_completer(self, tag_set, completion_prefix):
if self.__tag_list is None:
return
tags = list(set(self.__tag_list).difference(tag_set))
#tags = list(self.__tag_list)
model = QStringListModel(tags, self)
self.__completer.setModel(model)
self.__completer.setCompletionPrefix(completion_prefix)
if self.__restricted_vocabulary:
self.__check_vocabulary(tag_set, completion_prefix)
if completion_prefix.strip() != '':
## use the default completion algorithm
self.__completer.complete()
def __check_finished_tags(self, typed_tags_list):
"""
use this method to control all typed tags. this means all tags terminated with a comma
"""
pass
def __check_in_completion_list(self, tag):
"""
if a written tag equals a tag of the completion list - the tag will be removed from the completion list
so the completer will return a completion count of 0 for this tag.
in this case there would be displayed an error message at the dialog (when controlled vocab is activated)
so check manually, if the provided tag is contained in the suggestion_list
"""
#for sug_tag in self.__tag_list:
# if sug_tag == tag:
# return True
#return False
return tag in self.__tag_list
def __check_vocabulary(self, tag_set, completion_prefix):
"""
have a look at the entered tag to be completed.
if restricted vocabulary is turned on:
datestamps do not have to be checked.
"""
not_allowed_tags_count = 0
no_completion_found = False
stripped_text = unicode(self.__tag_line.text()).strip()
##when a tag separator is on the last position, there should have been entered a new tag
##check this tag for its correctness
if len(stripped_text) > 0:
##check if all written tags are allowed (incl. datestamps an expiry tags)
for tag in tag_set:
## tag can be a datestamp -> OK
if not SpecialCharHelper.is_datestamp(tag) and tag != "":
## tag can be an expiry tag -> OK
# if self.__expiry_prefix is not None and not SpecialCharHelper.is_expiry_tag(self.__expiry_prefix, tag):
if self.__expiry_prefix is None or not SpecialCharHelper.is_partial_expiry_tag(self.__expiry_prefix, tag):
if unicode(tag) not in self.__tag_list:
not_allowed_tags_count += 1
if(completion_prefix.strip() == ""):
## if the prefix is an empty string - manually set the completion_count to 0
## because the completer would return the whole number of tags in its suggestion list
completion_count = 0
else:
completion_count = self.__completer.completionCount()
if self.__restricted_vocabulary and completion_count == 0:
## additionally check if the prefix equals a tag from the suggestion list
## this has to be done, because we do not get a completionCount > 0 if the prefix equals a given tag
#if completion_prefix not in self.__tag_list:
if completion_prefix is not None and len(completion_prefix) > 0 and completion_prefix.strip() != "":
## just send the signal if the tag is no datestamp AND if it is no full tag
if not SpecialCharHelper.is_datestamp(completion_prefix) and not self.__check_in_completion_list(completion_prefix):
if not SpecialCharHelper.is_partial_expiry_tag(self.__expiry_prefix, completion_prefix):
no_completion_found = True
## there are tags (terminated with comma) which are not in the allowed tag_list
if not_allowed_tags_count > 1:
self.emit(QtCore.SIGNAL("no_completion_found"), True)
return
if not_allowed_tags_count > 0:
## in this case the user has entered a not allowed tag and terminated it with a comma to mark it as a tag
## the completion count is 0 because there is nothing after the last comma in the taglist
if completion_count == 0:
self.emit(QtCore.SIGNAL("no_completion_found"), True)
return
## it could be the case, that the user is still typing an allowed tag
## so check, if the completer has a possible completion
## if not -> send the signal
if no_completion_found:
self.emit(QtCore.SIGNAL("no_completion_found"), True)
return
## everytime there is no completion found, emit the signal
elif no_completion_found:
self.emit(QtCore.SIGNAL("no_completion_found"), True)
return
## in this case everything is fine
self.emit(QtCore.SIGNAL("no_completion_found"), False)
def __text_selected(self, text):
self.__activated_text = text
cursor_pos = self.__tag_line.cursorPosition()
before_text = unicode(self.__tag_line.text())[:cursor_pos]
#after_text = unicode(self.__tag_line.text())[cursor_pos:]
prefix_len = len(before_text.split(self.__tag_separator)[-1].strip())
self.__tag_line.setText("%s%s" % (before_text[:cursor_pos - prefix_len], text))
self.__tag_line.setCursorPosition(cursor_pos - prefix_len + len(text) + 2)
def __text_activated(self, text):
"""
a suggestion has been choosen by the user
"""
self.__text_selected(text)
self.emit(QtCore.SIGNAL("activated"))
def get_tag_list(self):
tag_string = unicode(self.__tag_line.text())
result = set([])
tag_list = tag_string.split(self.__tag_separator)
for tag in tag_list:
strip_tag = tag.strip()
if strip_tag != "":
result.add(strip_tag)
# if the datestamp is hidden, add it manually to the taglist
if self.__datestamp_hidden:
result.add(self.__datestamp)
return result
def get_tag_line(self):
return self.__tag_line
def get_completer(self):
return self.__completer
def set_enabled(self, enable):
self.__tag_line.setEnabled(enable)
def set_text(self, text):
self.__tag_line.setText(text)
def set_tag_completion_list(self, tag_list):
self.__tag_list = tag_list
self.__completer.setModel(QtGui.QStringListModel(QtCore.QStringList(tag_list)))
def get_tag_completion_list(self):
return self.__tag_list
def is_empty(self):
if self.__tag_line.text() == "":
return True
else:
return False
def set_place_holder_text(self, text):
self.__tag_line.setPlaceholderText(text)
#def set_size(self, qrect):
# self.setGeometry(qrect)
# self.__tag_line.setGeometry(qrect)
'''
class TsListWidget(QtGui.QListWidget):
def __init__(self, parent=None):
super(TsListWidget, self).__init__(parent)
def keyPressEvent(self, event):
## throw a custom signal, when enter (on the keypad) or return has been hit
key = event.key()
if key == QtCore.Qt.Key_Return or key == QtCore.Qt.Key_Enter:
self.emit(QtCore.SIGNAL("return_pressed"))
## pass the signal to the normal parent chain
QtGui.QListWidget.keyPressEvent(self, event)
class ComboboxCompleter(QtGui.QWidget):
__pyqtSignals__ = ("text_edited(QString)",
"completion_activated(QString)",
"preference_activated(QString)")
def __init__(self, parent=None):
"""
Constructor
"""
QtGui.QWidget.__init__(self, parent)
self.__combobox = QtGui.QComboBox(parent)
self.__combobox.connect(self.__combobox, QtCore.SIGNAL("activated(QString)"), self, QtCore.SIGNAL("preference_activated(QString)"))
self.__lineedit = QtGui.QLineEdit(parent)
self.__lineedit.setStyleSheet("QLineEdit {border: none}")
self.__completer = QtGui.QCompleter()
self.__completer.setWidget(self.__lineedit)
self.__completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.__completer.connect(self.__completer, QtCore.SIGNAL("activated(QString)"), self, QtCore.SIGNAL("completion_activated(QString)"))
self.__lineedit.connect(self.__lineedit, QtCore.SIGNAL("textEdited(QString)"), self, QtCore.SIGNAL("text_edited(QString)"))
QtCore.QMetaObject.connectSlotsByName(parent)
def set_geometry(self, q_rect):
"""
sets the controls geometry property: position, height, width
"""
self.__combobox.setGeometry(q_rect)
self.__lineedit.setGeometry(QtCore.QRect(q_rect.left()+1, q_rect.top()+1, q_rect.width()-20, q_rect.height()-2))
def show(self):
"""
sets the control visible
"""
self.__combobox.show()
self.__lineedit.show()
def hide(self):
"""
set the control invisible
"""
self.__combobox.hide()
self.__lineedit.hide()
def set_enabled(self, enabled=True):
"""
enables/disabled the control
"""
self.__combobox.setEnabled(enabled)
self.__lineedit.setEnabled(enabled)
def set_preferences(self, list):
"""
sets the controls dropdown (combobox) list
"""
self.__combobox.clear()
self.__combobox.addItems(QtCore.QStringList(list))
def set_lookup_list(self, list):
"""
sets the controls lookup list (completer)
"""
self.__completer.setModel(QtGui.QStringListModel(QtCore.QStringList(list)))
'''
## end
| gpl-3.0 | 8,625,634,577,607,266,000 | 39.684211 | 141 | 0.604728 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/distutils/sysconfig.py | 1 | 20130 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <[email protected]>
"""
__revision__ = "$Id: sysconfig.py 65860 2008-08-19 17:56:33Z antoine.pitrou $"
import io
import os
import re
import sys
from .errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
project_base = os.path.dirname(os.path.abspath(sys.executable))
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
base = os.path.dirname(os.path.abspath(sys.executable))
if plat_specific:
inc_dir = base
else:
inc_dir = os.path.join(base, "Include")
if not os.path.exists(inc_dir):
inc_dir = os.path.join(os.path.dirname(base), "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "mac":
if plat_specific:
return os.path.join(prefix, "Mac", "Include")
else:
return os.path.join(prefix, "Include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(PREFIX, "Lib", "site-packages")
elif os.name == "mac":
if plat_specific:
if standard_lib:
return os.path.join(prefix, "Lib", "lib-dynload")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(PREFIX, "Lib")
else:
return os.path.join(PREFIX, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if 'CC' in os.environ:
cc = os.environ['CC']
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
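# Illustrative example (editorial, not part of the original module): a
# pyconfig.h fragment such as
#     #define HAVE_UNISTD_H 1
#     /* #undef HAVE_LIBREADLINE */
# parses to {'HAVE_UNISTD_H': 1, 'HAVE_LIBREADLINE': 0}.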
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
if "$" in v:
notdone[n] = v
else:
try: v = int(v)
except ValueError: pass
done[n] = v
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# save the results in the global dictionary
g.update(done)
return g
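# Illustrative example (editorial, not part of the original module): a
# Makefile containing
#     CC=gcc
#     OPT=-O2
#     PY_CFLAGS=$(OPT) -DNDEBUG
# parses to {'CC': 'gcc', 'OPT': '-O2', 'PY_CFLAGS': '-O2 -DNDEBUG'}; the
# interpolation loop above resolves $(...) references over multiple passes.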
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while True:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
else:
break
return s
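# Illustrative example (editorial, not part of the original module):
#     expand_makefile_vars("$(CC) $(OPT)", {"CC": "gcc"}) -> "gcc "
# "$(OPT)" is absent from 'vars' and therefore expands to the empty string,
# as described in the docstring above.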
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
# load the installed Makefile:
try:
filename = get_makefile_filename()
parse_makefile(filename, g)
except IOError as msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# load the installed pyconfig.h:
try:
filename = get_config_h_filename()
parse_config_h(io.open(filename), g)
except IOError as msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# On MacOSX we need to check the setting of the environment variable
# MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
# it needs to be compatible.
# If it isn't set we set it to the configure-time value
if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in g:
cfg_target = g['MACOSX_DEPLOYMENT_TARGET']
cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
if cur_target == '':
cur_target = cfg_target
os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
elif [int(x) for x in cfg_target.split('.')] > [int(x) for x in cur_target.split('.')]:
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
% (cur_target, cfg_target))
raise DistutilsPlatformError(my_msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if python_build:
g['LDSHARED'] = g['BLDSHARED']
elif get_python_version() < '2.1':
# The following two branches are for 1.5.2 compatibility.
if sys.platform == 'aix4': # what about AIX 3.x ?
# Linker script is in the config directory, not in Modules as the
# Makefile says.
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def _init_mac():
"""Initialize the module as appropriate for Macintosh systems"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
import MacOS
if not hasattr(MacOS, 'runtimemodel'):
g['SO'] = '.ppc.slb'
else:
g['SO'] = '.%s.slb' % MacOS.runtimemodel
# XXX are these used anywhere?
g['install_lib'] = os.path.join(EXEC_PREFIX, "Lib")
g['install_platlib'] = os.path.join(EXEC_PREFIX, "Mac", "Lib")
# These are used by the extension module build
g['srcdir'] = ':'
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
                    flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_config_vars[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_config_vars[key] = flags
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
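# Illustrative example (editorial, not part of the original module):
# get_config_var('SO') typically evaluates to '.so' on POSIX builds (read
# from the installed Makefile) and to '.pyd' on Windows (set in _init_nt).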
| mit | 3,387,479,919,306,659,000 | 35.467391 | 95 | 0.572827 | false |
fgirault/smeuhsocial | apps/threadedcomments/tests/test_views.py | 1 | 26436 | from django.core.urlresolvers import reverse
from django.test import TestCase
from json import loads
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from threadedcomments.models import FreeThreadedComment, ThreadedComment
from threadedcomments.models import TestModel
__all__ = ("ViewsTestCase",)
class ViewsTestCase(TestCase):
def test_freecomment_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
self.client.post(url, {
'comment': 'test1',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test1',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_preview(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
response = self.client.post(url, {
'comment': 'test1',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
def test_freecomment_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit', kwargs={
'edit_id': comment.pk
})
self.client.post(url, {
'comment': 'test1_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test1_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_edit_with_preview(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
website="http://oebfare.com/",
comment="My test free comment!",
ip_address='127.0.0.1',
)
url = reverse('tc_free_comment_edit', kwargs={
'edit_id': comment.pk
})
response = self.client.post(url, {
'comment': 'test1_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/',
'preview': 'True'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://oebfare.com/',
'comment': u'My test free comment!',
'name': u'',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'',
'is_approved': False
})
self.assertEquals(len(response.content) > 0, True)
def test_freecomment_json_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'json'
})
response = self.client.post(url, {
'comment': 'test2',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
tmp = loads(response.content)
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_json_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'json'
})
response = self.client.post(url, {
'comment': 'test2_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
loads(response.content)
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_xml_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'xml'
})
response = self.client.post(url, {
'comment': 'test3', 'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]', 'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test3',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_xml_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'xml'
})
self.client.post(url, {
'comment': 'test2_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id
})
self.client.post(url, {
'comment': 'test4',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test4',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_json_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'json'
})
self.client.post(url, {
'comment': 'test5',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test5',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_xml_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'xml'
})
self.client.post(url, {
'comment': 'test6', 'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test6',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def create_user_and_login(self):
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
return user
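    # Editorial note: the ThreadedComment tests below require an
    # authenticated session, so they call this helper first to create and
    # log in 'testuser'.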
def test_comment_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
self.client.post(url, {
'comment': 'test7',
'next': '/'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test7',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_preview(self):
self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
response = self.client.post(url, {
'comment': 'test7',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
def test_comment_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit', kwargs={
'edit_id': comment.pk,
})
self.client.post(url, {
'comment': 'test7_edited',
'next': '/',
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test7_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_edit_with_preview(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit', kwargs={
'edit_id': comment.pk,
})
response = self.client.post(url, {
'comment': 'test7_edited',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'My test comment!',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_json_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'json'
})
self.client.post(url, {'comment': 'test8'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_json_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'json',
})
response = self.client.post(url, {
'comment': 'test8_edited'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_xml_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'xml'
})
self.client.post(url, {'comment': 'test9'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test9',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_xml_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'xml',
})
self.client.post(url, {'comment': 'test8_edited'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id
})
self.client.post(url, {
'comment': 'test10',
'next': '/'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test10',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_json_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'json'
})
self.client.post(url, {'comment': 'test11'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test11',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_xml_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'xml'
})
self.client.post(url, {'comment': 'test12'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test12',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_freecomment_delete(self):
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
deleted_id = comment.id
url = reverse('tc_free_comment_delete', kwargs={
'object_id': comment.id,
})
response = self.client.post(url, {'next': '/'})
o = response['Location'].split(
'?')[-1] == 'next=/comments/freecomment/%d/delete/' % deleted_id
self.assertEquals(o, True)
# become super user and try deleting comment
user.is_superuser = True
user.save()
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'], 'http://testserver/')
self.assertRaises(
FreeThreadedComment.DoesNotExist,
lambda: FreeThreadedComment.objects.get(id=deleted_id)
)
# re-create comment
comment.save()
response = self.client.get(url, {'next': '/'})
self.assertEquals(len(response.content) > 0, True)
        o = FreeThreadedComment.objects.get(id=deleted_id) is not None
self.assertEquals(o, True)
def test_comment_delete(self):
some_other_guy = User.objects.create_user(
'some_other_guy',
'[email protected]',
'password1',
)
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=some_other_guy,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
deleted_id = comment.id
url = reverse('tc_comment_delete', kwargs={
'object_id': comment.id,
})
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'].split(
'?')[-1], 'next=/comments/comment/%s/delete/' % deleted_id)
user.is_superuser = True
user.save()
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'], 'http://testserver/')
self.assertRaises(
ThreadedComment.DoesNotExist,
lambda: ThreadedComment.objects.get(id=deleted_id)
)
# re-create comment
comment.save()
response = self.client.get(url, {'next': '/'})
self.assertEquals(len(response.content) > 0, True)
        o = ThreadedComment.objects.get(id=deleted_id) is not None
self.assertEquals(o, True)
| mit | 5,914,407,054,020,151,000 | 30.174528 | 76 | 0.511235 | false |
delfick/harpoon | harpoon/helpers.py | 1 | 2085 | from contextlib import contextmanager
from io import StringIO
import tempfile
import logging
import time
import os
log = logging.getLogger("harpoon.helpers")
@contextmanager
def a_temp_file():
"""Yield the name of a temporary file and ensure it's removed after use"""
filename = None
try:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
filename = tmpfile.name
yield tmpfile
finally:
if filename and os.path.exists(filename):
os.remove(filename)
def until(timeout=10, step=0.5, action=None, silent=False):
"""Yield until timeout"""
yield
started = time.time()
while True:
if action and not silent:
log.info(action)
if time.time() - started > timeout:
if action and not silent:
log.error("Timedout %s", action)
return
else:
time.sleep(step)
yield
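# Illustrative usage (editorial, not part of the original module), assuming a
# hypothetical is_healthy() check:
#
#     for _ in until(timeout=30, step=1, action="waiting for container"):
#         if is_healthy():
#             break
#
# The generator yields once immediately, then sleeps `step` seconds between
# subsequent yields until `timeout` is exceeded.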
class memoized_property(object):
"""Decorator to make a descriptor that memoizes it's value"""
def __init__(self, func):
self.func = func
self.name = func.__name__
self.cache_name = "_{0}".format(self.name)
def __get__(self, instance=None, owner=None):
if not instance:
return self
if not getattr(instance, self.cache_name, None):
setattr(instance, self.cache_name, self.func(instance))
return getattr(instance, self.cache_name)
def __set__(self, instance, value):
setattr(instance, self.cache_name, value)
def __delete__(self, instance):
if hasattr(instance, self.cache_name):
delattr(instance, self.cache_name)
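# Illustrative usage (editorial, not part of the original module):
#
#     class Image(object):
#         @memoized_property
#         def manifest(self):
#             return expensive_fetch()   # hypothetical expensive call
#
# The first access computes and stores the value as `_manifest` on the
# instance; deleting the attribute clears the cache so it is recomputed.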
def write_to(output, txt):
"""Write some text to some output"""
if isinstance(txt, bytes) and isinstance(output, StringIO):
output.write(txt.decode("utf-8", "replace"))
elif (
isinstance(txt, str)
and hasattr(output, "file")
and "b" in getattr(output.file, "mode", "w")
):
output.write(txt.encode("utf-8", "replace"))
else:
output.write(txt)
| mit | -888,619,971,368,138,900 | 26.077922 | 78 | 0.606235 | false |
memsharded/conan | conans/test/integration/conanfile_errors_test.py | 1 | 5092 | import unittest
from conans.test.utils.tools import TestClient
class ConanfileErrorsTest(unittest.TestCase):
def copy_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy2("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.user_io.out)
self.assertIn('self.copy2("*.h", dst="include", src=["include","platform"]',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
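    # Editorial note: the recipes in this test case deliberately call a
    # nonexistent helper such as self.copy2, and assert_error=True makes
    # TestClient treat the failing command as expected so each test can
    # assert on the reported method name and line number.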
def copy_error2_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.user_io.out)
self.assertIn('self.copy("*.h", dst="include", src=["include","platform"]',
client.user_io.out)
# It results that the error is different in different Python2/3 and OSs
# self.assertIn("'list' object has no attribute 'replace'", client.user_io.out)
def package_info_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package_info(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package_info() method, line 9",
client.user_io.out)
self.assertIn('self.copy2()',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
def config_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def configure(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("""ERROR: Hello/0.1@lasote/stable: Error in configure() method, line 9
self.copy2()
AttributeError: 'HelloConan' object has no attribute 'copy2'""", client.user_io.out)
def source_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def source(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in source() method, line 9",
client.user_io.out)
self.assertIn('self.copy2()',
client.user_io.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.user_io.out)
def duplicate_requires_test(self):
client = TestClient()
conanfile = '''
[requires]
foo/0.1@user/testing
foo/0.2@user/testing
'''
files = {"conanfile.txt": conanfile}
client.save(files)
client.run("install . --build", assert_error=True)
self.assertIn("ERROR: Duplicated requirement", client.user_io.out)
def duplicate_requires_py_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
requires = "foo/0.1@user/testing", "foo/0.2@user/testing"
'''
files = {"conanfile.py": conanfile}
client.save(files)
client.run("install . --build", assert_error=True)
self.assertIn("Error while initializing requirements. Duplicated requirement",
client.user_io.out)
| mit | -8,369,451,500,046,241,000 | 33.639456 | 92 | 0.597211 | false |
simbha/GAE-appswell | appspot/framework/lib/error_handling.py | 1 | 1373 | """
Appswell Error Handling Lib
functions for handling errors
USAGE
from lib import error_handling
error_details = error_handling.get_error_details()
error_page = error_handling.render_error_page(error_details)
"""
#
# IMPORTS
#
import sys, os, logging, inspect
from os.path import (abspath, dirname, join as pathjoin)
import traceback
VIEW_DIR = abspath(pathjoin( dirname(__file__), '../views' ))
LAYOUT_DIR = pathjoin( VIEW_DIR, 'layouts' )
VIEW_PATH = pathjoin( VIEW_DIR, 'error/default.mako' )
def get_error_details():
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
detail = {
'error_type' : exceptionValue,
'tracelist' : traceback.extract_tb(exceptionTraceback),
'trace' : traceback.format_exc(),
'syspath' : sys.path
}
return detail
def render_error_page(detail):
from framework.vendor.mako.template import Template
from framework.vendor.mako.lookup import TemplateLookup
# create mako objects and render
mako_lookup = TemplateLookup( directories=[LAYOUT_DIR],
output_encoding='utf-8',
encoding_errors='replace' )
mako_template = Template(filename=VIEW_PATH, lookup=mako_lookup)
return mako_template.render_unicode(**detail).encode('utf-8', 'replace')
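# Editorial note: render_error_page() expects the dict produced by
# get_error_details(); the 'error/default.mako' template receives error_type,
# tracelist, trace and syspath as its template variables.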
| mit | 4,580,432,256,818,936,000 | 29.511111 | 76 | 0.654042 | false |
edgedb/edgedb | edb/schema/schema.py | 1 | 63607 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import abc
import collections
import functools
import itertools
import immutables as immu
from edb import errors
from . import casts as s_casts
from . import functions as s_func
from . import migrations as s_migrations
from . import modules as s_mod
from . import name as sn
from . import objects as so
from . import operators as s_oper
from . import pseudo as s_pseudo
from . import types as s_types
if TYPE_CHECKING:
import uuid
from edb.common import parsing
Refs_T = immu.Map[
uuid.UUID,
immu.Map[
Tuple[Type[so.Object], str],
immu.Map[uuid.UUID, None],
],
]
STD_MODULES = (
sn.UnqualName('std'),
sn.UnqualName('schema'),
sn.UnqualName('math'),
sn.UnqualName('sys'),
sn.UnqualName('cfg'),
sn.UnqualName('cal'),
sn.UnqualName('stdgraphql'),
)
# Specifies the order of processing of files and directories in lib/
STD_SOURCES = (
sn.UnqualName('std'),
sn.UnqualName('schema'),
sn.UnqualName('math'),
sn.UnqualName('sys'),
sn.UnqualName('cfg'),
sn.UnqualName('cal'),
sn.UnqualName('ext'),
)
Schema_T = TypeVar('Schema_T', bound='Schema')
class Schema(abc.ABC):
@abc.abstractmethod
def add_raw(
self: Schema_T,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def add(
self: Schema_T,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def discard(self: Schema_T, obj: so.Object) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def delete(self: Schema_T, obj: so.Object) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def update_obj(
self: Schema_T,
obj: so.Object,
updates: Mapping[str, Any],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
raise NotImplementedError
@abc.abstractmethod
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
raise NotImplementedError
@abc.abstractmethod
def set_obj_field(
self: Schema_T,
obj: so.Object,
field: str,
value: Any,
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def unset_obj_field(
self: Schema_T,
obj: so.Object,
field: str,
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
raise NotImplementedError
@abc.abstractmethod
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
raise NotImplementedError
@overload
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Type[so.Object_T],
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
...
@overload
def get_referrers( # NoQA: F811
self,
scls: so.Object,
*,
scls_type: None = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object]:
...
@abc.abstractmethod
def get_referrers( # NoQA: F811
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
raise NotImplementedError
@abc.abstractmethod
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
raise NotImplementedError
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
@abc.abstractmethod
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
raise NotImplementedError
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
@abc.abstractmethod
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
raise NotImplementedError
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> so.Object:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: None,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Type[so.Object_T],
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> so.Object_T:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: None,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Type[so.Object_T],
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object_T]:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[so.Object_T]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
...
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[so.Object_T]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
return self.get_generic(
name,
default,
module_aliases=module_aliases,
type=type,
condition=condition,
label=label,
sourcectx=sourcectx,
)
@abc.abstractmethod
def get_generic( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
raise NotImplementedError
@abc.abstractmethod
def has_object(self, object_id: uuid.UUID) -> bool:
raise NotImplementedError
@abc.abstractmethod
def has_module(self, module: str) -> bool:
raise NotImplementedError
def get_children(
self,
scls: so.Object_T,
) -> FrozenSet[so.Object_T]:
# Ideally get_referrers needs to be made generic via
# an overload on scls_type, but mypy crashes on that.
return self.get_referrers(
scls,
scls_type=type(scls),
field_name='bases',
)
def get_descendants(
self,
scls: so.Object_T,
) -> FrozenSet[so.Object_T]:
return self.get_referrers(
scls, scls_type=type(scls), field_name='ancestors')
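    # Editorial note: get_children() follows the 'bases' back-references and
    # therefore returns only direct subtypes, while get_descendants() follows
    # 'ancestors' and returns the full transitive closure of subtypes.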
@abc.abstractmethod
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
raise NotImplementedError
@abc.abstractmethod
def get_modules(self) -> Tuple[s_mod.Module, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_last_migration(self) -> Optional[s_migrations.Migration]:
raise NotImplementedError
class FlatSchema(Schema):
_id_to_data: immu.Map[uuid.UUID, Tuple[Any, ...]]
_id_to_type: immu.Map[uuid.UUID, str]
_name_to_id: immu.Map[sn.Name, uuid.UUID]
_shortname_to_id: immu.Map[
Tuple[Type[so.Object], sn.Name],
FrozenSet[uuid.UUID],
]
_globalname_to_id: immu.Map[
Tuple[Type[so.Object], sn.Name],
uuid.UUID,
]
_refs_to: Refs_T
_generation: int
def __init__(self) -> None:
self._id_to_data = immu.Map()
self._id_to_type = immu.Map()
self._shortname_to_id = immu.Map()
self._name_to_id = immu.Map()
self._globalname_to_id = immu.Map()
self._refs_to = immu.Map()
self._generation = 0
def _replace(
self,
*,
id_to_data: Optional[immu.Map[uuid.UUID, Tuple[Any, ...]]] = None,
id_to_type: Optional[immu.Map[uuid.UUID, str]] = None,
name_to_id: Optional[immu.Map[sn.Name, uuid.UUID]] = None,
shortname_to_id: Optional[
immu.Map[
Tuple[Type[so.Object], sn.Name],
FrozenSet[uuid.UUID]
]
],
globalname_to_id: Optional[
immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID]
],
refs_to: Optional[Refs_T] = None,
) -> FlatSchema:
new = FlatSchema.__new__(FlatSchema)
if id_to_data is None:
new._id_to_data = self._id_to_data
else:
new._id_to_data = id_to_data
if id_to_type is None:
new._id_to_type = self._id_to_type
else:
new._id_to_type = id_to_type
if name_to_id is None:
new._name_to_id = self._name_to_id
else:
new._name_to_id = name_to_id
if shortname_to_id is None:
new._shortname_to_id = self._shortname_to_id
else:
new._shortname_to_id = shortname_to_id
if globalname_to_id is None:
new._globalname_to_id = self._globalname_to_id
else:
new._globalname_to_id = globalname_to_id
if refs_to is None:
new._refs_to = self._refs_to
else:
new._refs_to = refs_to
new._generation = self._generation + 1
return new # type: ignore
def _update_obj_name(
self,
obj_id: uuid.UUID,
sclass: Type[so.Object],
old_name: Optional[sn.Name],
new_name: Optional[sn.Name],
) -> Tuple[
immu.Map[sn.Name, uuid.UUID],
immu.Map[Tuple[Type[so.Object], sn.Name], FrozenSet[uuid.UUID]],
immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID],
]:
name_to_id = self._name_to_id
shortname_to_id = self._shortname_to_id
globalname_to_id = self._globalname_to_id
is_global = not issubclass(sclass, so.QualifiedObject)
has_sn_cache = issubclass(sclass, (s_func.Function, s_oper.Operator))
if old_name is not None:
if is_global:
globalname_to_id = globalname_to_id.delete((sclass, old_name))
else:
name_to_id = name_to_id.delete(old_name)
if has_sn_cache:
old_shortname = sn.shortname_from_fullname(old_name)
sn_key = (sclass, old_shortname)
new_ids = shortname_to_id[sn_key] - {obj_id}
if new_ids:
shortname_to_id = shortname_to_id.set(sn_key, new_ids)
else:
shortname_to_id = shortname_to_id.delete(sn_key)
if new_name is not None:
if is_global:
key = (sclass, new_name)
if key in globalname_to_id:
vn = sclass.get_verbosename_static(new_name)
raise errors.SchemaError(
f'{vn} is already present in the schema')
globalname_to_id = globalname_to_id.set(key, obj_id)
else:
assert isinstance(new_name, sn.QualName)
if (
not self.has_module(new_name.module)
and new_name.module != '__derived__'
):
raise errors.UnknownModuleError(
f'module {new_name.module!r} is not in this schema')
if new_name in name_to_id:
raise errors.SchemaError(
f'name {new_name!r} is already in the schema')
name_to_id = name_to_id.set(new_name, obj_id)
if has_sn_cache:
new_shortname = sn.shortname_from_fullname(new_name)
sn_key = (sclass, new_shortname)
try:
ids = shortname_to_id[sn_key]
except KeyError:
ids = frozenset()
shortname_to_id = shortname_to_id.set(sn_key, ids | {obj_id})
return name_to_id, shortname_to_id, globalname_to_id
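    # Apply *updates* to the stored field tuple of *obj* and return a new
    # schema; name indexes and the reverse-reference index are kept in sync.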
def update_obj(
self,
obj: so.Object,
updates: Mapping[str, Any],
) -> FlatSchema:
if not updates:
return self
obj_id = obj.id
sclass = type(obj)
all_fields = sclass.get_schema_fields()
object_ref_fields = sclass.get_object_reference_fields()
reducible_fields = sclass.get_reducible_fields()
try:
data = list(self._id_to_data[obj_id])
except KeyError:
data = [None] * len(all_fields)
name_to_id = None
shortname_to_id = None
globalname_to_id = None
orig_refs = {}
new_refs = {}
for fieldname, value in updates.items():
field = all_fields[fieldname]
findex = field.index
if fieldname == 'name':
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(
obj_id,
sclass,
data[findex],
value
)
)
if value is None:
if field in reducible_fields and field in object_ref_fields:
orig_value = data[findex]
if orig_value is not None:
orig_refs[fieldname] = (
field.type.schema_refs_from_data(orig_value))
else:
if field in reducible_fields:
value = value.schema_reduce()
if field in object_ref_fields:
new_refs[fieldname] = (
field.type.schema_refs_from_data(value))
orig_value = data[findex]
if orig_value is not None:
orig_refs[fieldname] = (
field.type.schema_refs_from_data(orig_value))
data[findex] = value
id_to_data = self._id_to_data.set(obj_id, tuple(data))
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)
return self._replace(name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to)
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
return self._id_to_data.get(obj.id)
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
try:
return self._id_to_data[obj.id]
except KeyError:
err = (f'cannot get item data: item {str(obj.id)!r} '
f'is not present in the schema {self!r}')
raise errors.SchemaError(err) from None
def set_obj_field(
self,
obj: so.Object,
fieldname: str,
value: Any,
) -> FlatSchema:
obj_id = obj.id
try:
data = self._id_to_data[obj_id]
except KeyError:
err = (f'cannot set {fieldname!r} value: item {str(obj_id)!r} '
f'is not present in the schema {self!r}')
raise errors.SchemaError(err) from None
sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj_id])
field = sclass.get_schema_field(fieldname)
findex = field.index
is_object_ref = field in sclass.get_object_reference_fields()
if field in sclass.get_reducible_fields():
value = value.schema_reduce()
name_to_id = None
shortname_to_id = None
globalname_to_id = None
if fieldname == 'name':
old_name = data[findex]
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(obj_id, sclass, old_name, value)
)
data_list = list(data)
data_list[findex] = value
new_data = tuple(data_list)
id_to_data = self._id_to_data.set(obj_id, new_data)
if not is_object_ref:
refs_to = None
else:
orig_value = data[findex]
if orig_value is not None:
orig_refs = {
fieldname: field.type.schema_refs_from_data(orig_value),
}
else:
orig_refs = {}
new_refs = {fieldname: field.type.schema_refs_from_data(value)}
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)
return self._replace(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to,
)
def unset_obj_field(
self,
obj: so.Object,
fieldname: str,
) -> FlatSchema:
obj_id = obj.id
try:
data = self._id_to_data[obj.id]
except KeyError:
return self
sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj.id])
field = sclass.get_schema_field(fieldname)
findex = field.index
name_to_id = None
shortname_to_id = None
globalname_to_id = None
orig_value = data[findex]
if orig_value is None:
return self
if fieldname == 'name':
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(
obj_id,
sclass,
orig_value,
None
)
)
data_list = list(data)
data_list[findex] = None
new_data = tuple(data_list)
id_to_data = self._id_to_data.set(obj_id, new_data)
is_object_ref = field in sclass.get_object_reference_fields()
if not is_object_ref:
refs_to = None
else:
orig_refs = {
fieldname: field.type.schema_refs_from_data(orig_value),
}
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, None)
return self._replace(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to,
)
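    # Maintain the reverse-reference index: for every object id referenced by
    # *object_id*, add or drop an entry keyed by (referring class, field name).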
def _update_refs_to(
self,
object_id: uuid.UUID,
sclass: Type[so.Object],
orig_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
new_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
) -> Refs_T:
objfields = sclass.get_object_reference_fields()
if not objfields:
return self._refs_to
with self._refs_to.mutate() as mm:
for field in objfields:
if not new_refs:
ids = None
else:
ids = new_refs.get(field.name)
if not orig_refs:
orig_ids = None
else:
orig_ids = orig_refs.get(field.name)
if not ids and not orig_ids:
continue
old_ids: Optional[FrozenSet[uuid.UUID]]
new_ids: Optional[FrozenSet[uuid.UUID]]
key = (sclass, field.name)
if ids and orig_ids:
new_ids = ids - orig_ids
old_ids = orig_ids - ids
elif ids:
new_ids = ids
old_ids = None
else:
new_ids = None
old_ids = orig_ids
if new_ids:
for ref_id in new_ids:
try:
refs = mm[ref_id]
except KeyError:
mm[ref_id] = immu.Map((
(key, immu.Map(((object_id, None),))),
))
else:
try:
field_refs = refs[key]
except KeyError:
field_refs = immu.Map(((object_id, None),))
else:
field_refs = field_refs.set(object_id, None)
mm[ref_id] = refs.set(key, field_refs)
if old_ids:
for ref_id in old_ids:
refs = mm[ref_id]
field_refs = refs[key].delete(object_id)
if not field_refs:
mm[ref_id] = refs.delete(key)
else:
mm[ref_id] = refs.set(key, field_refs)
return mm.finish()
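    # Insert an object whose field data is already in reduced form, updating
    # every index; a duplicate name or id raises SchemaError.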
def add_raw(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> FlatSchema:
name_field = sclass.get_schema_field('name')
name = data[name_field.index]
if name in self._name_to_id:
raise errors.SchemaError(
f'{sclass.__name__} {name!r} is already present '
f'in the schema {self!r}')
if id in self._id_to_data:
raise errors.SchemaError(
f'{sclass.__name__} ({str(id)!r}) is already present '
f'in the schema {self!r}')
object_ref_fields = sclass.get_object_reference_fields()
if not object_ref_fields:
refs_to = None
else:
new_refs = {}
for field in object_ref_fields:
ref = data[field.index]
if ref is not None:
ref = field.type.schema_refs_from_data(ref)
new_refs[field.name] = ref
refs_to = self._update_refs_to(id, sclass, None, new_refs)
name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
id, sclass, None, name)
updates = dict(
id_to_data=self._id_to_data.set(id, data),
id_to_type=self._id_to_type.set(id, sclass.__name__),
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
refs_to=refs_to,
)
if (
issubclass(sclass, so.QualifiedObject)
and not self.has_module(name.module)
and name.module != '__derived__'
):
raise errors.UnknownModuleError(
f'module {name.module!r} is not in this schema')
return self._replace(**updates) # type: ignore
def add(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> FlatSchema:
reducible_fields = sclass.get_reducible_fields()
if reducible_fields:
data_list = list(data)
for field in reducible_fields:
val = data[field.index]
if val is not None:
data_list[field.index] = val.schema_reduce()
data = tuple(data_list)
return self.add_raw(id, sclass, data)
def _delete(self, obj: so.Object) -> FlatSchema:
data = self._id_to_data.get(obj.id)
if data is None:
raise errors.InvalidReferenceError(
f'cannot delete {obj!r}: not in this schema')
sclass = type(obj)
name_field = sclass.get_schema_field('name')
name = data[name_field.index]
updates = {}
name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
obj.id, sclass, name, None)
object_ref_fields = sclass.get_object_reference_fields()
if not object_ref_fields:
refs_to = None
else:
values = self._id_to_data[obj.id]
orig_refs = {}
for field in object_ref_fields:
ref = values[field.index]
if ref is not None:
ref = field.type.schema_refs_from_data(ref)
orig_refs[field.name] = ref
refs_to = self._update_refs_to(obj.id, sclass, orig_refs, None)
updates.update(dict(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=self._id_to_data.delete(obj.id),
id_to_type=self._id_to_type.delete(obj.id),
refs_to=refs_to,
))
return self._replace(**updates) # type: ignore
def discard(self, obj: so.Object) -> FlatSchema:
if obj.id in self._id_to_data:
return self._delete(obj)
else:
return self
def delete(self, obj: so.Object) -> FlatSchema:
return self._delete(obj)
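    # Shared name-resolution helper: apply module aliases, try the qualified
    # name, then fall back to the 'std' module for unqualified names.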
def _get(
self,
name: Union[str, sn.Name],
*,
getter: Callable[[FlatSchema, sn.Name], Any],
default: Any,
module_aliases: Optional[Mapping[Optional[str], str]],
) -> Any:
if isinstance(name, str):
name = sn.name_from_string(name)
shortname = name.name
module = name.module if isinstance(name, sn.QualName) else None
implicit_builtins = module is None
if module == '__std__':
fqname = sn.QualName('std', shortname)
result = getter(self, fqname)
if result is not None:
return result
else:
return default
if module_aliases is not None:
fq_module = module_aliases.get(module)
if fq_module is not None:
module = fq_module
if module is not None:
fqname = sn.QualName(module, shortname)
result = getter(self, fqname)
if result is not None:
return result
if implicit_builtins:
fqname = sn.QualName('std', shortname)
result = getter(self, fqname)
if result is not None:
return result
return default
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
if isinstance(name, str):
name = sn.name_from_string(name)
funcs = self._get(name,
getter=_get_functions,
module_aliases=module_aliases,
default=default)
if funcs is not so.NoDefault:
return cast(
Tuple[s_func.Function, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_func.Function,
)
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
funcs = self._get(name,
getter=_get_operators,
module_aliases=module_aliases,
default=default)
if funcs is not so.NoDefault:
return cast(
Tuple[s_oper.Operator, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_oper.Operator,
)
@functools.lru_cache()
def _get_casts(
self,
stype: s_types.Type,
*,
disposition: str,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
all_casts = cast(
FrozenSet[s_casts.Cast],
self.get_referrers(
stype, scls_type=s_casts.Cast, field_name=disposition),
)
casts = []
for castobj in all_casts:
if implicit and not castobj.get_allow_implicit(self):
continue
if assignment and not castobj.get_allow_assignment(self):
continue
casts.append(castobj)
return frozenset(casts)
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(to_type, disposition='to_type',
implicit=implicit, assignment=assignment)
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(from_type, disposition='from_type',
implicit=implicit, assignment=assignment)
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
return self._get_referrers(
scls, scls_type=scls_type, field_name=field_name)
@functools.lru_cache()
def _get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return frozenset()
else:
referrers: Set[so.Object] = set()
if scls_type is not None:
if field_name is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type) and fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
for (st, _), ids in refs.items():
if issubclass(st, scls_type):
referrers.update(
self.get_by_id(objid) for objid in ids)
elif field_name is not None:
for (_, fn), ids in refs.items():
if fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
refids = itertools.chain.from_iterable(refs.values())
referrers.update(self.get_by_id(objid) for objid in refids)
return frozenset(referrers) # type: ignore
@functools.lru_cache()
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return {}
else:
result = {}
if scls_type is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type):
result[st, fn] = frozenset(
self.get_by_id(objid) for objid in ids)
else:
for (st, fn), ids in refs.items():
result[st, fn] = frozenset( # type: ignore
self.get_by_id(objid) for objid in ids)
return result # type: ignore
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
try:
sclass_name = self._id_to_type[obj_id]
except KeyError:
if default is so.NoDefault:
raise errors.InvalidReferenceError(
f'reference to a non-existent schema item {obj_id}'
f' in schema {self!r}'
) from None
else:
return default
else:
obj = so.Object.schema_restore((sclass_name, obj_id))
if type is not None and not isinstance(obj, type):
raise errors.InvalidReferenceError(
f'schema object {obj_id!r} exists, but is not '
f'{type.get_schema_class_displayname()}'
)
# Avoid the overhead of cast(Object_T) below
return obj # type: ignore
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
if isinstance(name, str):
name = sn.UnqualName(name)
obj_id = self._globalname_to_id.get((objtype, name))
if obj_id is not None:
return self.get_by_id(obj_id) # type: ignore
elif default is not so.NoDefault:
return default
else:
self._raise_bad_reference(name, type=objtype)
def get_generic(
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
def getter(schema: FlatSchema, name: sn.Name) -> Optional[so.Object]:
obj_id = schema._name_to_id.get(name)
if obj_id is None:
return None
obj = schema.get_by_id(obj_id, type=type, default=None)
if obj is not None and condition is not None:
if not condition(obj):
obj = None
return obj
obj = self._get(name,
getter=getter,
module_aliases=module_aliases,
default=default)
if obj is not so.NoDefault:
return obj # type: ignore
else:
self._raise_bad_reference(
name=name,
label=label,
module_aliases=module_aliases,
sourcectx=sourcectx,
type=type,
)
def _raise_bad_reference(
self,
name: Union[str, sn.Name],
*,
label: Optional[str] = None,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
sourcectx: Optional[parsing.ParserContext] = None,
type: Optional[Type[so.Object]] = None,
) -> NoReturn:
refname = str(name)
if label is None:
if type is not None:
label = type.get_schema_class_displayname()
else:
label = 'schema item'
if type is not None:
if issubclass(type, so.QualifiedObject):
if not sn.is_qualified(refname):
if module_aliases is not None:
default_module = module_aliases.get(None)
if default_module is not None:
refname = type.get_displayname_static(
sn.QualName(default_module, refname),
)
else:
refname = type.get_displayname_static(
sn.QualName.from_string(refname))
else:
refname = type.get_displayname_static(
sn.UnqualName.from_string(refname))
raise errors.InvalidReferenceError(
f'{label} {refname!r} does not exist',
context=sourcectx,
)
def has_object(self, object_id: uuid.UUID) -> bool:
return object_id in self._id_to_type
def has_module(self, module: str) -> bool:
return self.get_global(s_mod.Module, module, None) is not None
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
return SchemaIterator[so.Object_T](
self,
self._id_to_type,
exclude_stdlib=exclude_stdlib,
exclude_global=exclude_global,
exclude_internal=exclude_internal,
included_modules=included_modules,
excluded_modules=excluded_modules,
included_items=included_items,
excluded_items=excluded_items,
type=type,
extra_filters=extra_filters,
)
def get_modules(self) -> Tuple[s_mod.Module, ...]:
modules = []
for (objtype, _), objid in self._globalname_to_id.items():
if objtype is s_mod.Module:
modules.append(self.get_by_id(objid, type=s_mod.Module))
return tuple(modules)
def get_last_migration(self) -> Optional[s_migrations.Migration]:
return _get_last_migration(self)
def __repr__(self) -> str:
return (
f'<{type(self).__name__} gen:{self._generation} at {id(self):#x}>')
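# SchemaIterator lazily filters a collection of object ids with the criteria
# passed to get_objects() and yields the matching schema objects.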
class SchemaIterator(Generic[so.Object_T]):
def __init__(
self,
schema: Schema,
object_ids: Iterable[uuid.UUID],
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]],
excluded_modules: Optional[Iterable[sn.Name]],
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> None:
filters = []
if type is not None:
t = type
filters.append(lambda schema, obj: isinstance(obj, t))
if included_modules:
modules = frozenset(included_modules)
filters.append(
lambda schema, obj:
isinstance(obj, so.QualifiedObject) and
obj.get_name(schema).get_module_name() in modules)
if excluded_modules or exclude_stdlib:
excmod: Set[sn.Name] = set()
if excluded_modules:
excmod.update(excluded_modules)
if exclude_stdlib:
excmod.update(STD_MODULES)
filters.append(
lambda schema, obj: (
not isinstance(obj, so.QualifiedObject)
or obj.get_name(schema).get_module_name() not in excmod
)
)
if included_items:
objs = frozenset(included_items)
filters.append(
lambda schema, obj: obj.get_name(schema) in objs)
if excluded_items:
objs = frozenset(excluded_items)
filters.append(
lambda schema, obj: obj.get_name(schema) not in objs)
if exclude_stdlib:
filters.append(
lambda schema, obj: not isinstance(obj, s_pseudo.PseudoType)
)
if exclude_global:
filters.append(
lambda schema, obj: not isinstance(obj, so.GlobalObject)
)
if exclude_internal:
filters.append(
lambda schema, obj: not isinstance(obj, so.InternalObject)
)
# Extra filters are last, because they might depend on type.
filters.extend(extra_filters)
self._filters = filters
self._schema = schema
self._object_ids = object_ids
def __iter__(self) -> Iterator[so.Object_T]:
filters = self._filters
schema = self._schema
get_by_id = schema.get_by_id
for obj_id in self._object_ids:
obj = get_by_id(obj_id)
if all(f(self._schema, obj) for f in filters):
yield obj # type: ignore
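# ChainedSchema layers three FlatSchemas: a base schema that is never mutated
# here, a top schema that receives all non-global mutations, and a global
# schema for GlobalObject instances; lookups consult top, then base, then
# global.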
class ChainedSchema(Schema):
__slots__ = ('_base_schema', '_top_schema', '_global_schema')
def __init__(
self,
base_schema: FlatSchema,
top_schema: FlatSchema,
global_schema: FlatSchema
) -> None:
self._base_schema = base_schema
self._top_schema = top_schema
self._global_schema = global_schema
def get_top_schema(self) -> FlatSchema:
return self._top_schema
def get_global_schema(self) -> FlatSchema:
return self._global_schema
def add_raw(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> ChainedSchema:
if issubclass(sclass, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.add_raw(id, sclass, data),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.add_raw(id, sclass, data),
self._global_schema,
)
def add(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> ChainedSchema:
if issubclass(sclass, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.add(id, sclass, data),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.add(id, sclass, data),
self._global_schema,
)
def discard(self, obj: so.Object) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.discard(obj),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.discard(obj),
self._global_schema,
)
def delete(self, obj: so.Object) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.delete(obj),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.delete(obj),
self._global_schema,
)
def update_obj(
self,
obj: so.Object,
updates: Mapping[str, Any],
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.update_obj(obj, updates),
)
else:
obj_id = obj.id
base_obj = self._base_schema.get_by_id(obj_id, default=None)
if (
base_obj is not None
and not self._top_schema.has_object(obj_id)
):
top_schema = self._top_schema.add_raw(
obj_id,
type(base_obj),
self._base_schema._id_to_data[obj_id],
)
else:
top_schema = self._top_schema
return ChainedSchema(
self._base_schema,
top_schema.update_obj(obj, updates),
self._global_schema,
)
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
if isinstance(obj, so.GlobalObject):
return self._global_schema.maybe_get_obj_data_raw(obj)
else:
top = self._top_schema.maybe_get_obj_data_raw(obj)
if top is not None:
return top
else:
return self._base_schema.maybe_get_obj_data_raw(obj)
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
if isinstance(obj, so.GlobalObject):
return self._global_schema.get_obj_data_raw(obj)
else:
top = self._top_schema.maybe_get_obj_data_raw(obj)
if top is not None:
return top
else:
return self._base_schema.get_obj_data_raw(obj)
def set_obj_field(
self,
obj: so.Object,
fieldname: str,
value: Any,
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.set_obj_field(obj, fieldname, value),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.set_obj_field(obj, fieldname, value),
self._global_schema,
)
def unset_obj_field(
self,
obj: so.Object,
field: str,
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.unset_obj_field(obj, field),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.unset_obj_field(obj, field),
self._global_schema,
)
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
objs = self._top_schema.get_functions(
name, module_aliases=module_aliases, default=())
if not objs:
objs = self._base_schema.get_functions(
name, default=default, module_aliases=module_aliases)
return objs
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
objs = self._top_schema.get_operators(
name, module_aliases=module_aliases, default=())
if not objs:
objs = self._base_schema.get_operators(
name, default=default, module_aliases=module_aliases)
return objs
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return (
self._base_schema.get_casts_to_type(
to_type,
implicit=implicit,
assignment=assignment,
)
| self._top_schema.get_casts_to_type(
to_type,
implicit=implicit,
assignment=assignment,
)
)
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return (
self._base_schema.get_casts_from_type(
from_type,
implicit=implicit,
assignment=assignment,
)
| self._top_schema.get_casts_from_type(
from_type,
implicit=implicit,
assignment=assignment,
)
)
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
return (
self._base_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
| self._top_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
| self._global_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
)
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
base = self._base_schema.get_referrers_ex(scls, scls_type=scls_type)
top = self._top_schema.get_referrers_ex(scls, scls_type=scls_type)
gl = self._global_schema.get_referrers_ex(scls, scls_type=scls_type)
return {
k: (
base.get(k, frozenset())
| top.get(k, frozenset())
| gl.get(k, frozenset())
)
for k in itertools.chain(base, top)
}
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
obj = self._top_schema.get_by_id(obj_id, type=type, default=None)
if obj is None:
obj = self._base_schema.get_by_id(
obj_id, default=None, type=type)
if obj is None:
obj = self._global_schema.get_by_id(
obj_id, default=default, type=type)
return obj
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
if issubclass(objtype, so.GlobalObject):
return self._global_schema.get_global( # type: ignore
objtype, name, default=default)
else:
obj = self._top_schema.get_global(objtype, name, default=None)
if obj is None:
obj = self._base_schema.get_global(
objtype, name, default=default)
return obj
def get_generic( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
obj = self._top_schema.get(
name,
module_aliases=module_aliases,
type=type,
default=None,
condition=condition,
label=label,
sourcectx=sourcectx,
)
if obj is None:
return self._base_schema.get(
name,
default=default,
module_aliases=module_aliases,
type=type,
condition=condition,
label=label,
sourcectx=sourcectx,
)
else:
return obj
def has_object(self, object_id: uuid.UUID) -> bool:
return (
self._base_schema.has_object(object_id)
or self._top_schema.has_object(object_id)
or self._global_schema.has_object(object_id)
)
def has_module(self, module: str) -> bool:
return (
self._base_schema.has_module(module)
or self._top_schema.has_module(module)
)
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
return SchemaIterator[so.Object_T](
self,
itertools.chain(
self._base_schema._id_to_type,
self._top_schema._id_to_type,
self._global_schema._id_to_type,
),
exclude_global=exclude_global,
exclude_stdlib=exclude_stdlib,
exclude_internal=exclude_internal,
included_modules=included_modules,
excluded_modules=excluded_modules,
included_items=included_items,
excluded_items=excluded_items,
type=type,
extra_filters=extra_filters,
)
def get_modules(self) -> Tuple[s_mod.Module, ...]:
return (
self._base_schema.get_modules()
+ self._top_schema.get_modules()
)
def get_last_migration(self) -> Optional[s_migrations.Migration]:
migration = self._top_schema.get_last_migration()
if migration is None:
migration = self._base_schema.get_last_migration()
return migration
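# Module-level lru_cache'd helpers used by the lookup methods above: function
# and operator overload sets are resolved through the shortname index, and the
# latest migration is found by walking the migration parent graph from its
# root.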
@functools.lru_cache()
def _get_functions(
schema: FlatSchema,
name: sn.Name,
) -> Optional[Tuple[s_func.Function, ...]]:
objids = schema._shortname_to_id.get((s_func.Function, name))
if objids is None:
return None
return cast(
Tuple[s_func.Function, ...],
tuple(schema.get_by_id(oid) for oid in objids),
)
@functools.lru_cache()
def _get_operators(
schema: FlatSchema,
name: sn.Name,
) -> Optional[Tuple[s_oper.Operator, ...]]:
objids = schema._shortname_to_id.get((s_oper.Operator, name))
if objids is None:
        return None  # explicit, to match the Optional[...] return annotation
return cast(
Tuple[s_oper.Operator, ...],
tuple(schema.get_by_id(oid) for oid in objids),
)
@functools.lru_cache()
def _get_last_migration(
schema: FlatSchema,
) -> Optional[s_migrations.Migration]:
migrations = cast(
List[s_migrations.Migration],
[
schema.get_by_id(mid)
for (t, _), mid in schema._globalname_to_id.items()
if t is s_migrations.Migration
],
)
if not migrations:
return None
migration_map = collections.defaultdict(list)
root = None
for m in migrations:
parents = m.get_parents(schema).objects(schema)
if not parents:
if root is not None:
raise errors.InternalServerError(
'multiple migration roots found')
root = m
for parent in parents:
migration_map[parent].append(m)
if root is None:
raise errors.InternalServerError('cannot find migration root')
latest = root
while children := migration_map[latest]:
if len(children) > 1:
raise errors.InternalServerError(
'nonlinear migration history detected')
latest = children[0]
return latest
| apache-2.0 | -8,112,427,022,049,611,000 | 30.118885 | 79 | 0.517647 | false |
yytang2012/novels-crawler | simpleSpiders/m-ifuwen.py | 1 | 1913 | import os
import requests
from parsel import Selector
from urllib.parse import urljoin
from libs.polish import polish_content, polish_title, polish_subtitle
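# Scrape a complete novel from m.ifuwen.com: open the chapter list, follow the
# "next page" links until they point back to the table of contents, polish each
# chapter, and save the text as ../userData/downloads/<title>.txt (skipped if
# that file already exists).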
def parse_content(url):
page = requests.get(url)
html = page.text
sel = Selector(text=html)
title = sel.xpath('//title/text()').extract()[0]
title = title.split('_')[0]
title = polish_title(title, 'm-ifuwen')
print(title)
file_path = os.path.join(os.getcwd(), '..')
file_path = os.path.join(file_path, 'userData')
file_path = os.path.join(file_path, 'downloads')
file_path = os.path.join(file_path, title + '.txt')
print(file_path)
if os.path.isfile(file_path):
return 0
next_page_url = sel.xpath('//div[@class="lb_mulu chapterList"]/ul/li/a/@href').extract()[0]
next_page_url = urljoin(page.url, next_page_url)
print(next_page_url)
article = ''
idx = 1
while True:
req = requests.get(next_page_url)
html = req.text
sel = Selector(text=html)
subtitle = sel.xpath('//h1/text()').extract()[0]
subtitle = polish_subtitle(subtitle)
article += subtitle
contents = sel.xpath('//div[@id="nr1"]/p/text()').extract()
cc = polish_content(contents)
article += cc
tmp = sel.xpath('//div[@class="nr_page"]/table/tr')
next_page_url = tmp.xpath('td[@class="next"]/a/@href').extract()[0]
mulu = tmp.xpath('td[@class="mulu"]/a/@href').extract()[0]
if next_page_url == mulu:
break
idx += 1
next_page_url = urljoin(page.url, next_page_url)
print(idx, next_page_url)
save_to_file(file_path, article)
def save_to_file(file_path, article):
print(article)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(article)
if __name__ == '__main__':
url = 'https://m.ifuwen.com/novel/30264.html'
parse_content(url)
| mit | -1,006,423,881,069,310,800 | 29.365079 | 95 | 0.598014 | false |
kvar/ansible | lib/ansible/modules/network/nxos/nxos_ospf_vrf.py | 1 | 17083 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ospf_vrf
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages a VRF for an OSPF router.
description:
- Manages a VRF for an OSPF router.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Value I(default) restores params default value, if any.
Otherwise it removes the existing param configuration.
options:
vrf:
description:
- Name of the resource instance. Valid value is a string.
The name 'default' is a valid VRF representing the global OSPF.
default: default
ospf:
description:
- Name of the OSPF instance.
required: true
router_id:
description:
- Router Identifier (ID) of the OSPF router VRF instance.
default_metric:
description:
- Specify the default Metric value. Valid values are an integer
or the keyword 'default'.
log_adjacency:
description:
- Controls the level of log messages generated whenever a
neighbor changes state. Valid values are 'log', 'detail',
and 'default'.
choices: ['log','detail','default']
timer_throttle_lsa_start:
description:
- Specify the start interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_lsa_hold:
description:
- Specify the hold interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_lsa_max:
description:
- Specify the max interval for rate-limiting Link-State
Advertisement (LSA) generation. Valid values are an integer,
in milliseconds, or the keyword 'default'.
timer_throttle_spf_start:
description:
- Specify initial Shortest Path First (SPF) schedule delay.
Valid values are an integer, in milliseconds, or
the keyword 'default'.
timer_throttle_spf_hold:
description:
- Specify minimum hold time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
timer_throttle_spf_max:
description:
- Specify the maximum wait time between Shortest Path First (SPF)
calculations. Valid values are an integer, in milliseconds,
or the keyword 'default'.
auto_cost:
description:
- Specifies the reference bandwidth used to assign OSPF cost.
Valid values are an integer, in Mbps, or the keyword 'default'.
bfd:
description:
- Enables BFD on all OSPF interfaces.
- "Dependency: 'feature bfd'"
version_added: "2.9"
type: str
choices: ['enable', 'disable']
passive_interface:
description:
- Setting to C(yes) will suppress routing update on interface.
version_added: "2.4"
type: bool
state:
description:
- State of ospf vrf configuration.
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- nxos_ospf_vrf:
ospf: 1
timer_throttle_spf_start: 50
timer_throttle_spf_hold: 1000
timer_throttle_spf_max: 2000
timer_throttle_lsa_start: 60
timer_throttle_lsa_hold: 1100
timer_throttle_lsa_max: 3000
vrf: test
bfd: enable
state: present
'''
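# A second, hypothetical task (illustrative values only, kept as a comment
# sketch) showing how the same module removes the per-VRF OSPF configuration:
#
# - nxos_ospf_vrf:
#     ospf: 1
#     vrf: test
#     state: absent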
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample:
- router ospf 1
- vrf test
- bfd
- timers throttle lsa 60 1100 3000
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'passive_interface'
]
PARAM_TO_COMMAND_KEYMAP = {
'vrf': 'vrf',
'router_id': 'router-id',
'default_metric': 'default-metric',
'log_adjacency': 'log-adjacency-changes',
'timer_throttle_lsa_start': 'timers throttle lsa',
'timer_throttle_lsa_max': 'timers throttle lsa',
'timer_throttle_lsa_hold': 'timers throttle lsa',
'timer_throttle_spf_max': 'timers throttle spf',
'timer_throttle_spf_start': 'timers throttle spf',
'timer_throttle_spf_hold': 'timers throttle spf',
'auto_cost': 'auto-cost reference-bandwidth',
'bfd': 'bfd',
'passive_interface': 'passive-interface default'
}
PARAM_TO_DEFAULT_KEYMAP = {
'timer_throttle_lsa_start': '0',
'timer_throttle_lsa_max': '5000',
'timer_throttle_lsa_hold': '5000',
'timer_throttle_spf_start': '200',
'timer_throttle_spf_max': '5000',
'timer_throttle_spf_hold': '1000',
'auto_cost': '40000',
'bfd': 'disable',
'default_metric': '',
'passive_interface': False,
'router_id': '',
'log_adjacency': '',
}
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
config = netcfg.get_section(parents)
for arg in args:
if arg not in ['ospf', 'vrf']:
existing[arg] = PARAM_TO_DEFAULT_KEYMAP.get(arg)
if config:
if module.params['vrf'] == 'default':
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
splitted_config = config.splitlines()
for line in splitted_config:
if 'passive' in line:
existing['passive_interface'] = True
elif 'router-id' in line:
existing['router_id'] = re.search(r'router-id (\S+)', line).group(1)
elif 'metric' in line:
existing['default_metric'] = re.search(r'default-metric (\S+)', line).group(1)
elif 'adjacency' in line:
log = re.search(r'log-adjacency-changes(?: (\S+))?', line).group(1)
if log:
existing['log_adjacency'] = log
else:
existing['log_adjacency'] = 'log'
elif 'auto' in line:
cost = re.search(r'auto-cost reference-bandwidth (\d+) (\S+)', line).group(1)
if 'Gbps' in line:
cost = int(cost) * 1000
existing['auto_cost'] = str(cost)
elif 'bfd' in line:
existing['bfd'] = 'enable'
elif 'timers throttle lsa' in line:
tmp = re.search(r'timers throttle lsa (\S+) (\S+) (\S+)', line)
existing['timer_throttle_lsa_start'] = tmp.group(1)
existing['timer_throttle_lsa_hold'] = tmp.group(2)
existing['timer_throttle_lsa_max'] = tmp.group(3)
elif 'timers throttle spf' in line:
tmp = re.search(r'timers throttle spf (\S+) (\S+) (\S+)', line)
existing['timer_throttle_spf_start'] = tmp.group(1)
existing['timer_throttle_spf_hold'] = tmp.group(2)
existing['timer_throttle_spf_max'] = tmp.group(3)
existing['vrf'] = module.params['vrf']
existing['ospf'] = module.params['ospf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
def get_timer_prd(key, proposed):
if proposed.get(key):
return proposed.get(key)
else:
return PARAM_TO_DEFAULT_KEYMAP.get(key)
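# Translate the difference between *existing* and *proposed* into NX-OS CLI
# commands and add them to *candidate* under the router ospf / vrf parents.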
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key == 'vrf':
continue
if value is True:
commands.append(key)
elif value is False:
if key == 'passive-interface default':
if existing_commands.get(key):
commands.append('no {0}'.format(key))
else:
commands.append('no {0}'.format(key))
elif value == 'default' or value == '':
if key == 'log-adjacency-changes':
commands.append('no {0}'.format(key))
elif existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'timers throttle lsa':
command = '{0} {1} {2} {3}'.format(
key,
get_timer_prd('timer_throttle_lsa_start', proposed),
get_timer_prd('timer_throttle_lsa_hold', proposed),
get_timer_prd('timer_throttle_lsa_max', proposed))
elif key == 'timers throttle spf':
command = '{0} {1} {2} {3}'.format(
key,
get_timer_prd('timer_throttle_spf_start', proposed),
get_timer_prd('timer_throttle_spf_hold', proposed),
get_timer_prd('timer_throttle_spf_max', proposed))
elif key == 'log-adjacency-changes':
if value == 'log':
command = key
elif value == 'detail':
command = '{0} {1}'.format(key, value)
elif key == 'auto-cost reference-bandwidth':
if len(value) < 5:
command = '{0} {1} Mbps'.format(key, value)
else:
value = str(int(value) // 1000)
command = '{0} {1} Gbps'.format(key, value)
elif key == 'bfd':
command = 'no bfd' if value == 'disable' else 'bfd'
else:
command = '{0} {1}'.format(key, value.lower())
if command not in commands:
commands.append(command)
if commands:
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['router ospf {0}'.format(module.params['ospf'])]
if module.params['vrf'] == 'default':
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in existing_commands.items():
if value and key != 'vrf':
command = None
if key == 'passive-interface default':
command = 'no {0}'.format(key)
elif key == 'timers throttle lsa':
if (existing['timer_throttle_lsa_start'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_start') or
existing['timer_throttle_lsa_hold'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_hold') or
existing['timer_throttle_lsa_max'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_lsa_max')):
command = 'no {0} {1} {2} {3}'.format(
key,
existing['timer_throttle_lsa_start'],
existing['timer_throttle_lsa_hold'],
existing['timer_throttle_lsa_max'])
elif key == 'timers throttle spf':
if (existing['timer_throttle_spf_start'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_start') or
existing['timer_throttle_spf_hold'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_hold') or
existing['timer_throttle_spf_max'] !=
PARAM_TO_DEFAULT_KEYMAP.get('timer_throttle_spf_max')):
command = 'no {0} {1} {2} {3}'.format(
key,
existing['timer_throttle_spf_start'],
existing['timer_throttle_spf_hold'],
existing['timer_throttle_spf_max'])
elif key == 'log-adjacency-changes':
command = 'no {0}'.format(key)
elif key == 'auto-cost reference-bandwidth':
if value != PARAM_TO_DEFAULT_KEYMAP.get('auto_cost'):
command = 'no {0}'.format(key)
else:
command = None
elif key == 'bfd':
if value == 'enable':
command = 'no bfd'
else:
existing_value = existing_commands.get(key)
command = 'no {0} {1}'.format(key, existing_value)
if command:
if command not in commands:
commands.append(command)
else:
if (existing.get('vrf') and
existing.get('vrf') == module.params['vrf']):
commands = ['no vrf {0}'.format(module.params['vrf'])]
if commands:
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=False, type='str', default='default'),
ospf=dict(required=True, type='str'),
router_id=dict(required=False, type='str'),
default_metric=dict(required=False, type='str'),
log_adjacency=dict(required=False, type='str', choices=['log', 'detail', 'default']),
timer_throttle_lsa_start=dict(required=False, type='str'),
timer_throttle_lsa_hold=dict(required=False, type='str'),
timer_throttle_lsa_max=dict(required=False, type='str'),
timer_throttle_spf_start=dict(required=False, type='str'),
timer_throttle_spf_hold=dict(required=False, type='str'),
timer_throttle_spf_max=dict(required=False, type='str'),
auto_cost=dict(required=False, type='str'),
bfd=dict(required=False, type='str', choices=['enable', 'disable']),
passive_interface=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present', required=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, commands=[], warnings=warnings)
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
if state == 'absent' and existing:
state_absent(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
result['commands'] = candidate
if not module.check_mode:
load_config(module, candidate)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,757,120,865,278,113,000 | 37.649321 | 94 | 0.572323 | false |
hortonworks/hortonworks-sandbox | apps/oozie/src/oozie/management/commands/oozie_setup.py | 1 | 3188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright © 2013 Hortonworks, Inc.
import logging
import os
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
fs = cluster.get_hdfs()
remote_dir = create_directories(fs)
# Copy examples binaries
for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
local_dir = fs.join(LOCAL_SAMPLE_DIR.get(), name)
remote_data_dir = fs.join(remote_dir, name)
LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Copy sample data
local_dir = LOCAL_SAMPLE_DATA_DIR.get()
remote_data_dir = fs.join(remote_dir, 'data')
LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Load jobs
sample, created = User.objects.get_or_create(username='sample')
management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
from oozie.models import Job
Job.objects.filter(owner__id=1100713).update(owner=sample) # 11OOZIE
def create_directories(fs):
# If needed, create the remote home, deployment and data directories
directories = (REMOTE_DEPLOYMENT_DIR.get(), REMOTE_SAMPLE_DIR.get())
for directory in directories:
if not fs.do_as_user("hdfs", fs.exists, directory):
remote_home_dir = Hdfs.join('/user', "hdfs")
if directory.startswith(remote_home_dir):
# Home is 755
fs.do_as_user("hdfs", fs.create_home_dir, remote_home_dir)
# Shared by all the users
fs.do_as_user("hdfs", fs.mkdir, directory, 511)
fs.do_as_user("hdfs", fs.chmod, directory, 511) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
return REMOTE_SAMPLE_DIR.get()
| apache-2.0 | -7,638,046,287,902,017,000 | 38.8375 | 119 | 0.709131 | false |
ddworken/cubeOfResistors | 2D.py | 1 | 2155 | DIFF_THRESHOLD = 1e-40
width = height = 10
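# Relaxation solver for a width x height resistor mesh: node voltages are
# iterated until the squared residual drops below DIFF_THRESHOLD, and the
# effective resistance between the two fixed nodes is then reported.
# Note that iter() below shadows the Python built-in of the same name.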
class Fixed:
FREE = 0
A = 1
B = 2
class Node:
__slots__ = ["voltage", "fixed"]
def __init__(self, v=0.0, f=Fixed.FREE):
self.voltage = v
self.fixed = f
def set_boundary(mesh):
mesh[width / 2][height / 2] = Node(1.0, Fixed.A)
mesh[width / 2 + 2][height / 2 + 1] = Node(-1.0, Fixed.B)
def calc_difference(mesh, difference):
total = 0.0
for y in xrange(height):
for x in xrange(width):
totalVoltage = 0.0
numberConnections = 0
if y != 0:
totalVoltage += mesh[y-1][x].voltage
numberConnections += 1
if x != 0:
totalVoltage += mesh[y][x-1].voltage
numberConnections += 1
if y < height-1:
totalVoltage += mesh[y + 1][x].voltage
numberConnections += 1
if x < width - 1:
totalVoltage += mesh[y][x + 1].voltage
numberConnections += 1
totalVoltage = mesh[y][x].voltage - totalVoltage / numberConnections
difference[y][x].voltage = totalVoltage
if mesh[y][x].fixed == Fixed.FREE:
total += totalVoltage ** 2
return total
def iter(mesh):
difference = [[Node() for j in xrange(width)] for i in xrange(height)]
while True:
set_boundary(mesh)
if calc_difference(mesh, difference) < DIFF_THRESHOLD:
break
for i, di in enumerate(difference):
for j, dij in enumerate(di):
mesh[i][j].voltage -= dij.voltage
current = [0.0] * 3
for i, di in enumerate(difference):
for j, dij in enumerate(di):
current[mesh[i][j].fixed] += (dij.voltage *
(bool(i) + bool(j) + (i < height - 1) + (j < width - 1)))
print 2 / ((current[1] - current[2]) / 2.0)
return (current[Fixed.A] - current[Fixed.B]) / 2.0
def main():
mesh = [[Node() for j in xrange(width)] for i in xrange(height)]
print "R = " + str(2 / iter(mesh))
if __name__ == "__main__":
main()
| gpl-2.0 | 5,152,036,103,168,269,000 | 26.278481 | 99 | 0.508121 | false |
mjvakili/ccppabc | ccppabc/code/archive/wp_covariance.py | 1 | 1717 | from halotools.empirical_models import Zheng07, model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np
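# Populate 500 mock galaxy catalogues with the Zheng07 HOD model and write out
# the covariance matrix of their clustering measurements; the triple-quoted
# block at the end is inactive scratch code kept as a string literal.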
model = Zheng07()
xir = []
for i in range(500):
model.populate_mock()
xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
np.savetxt("clustering_covariance_Mr20.dat" , covar)
"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a
rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.
cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)
pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
key1='x', key2='y', key3='z', mask=mask,
return_complement=False)
figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')
for n_pbins in np.array([2,8,16]):
p_bins = np.linspace(0 , l/2 , n_pbins)
a = time.time()
clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
print time.time() - a
plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)
plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
| mit | 7,285,614,063,505,886,000 | 32.019231 | 90 | 0.664531 | false |
vprime/puuuu | env/bin/pilfile.py | 1 | 2645 | #!/Users/Vincent/lm_svn/checkouts/personal/papertrail-django/env/bin/python
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import site
import getopt, glob, sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
Image.DEBUG = Image.DEBUG + 1
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
| mit | 2,741,384,306,119,726,000 | 26.842105 | 77 | 0.564461 | false |
swegener/gruvi | src/build_http.py | 1 | 3155 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import os.path
from cffi import FFI
parent, _ = os.path.split(os.path.abspath(__file__))
topdir, _ = os.path.split(parent)
ffi = FFI()
ffi.set_source('http_ffi', """
#include <stdlib.h>
#include "src/http_parser.h"
#include "src/http_parser.c"
unsigned char http_message_type(http_parser *p) { return p->type; }
unsigned int http_status_code(http_parser *p) { return p->status_code; }
unsigned int http_method(http_parser *p) { return p->method; }
unsigned char http_errno(http_parser *p) { return p->http_errno; }
unsigned char http_is_upgrade(http_parser *p) { return p->upgrade; }
""", include_dirs=[topdir])
ffi.cdef("""
typedef struct http_parser http_parser;
typedef struct http_parser_settings http_parser_settings;
typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);
typedef int (*http_cb) (http_parser*);
enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH, ... };
struct http_parser {
unsigned short http_major;
unsigned short http_minor;
void *data;
...;
};
struct http_parser_settings {
http_cb on_message_begin;
http_data_cb on_url;
http_data_cb on_status;
http_data_cb on_header_field;
http_data_cb on_header_value;
http_cb on_headers_complete;
http_data_cb on_body;
http_cb on_message_complete;
...;
};
enum http_parser_url_fields { UF_SCHEMA, UF_HOST, UF_PORT, UF_PATH,
UF_QUERY, UF_FRAGMENT, UF_USERINFO, UF_MAX };
struct http_parser_url {
uint16_t field_set;
uint16_t port;
struct {
uint16_t off;
uint16_t len;
} field_data[UF_MAX];
...;
};
void http_parser_init(http_parser *parser, enum http_parser_type type);
size_t http_parser_execute(http_parser *parser,
const http_parser_settings *settings,
const char *data,
size_t len);
int http_should_keep_alive(const http_parser *parser);
const char *http_method_str(enum http_method m);
const char *http_errno_name(enum http_errno err);
void http_parser_url_init(struct http_parser_url *u);
int http_parser_parse_url(const char *buf, size_t buflen,
int is_connect, struct http_parser_url *u);
/* Extra functions to extract bitfields not supported by cffi */
unsigned char http_message_type(http_parser *parser);
unsigned int http_status_code(http_parser *parser);
unsigned int http_method(http_parser *parser);
unsigned char http_errno(http_parser *parser);
unsigned char http_is_upgrade(http_parser *parser);
""")
if __name__ == '__main__':
ffi.compile()
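# Illustrative usage of the generated extension module (a hedged sketch, not
# part of the original file; it assumes ffi.compile() has already produced the
# http_ffi module):
#   from http_ffi import ffi, lib
#   parser = ffi.new('http_parser *')
#   lib.http_parser_init(parser, lib.HTTP_REQUEST)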
| mit | 1,236,139,961,076,085,500 | 30.868687 | 78 | 0.626941 | false |
aroberge/docpicture | examples/fake_turtle.py | 1 | 7015 | """
This is a fake turtle module (with no relevant executable code, other than
a local docpicture parser included for testing) obtained through
severely amputating the original turtle module, for the purpose of
demonstrating the docpicture concept.
We start by including a drawing made with a docpicture "parser"
that is not part of the normal docpicture distribution, but is
defined in this file. We *suggest* that such parser names start
with "self." to indicate to the reader that they are defined locally.
docpicture will handle any name - but will first look for names in
its normal set.
..docpicture:: self.red_turtle
turtle.down()
turtle.color("orange")
turtle(45).forward(200)
Note that we get an error message saying that this parser is not
recognized. This will be changed, once this parser is set to be
"trusted".
From the original:
====================
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzeig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. Give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.left(25), and it rotates in-place 25 degrees clockwise.
By combining together these and similar commands, intricate shapes and
pictures can easily be drawn.
=====================
For docpictures, we modify slightly the notation so as to include
the angle at which the turtle is rotated. For example, we could have
..docpicture:: bw_turtle
turtle(20).forward(125)
We also have some other styles available, such as
..docpicture:: color_turtle
turtle.down()
turtle(20).forward(125)
and even
..docpicture:: turtle
turtle.down()
turtle.color("red")
turtle(20).forward(125)
Finally, we include a drawing with an unknown docpicture object - no
drawing will ever be made.
..docpicture:: unknown
turtle(20).forward(125)
"""
import parsers.turtle
import src.svg as svg
class RawPen:
def forward(self, distance):
""" Go forward distance steps.
Example:
>>> turtle.position()
[0.0, 0.0]
>>> turtle.forward(25)
>>> turtle.position()
[25.0, 0.0]
>>> turtle.forward(-75)
>>> turtle.position()
[-50.0, 0.0]
=====================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(0).forward(75)
"""
pass
def left(self, angle):
""" Turn left angle units (units are by default degrees,
but can be set via the degrees() and radians() functions.)
When viewed from above, the turning happens in-place around
its front tip.
Example:
>>> turtle.heading()
22
>>> turtle.left(45)
>>> turtle.heading()
67.0
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(22).left(45)
"""
pass
def right(self, angle):
""" Turn right angle units (units are by default degrees,
but can be set via the degrees() and radians() functions.)
When viewed from above, the turning happens in-place around
its front tip.
Example:
>>> turtle.heading()
22
>>> turtle.right(45)
>>> turtle.heading()
337.0
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(22).right(45)
"""
pass
def up(self):
""" Pull the pen up -- no drawing when moving.
Example:
>>> turtle.up()
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle.up()
turtle(10).forward(100)
"""
pass
def down(self):
""" Put the pen down -- draw when moving.
Example:
>>> turtle.down()
================
Let's add a picture
..docpicture:: turtle
turtle.down()
turtle(10).forward(100)
"""
pass
def color(self, *args):
""" Set the pen color.
In the original, three input formats are allowed; for docpicture,
only the named color is supported.
color(s)
s is a Tk specification string, such as "red" or "yellow"
Example:
>>> turtle.color('brown')
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle.down()
turtle.color("brown")
turtle(10).forward(100)
"""
pass
class RedTurtle(parsers.turtle.Turtle):
def __init__(self):
parsers.turtle.Turtle.__init__(self)
self.directive_name = 'self.red_turtle'
def get_svg_defs(self):
'''returns an object representing all the svg defs'''
defs = svg.SvgDefs()
defs.append(self.turtle_defs())
defs.append(self.plus_signs_defs())
return defs
def turtle_defs(self):
'''creates the svg:defs content for the turtle'''
t = svg.SvgElement("g", id="red_turtle")
# legs
t.append(svg.SvgElement("circle", cx=23, cy=16, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=23, cy=-15, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=-23, cy=16, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=-23, cy=-15, r=8, fill="yellow"))
# head and eyes
t.append(svg.SvgElement("circle", cx=32, cy=0, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=36, cy=4, r=2, fill="black"))
t.append(svg.SvgElement("circle", cx=36, cy=-4, r=2, fill="black"))
# body
t.append(svg.SvgElement("ellipse", cx=0, cy=0, rx=30, ry=25,
fill="red"))
return t
def first_turtle(self):
'''creation of first turtle '''
# same as Turtle, except no filter
t1 = svg.SvgElement("g", transform="translate(%d, %d)"%(self.x1, self.y1))
_t1 = svg.SvgElement("use", x=0, y=0, transform="rotate(%s 0 0)"%(-float(self.angle1)))
_t1.attributes["xlink:href"] = "#red_turtle"
t1.append(_t1)
return t1
def second_turtle(self):
'''creation of second turtle'''
# same as Turtle, except no filter
t2 = svg.SvgElement("g", transform="translate(%d, %d)"%(self.x2, self.y2))
_t2 = svg.SvgElement("use", x=0, y=0, transform="rotate(%s 0 0)"%(-float(self.angle2)))
_t2.attributes["xlink:href"] = "#red_turtle"
t2.append(_t2)
return t2
def register_docpicture_parser(register_parser):
register_parser(RedTurtle)
| bsd-3-clause | 6,207,094,637,867,796,000 | 28.351464 | 95 | 0.594726 | false |
jbrendel/RESTx | src/python/restx/components/test/test_Filter.py | 1 | 7040 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#
# To run this and other RESTx test files, use bin/testrun.
#
# These imports are necessary for all component tests
from restx.testtools.utils import *
from restx.components.api import *
# Importing the component we wish to test
from restx.components.Filter import Filter
# ============================
# Testing the Filter component
# ============================
def runtest():
#
# -------------------------------------------------------------------
# Mocking setup: Provide overrides for some of the component methods
# -------------------------------------------------------------------
#
class MyBaseCapabilities(BaseCapabilities):
def accessResource(self, resource_uri, input=None, params=None, method=HTTP.GET):
return RESOURCE_DICT[resource_uri]
#
# -------------------------------------------------------------------
# The actual tests
# -------------------------------------------------------------------
#
#
# Setting up a dummy component
#
rctp = dict(
input_resource_uri = "/resource/foo",
filter_expression_1 = "a/b/c = 123",
filter_expression_2 = "",
filter_expression_3 = "",
match_all = True,
)
c = make_component(rctp, Filter, MyBaseCapabilities)
#
# Testing filter_compile()
#
test_evaluator("Test 1", compare_list(c._filter_compile('a/b/c = 123'),
(['a', 'b', 'c'], '=', 123)))
test_evaluator("Test 2", compare_list(c._filter_compile('a/b/c = "123"'),
(['a', 'b', 'c'], '=', '123')))
test_evaluator("Test 3", compare_list(c._filter_compile('"a"/"one two b"/c=x = >= true'),
(['a', 'one two b', 'c=x ='], '>=', True)))
test_evaluator("Test 4", compare_list(c._filter_compile('"a" >= true'),
(['a'], '>=', True)))
test_evaluator("Test 5", compare_list(c._filter_compile('1 >= 123'),
([1], '>=', 123)))
test_evaluator("Test 6", compare_list(c._filter_compile('a/1/2 >= 123'),
(['a', 1, 2], '>=', 123)))
test_evaluator("Test 7", compare_list(c._filter_compile('a/"1"/2 >= 123'),
(['a', '1', 2], '>=', 123)))
#
# Testing element extraction
#
test_evaluator("Test 8", compare_elem(123, c._get_elem([ 111, 123 ], c._filter_compile("1 = 1")[0])))
test_evaluator("Test 9", compare_elem(123, c._get_elem({ 1: 123}, c._filter_compile("1 = 1")[0])))
test_evaluator("Test 10", compare_elem(123, c._get_elem({ "1": 123}, c._filter_compile('"1" = 1')[0])))
test_evaluator("Test 11", compare_elem(1, c._get_elem({ "1": [ 1, 2 ]}, c._filter_compile('"1"/0 = 1')[0])))
test_evaluator("Test 12", compare_elem("a", c._get_elem({ "x": [ 1, "a" ]}, c._filter_compile('x/1 = 1')[0])))
test_evaluator("Test 13", compare_elem("a", c._get_elem({ "x": [ 1, { "b" : "a" } ]}, c._filter_compile('x/1/b = 1')[0])))
#
# Testing filtering
#
rctp['filter_expression_1'] = "foo = xyz"
c = make_component(rctp, Filter, MyBaseCapabilities)
data = [
{ "email" : "[email protected]", "foo" : "abc" },
{ "blah" : 123 },
{ "email" : "[email protected]", "foo" : "xyz" },
{ "email" : "[email protected]", "foo" : "xyz" },
]
RESOURCE_DICT = { c.input_resource_uri : ( 200, data ) }
#
# Test 14: PASS filter
#
res = c.filter(None, None, False)
should_be = [
{ "email" : "[email protected]", "foo" : "xyz" },
{ "email" : "[email protected]", "foo" : "xyz" },
]
test_evaluator("Test 14", compare_out_lists(res, 200, should_be))
#
# Test 15: Deny filter
#
res = c.filter(None, None, True)
should_be = [
{ "email" : "[email protected]", "foo" : "abc" },
{ "blah" : 123 },
]
test_evaluator("Test 15", compare_out_lists(res, 200, should_be))
#
# Test 16: Filter with dictionary at top level
#
c = make_component(rctp, Filter, MyBaseCapabilities)
data = {
"aaa" : { "email" : "[email protected]", "foo" : "abc" },
"bbb" : { "blah" : 123 },
"ccc" : { "email" : "[email protected]", "foo" : "xyz" },
"ddd" : { "email" : "[email protected]", "foo" : "xyz" },
}
RESOURCE_DICT = { c.input_resource_uri : (200, data) }
res = c.filter(None, None, False)
should_be = {
"ccc" : { "email" : "[email protected]", "foo" : "xyz" },
"ddd" : { "email" : "[email protected]", "foo" : "xyz" },
}
test_evaluator("Test 16", compare_out_dicts(res, 200, should_be))
#
# Test 17: Other operator: !=
#
rctp['filter_expression_1'] = "foo != xyz"
c = make_component(rctp, Filter, MyBaseCapabilities)
res = c.filter(None, None, False)
should_be = {
"aaa" : { "email" : "[email protected]", "foo" : "abc" },
}
test_evaluator("Test 17", compare_out_dicts(res, 200, should_be))
#
# Test 18: Multiple expressions with AND
#
rctp['filter_expression_1'] = "b = 2"
rctp['filter_expression_2'] = "c = 1"
c = make_component(rctp, Filter, MyBaseCapabilities)
data = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 1, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 4 },
]
RESOURCE_DICT = { c.input_resource_uri : (200, data) }
res = c.filter(None, None, False)
should_be = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
]
test_evaluator("Test 18", compare_out_lists(res, 200, should_be))
#
# Test 19: Multiple expressions with OR
#
rctp['filter_expression_2'] = "c = 4"
rctp['match_all'] = False
c = make_component(rctp, Filter, MyBaseCapabilities)
res = c.filter(None, None, False)
should_be = [
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 2, "c" : 1 },
{ "a" : 1, "b" : 3, "c" : 4 },
]
test_evaluator("Test 19", compare_out_lists(res, 200, should_be))
return get_test_result()
| gpl-3.0 | 3,631,399,253,228,486,000 | 33.851485 | 128 | 0.484517 | false |
hmarkus/dynclasp | DflatDecomposition.py | 1 | 2536 | #!/bin/python
class DflatIdentifiable(object):
def __init__(self, keys):
self._keys = keys
def add(self, val):
self._keys.append(val)
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self._keys)
def id(self):
return DflatIdentifiable.idstr(self._keys)
def keys(self):
return self._keys
@staticmethod
def idstr(val):
val.sort()
return str(val)
def content(self):
self.id()
return DflatIdentifiable.contentStr(self._keys, lambda _ : False) #DflatIdentifiable.SHOW_ALL)
@staticmethod
def contentItemStr(val):
if type(val) is list:
res = ""
i = 0
for j in val:
if i == 0:
res += j + "("
else: #if not exc(j):
if i > 1:
res += ", "
res += DflatIdentifiable.contentItemStr(j)
i += 1
res += ")"
return res
else:
return val
@staticmethod
def contentStr(val, exc):
res = "["
for j in val:
if not exc(j):
if len(res) > 1:
res += ", "
res += DflatIdentifiable.contentItemStr(j)
res += "]"
return res
class DflatRowContainer(DflatIdentifiable):
def __init__(self, keys):
super(DflatRowContainer, self).__init__(keys)
self._next = []
self._prev = []
self._node = None
def setNode(self, n):
self._node = n
def node(self):
return self._node
def prev(self):
return self._prev
def next(self):
return self._next
def setPrev(self, child):
self._prev = child
def setNext(self, child):
self._next = child
def addPrev(self, child):
self._prev.append(child)
def addNext(self, child):
self._next.append(child)
def __str__(self):
#return super(DflatDecomposition, self).__str__(self._keys) + str(self._next)
return super(DflatRowContainer, self).__str__() + "@" #+ str(self.prev()) # + "->" + str(self._next)
class DflatDecomposition(DflatRowContainer):
def __init__(self, keys, fullintro = True):
super(DflatDecomposition, self).__init__(keys)
self._nr = 0
self._intro = []
self._posIntro = fullintro
self._introManaged = False
def setNr(self, nr):
self._nr = nr
def nr(self):
return self._nr
def addIntro(self, intro):
self._intro.append(intro)
def setIntroPositive(self, introP):
self._posIntro = introP
def setIntro(self, intro):
self._intro = intro
def intro(self):
if not self._posIntro and not self._introManaged:
self._intro = set(self._keys) - set(self._intro)
self._introManaged = True
return self._intro
def content(self):
return "n" + str(self._nr) + ": " + super(DflatDecomposition, self).content()
| gpl-3.0 | 3,731,327,717,661,745,700 | 18.658915 | 102 | 0.630521 | false |
QueenMargaretsCompSci/PiWars2016 | source/wii_remote_test.py | 1 | 3219 | #!/usr/bin/python
# import our modules
import cwiid
import time
import RPi.GPIO as GPIO
import piconzero as pz
import sensorlibs as sl
from picamera import PiCamera
# setup our camera
cam = PiCamera()
# setup our constants
button_delay = 0.1
PIN_LED = sl.GPIOtoBoard(4)
GPIO.setup(PIN_LED, GPIO.OUT)
GPIO.output(PIN_LED, 0)
# prompt for Wii connection
print 'Press 1 + 2 on your Wii Remote now ...'
GPIO.output(PIN_LED, 1)
time.sleep(1)
# Connect to the Wii Remote. If it times out
# then quit.
try:
wii=cwiid.Wiimote()
GPIO.output(PIN_LED, 0)
except RuntimeError:
print "Error opening wiimote connection"
GPIO.output(PIN_LED, 0)
quit()
print 'Wii Remote connected...\n'
print 'Press some buttons!\n'
print 'Press PLUS and MINUS together to disconnect and quit.\n'
# connected so lets flash our LED
for x in range(0,3):
GPIO.output(PIN_LED, 1)
time.sleep(0.25)
GPIO.output(PIN_LED, 0)
time.sleep(0.25)
wii.rpt_mode = cwiid.RPT_BTN
# initialise piconzero
pz.init()
# start recording
ts = str(time.time())
cam.vflip = True
cam.hflip = True
cam.start_recording("/home/pi/qmpiwars/videos/remote-" + ts + ".h264")
while True:
buttons = wii.state['buttons']
# If Plus and Minus buttons pressed
# together then rumble and quit.
if (buttons - cwiid.BTN_PLUS - cwiid.BTN_MINUS == 0):
print '\nClosing connection ...'
wii.rumble = 1
time.sleep(1)
wii.rumble = 0
sl.neoPixelLight("off")
pz.cleanup()
cam.stop_recording()
exit(wii)
# Check if other buttons are pressed by
# doing a bitwise AND of the buttons number
# and the predefined constant for that button.
if (buttons & cwiid.BTN_LEFT):
print 'Left pressed'
pz.spinRight(100)
time.sleep(button_delay)
sl.neoPixelLight("left")
if(buttons & cwiid.BTN_RIGHT):
print 'Right pressed'
pz.spinLeft(100)
time.sleep(button_delay)
sl.neoPixelLight("right")
if (buttons & cwiid.BTN_UP):
print 'Up pressed'
pz.forward(80)
time.sleep(button_delay)
sl.neoPixelLight("forward")
if (buttons & cwiid.BTN_B):
print 'Turbo pressed'
pz.forward(100)
time.sleep(button_delay)
sl.neoPixelLight("forward")
if (buttons & cwiid.BTN_DOWN):
print 'Down pressed'
pz.reverse(80)
time.sleep(button_delay)
sl.neoPixelLight("backward")
if (buttons & cwiid.BTN_1):
print 'Button 1 pressed'
time.sleep(button_delay)
if (buttons & cwiid.BTN_2):
print 'Button 2 pressed'
time.sleep(button_delay)
if (buttons & cwiid.BTN_A):
print 'Button A pressed'
pz.stop()
sl.neoPixelLight("off")
time.sleep(button_delay)
##########################################
# Not using these buttons
#
# if (buttons & cwiid.BTN_B):
# print 'Button B pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_HOME):
# print 'Home Button pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_MINUS):
# print 'Minus Button pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_PLUS):
# print 'Plus Button pressed'
# time.sleep(button_delay)
| gpl-3.0 | 1,293,673,930,725,031,700 | 22.326087 | 70 | 0.636533 | false |
google/closure-templates | python/runtime.py | 1 | 22615 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runtime module for compiled soy templates.
This module provides utility functions required by soy templates compiled with
the Python compilers. These functions handle the runtime internals necessary to
match JS behavior in module and function loading, along with type behavior.
"""
from __future__ import unicode_literals
__author__ = '[email protected] (David Phillips)'
import importlib
import math
import os
import re
import sys
from . import environment
from . import sanitize
import six
try:
import scandir
except ImportError:
scandir = None
# To allow the rest of the file to assume Python 3 strings, we will assign str
# to unicode for Python 2. This will error in 3 and be ignored.
try:
str = unicode # pylint: disable=redefined-builtin, invalid-name
except NameError:
pass
# Map from registered delegate template key to the priority, function, and
# function name tuple.
_DELEGATE_REGISTRY = {}
# All number types for use during custom type functions.
_NUMBER_TYPES = six.integer_types + (float,)
# The mapping of css class names for get_css_name.
_css_name_mapping = None
# The xid map for get_xid_name.
_xid_name_mapping = None
def get_xid_name(xid):
"""Return the mapped xid name.
Args:
xid: The xid name to modify.
Returns:
The renamed xid.
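  Example (illustrative only; the xid below is invented and we assume no xid
  mapping has been registered):
    >>> get_xid_name('my-xid')
    'my-xid_'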
"""
if _xid_name_mapping:
renamed = _xid_name_mapping.get(xid)
if renamed:
return renamed
return xid + '_'
def get_css_name(class_name, modifier=None):
"""Return the mapped css class name with modifier.
Following the pattern of goog.getCssName in closure, this function maps a css
class name to its proper name, and applies an optional modifier.
If no mapping is present, the class_name and modifier are joined with hyphens
and returned directly.
If a mapping is present, the resulting css name will be retrieved from the
mapping and returned.
  If one argument is passed it will be processed; if two are passed, only the
  modifier will be processed, as it is assumed the first argument was generated
  as a result of calling goog.getCssName.
Args:
class_name: The class name to look up.
modifier: An optional modifier to append to the class_name.
Returns:
A mapped class name with optional modifier.
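  Example (illustrative only; the class names and mapping are invented, and we
  assume no mapping has been set yet):
    >>> get_css_name('menu', 'active')
    'menu-active'
    >>> set_css_name_mapping({'active': 'a'})
    >>> get_css_name('menu', 'active')  # only the last piece is mapped
    'menu-a'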
"""
pieces = [class_name]
if modifier:
pieces.append(modifier)
if _css_name_mapping:
# Only map the last piece of the name.
pieces[-1] = _css_name_mapping.get(pieces[-1], pieces[-1])
return '-'.join(pieces)
def set_css_name_mapping(mapping):
"""Set the mapping of css names.
Args:
mapping: A dictionary of original class names to mapped class names.
"""
global _css_name_mapping
_css_name_mapping = mapping
def set_xid_name_mapping(mapping):
"""Sets the mapping of xids.
Args:
mapping: A dictionary of xid names.
"""
global _xid_name_mapping
_xid_name_mapping = mapping
def get_delegate_fn(template_id, variant, allow_empty_default):
"""Get the delegate function associated with the given template_id/variant.
Retrieves the (highest-priority) implementation that has been registered for
a given delegate template key (template_id and variant). If no implementation
has been registered for the key, then the fallback is the same template_id
with empty variant. If the fallback is also not registered,
and allow_empty_default is true, then returns an implementation that is
equivalent to an empty template (i.e. rendered output would be empty string).
Args:
template_id: The delegate template id.
variant: The delegate template variant (can be an empty string, or a number
when a global is used).
allow_empty_default: Whether to default to the empty template function if
there's no active implementation.
Returns:
The retrieved implementation function.
Raises:
RuntimeError: when no implementation of one delegate template is found.
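  Example (illustrative sketch only; the template id, variant and function
  below are invented):
    >>> register_delegate_fn('my.tmpl', 'v1', 0, lambda d, ij: 'hi', 'fn_a')
    >>> get_delegate_fn('my.tmpl', 'v1', False)({}, {})
    'hi'
    >>> get_delegate_fn('my.tmpl', 'other', True)({}, {})
    ''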
"""
entry = _DELEGATE_REGISTRY.get(_gen_delegate_id(template_id, variant))
fn = entry[1] if entry else None
# variant may be another zero value besides the empty string and we want to
# detect that
# pylint: disable=g-explicit-bool-comparison
if not fn and variant != '':
# Fallback to empty variant.
entry = _DELEGATE_REGISTRY.get(_gen_delegate_id(template_id))
fn = entry[1] if entry else None
if fn:
return fn
elif allow_empty_default:
return _empty_template_function
else:
msg = ('Found no active impl for delegate call to "%s%s" '
'(and delcall does not set allowemptydefault="true").')
raise RuntimeError(msg % (template_id, ':' + variant if variant else ''))
def concat_attribute_values(l, r, delimiter):
"""Merge two attribute values with a delimiter or use one or the other.
Args:
l: The string which is prefixed in the return value
r: The string which is suffixed in the return value
delimiter: The delimiter between the two sides
Returns:
The combined string separated by the delimiter.
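  Example (illustrative values only):
    >>> concat_attribute_values('color:red', 'margin:0', ';')
    'color:red;margin:0'
    >>> concat_attribute_values('', 'margin:0', ';')  # empty side is dropped
    'margin:0'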
"""
if not l:
return r
if not r:
return l
return l + delimiter + r
def concat_css_values(l, r):
"""Merge two css values.
Args:
l: The css which is prefixed in the return value
r: The css which is suffixed in the return value
Returns:
The combined css separated by the delimiter.
"""
return sanitize.SanitizedCss(
concat_attribute_values(str(l), str(r), ';'),
sanitize.IActuallyUnderstandSoyTypeSafetyAndHaveSecurityApproval(
"""Internal framework code."""))
def merge_into_dict(original, secondary):
"""Merge two dictionaries into the first and return it.
  This is simply a convenience wrapper around the dictionary update method. In
addition to the update it returns the original dict to allow for chaining.
Args:
original: The dict which will be updated.
secondary: The dict which will be copied.
Returns:
The updated original dictionary.
"""
original.update(secondary)
return original
def namespaced_import(name, namespace=None, environment_path=None):
"""A function to import compiled soy modules using the Soy namespace.
  This function first attempts to import the module directly. If it isn't
  found in the package matching the Soy namespace, it will walk the sys.path
  structure, open any module with a matching name, and test its SOY_NAMESPACE
  attribute. If the attribute matches, that module is loaded instead.
Multiple files can share the same soy namespace. In that instance, all of
these files will be loaded, combined, and loaded as one module.
Note: If multiple files share the same namespace, they still require that the
module name begins with the last part of the namespace (e.g.
soy.examples.delegates will load delegates0.py, delegatesxyz.py, etc.).
TODO(dcphillips): See if there's any way we can avoid this limitation without
blowing up load times.
Args:
name: The name of the module to import.
namespace: The namespace of the module to import.
environment_path: A custom environment module path for interacting with the
runtime environment.
Returns:
The Module object.
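  Example (illustrative call only; the namespace used here is invented):
    namespaced_import('simple', namespace='soy.examples')
    would first try `import soy.examples.simple` and, failing that, scan
    sys.path for simple*.py files whose SOY_NAMESPACE comment reads
    'soy.examples.simple'.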
"""
full_namespace = '%s.%s' % (namespace, name) if namespace else name
try:
# Try searching for the module directly
return importlib.import_module(full_namespace)
except ImportError:
# If the module isn't found, search without the namespace and check the
# namespaces.
if namespace:
namespace_key = "SOY_NAMESPACE: '%s'." % full_namespace
module = None
if environment_path:
file_loader = importlib.import_module(environment_path).file_loader
else:
file_loader = environment.file_loader
for sys_path, f_path, f_name in _find_modules(name):
# Verify the file namespace by comparing the 5th line.
with file_loader(f_path, f_name, 'r') as f:
for _ in range(4):
next(f)
if namespace_key != next(f).rstrip():
continue
# Strip the root path and the file extension.
module_path = six.ensure_str(os.path.relpath(f_path, sys_path)).replace(
'/', '.')
module_name = os.path.splitext(f_name)[0]
# Python 2 performs relative or absolute imports. Beginning with
# Python 3.3, only absolute imports are possible. Compare the
# docs for the default value of the `level` argument of `__import__`:
# https://docs.python.org/2/library/functions.html#__import__
# https://docs.python.org/3/library/functions.html#__import__
module = getattr(
__import__(module_path, globals(), locals(), [module_name]),
module_name)
break
if module:
# Add this to the global modules list for faster loading in the future.
_cache_module(full_namespace, module)
return module
raise
def manifest_import(namespace, manifest):
"""Imports a module using a namespace manifest to find the module."""
if not manifest:
raise ImportError('No manifest provided')
elif namespace not in manifest:
    raise ImportError('Manifest does not contain namespace: %s' % namespace)
return importlib.import_module(manifest[namespace])
def key_safe_data_access(data, key):
"""Safe key based data access.
Traditional bracket access in Python (foo['bar']) will throw a KeyError (or
IndexError if in a list) when encountering a non-existent key.
foo.get(key, None) is solves this problem for objects, but doesn't work with
lists. Thus this function serves to do safe access with a unified syntax for
both lists and dictionaries.
Args:
data: The data object to search for the key within.
key: The key to use for access.
Returns:
data[key] if key is present or None otherwise.
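  Example (illustrative values only):
    >>> key_safe_data_access({'a': 1}, 'a')
    1
    >>> key_safe_data_access({'a': 1}, 'b')  # missing key, returns None
    >>> key_safe_data_access([10, 20], 5)    # out-of-range index, returns None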
"""
try:
return data[key]
except (KeyError, IndexError):
return None
def register_delegate_fn(template_id, variant, priority, fn, fn_name):
"""Register a delegate function in the global registry.
Args:
template_id: The id for the given template.
variant: The variation key for the given template.
priority: The priority value of the given template.
fn: The template function.
fn_name: A unique name of the function generated at compile time.
Raises:
RuntimeError: If a delegate was attempted to be added with the same
priority an error will be raised.
"""
map_key = _gen_delegate_id(template_id, variant)
curr_priority, _, curr_fn_name = _DELEGATE_REGISTRY.get(
map_key, (None, None, None))
# Ignore unless at a equal or higher priority.
if curr_priority is None or priority > curr_priority:
# Registering new or higher-priority function: replace registry entry.
_DELEGATE_REGISTRY[map_key] = (priority, fn, fn_name)
elif priority == curr_priority and fn_name != curr_fn_name:
# Registering same-priority function: error.
raise RuntimeError(
'Encountered two active delegates with the same priority (%s:%s:%s).' %
(template_id, variant, priority))
def type_safe_add(*args):
"""A coercion function emulating JS style type conversion in the '+' operator.
This function is similar to the JavaScript behavior when using the '+'
operator. Variables will will use the default behavior of the '+' operator
until they encounter a type error at which point the more 'simple' type will
be coerced to the more 'complex' type.
Supported types are None (which is treated like a bool), bool, primitive
numbers (int, float, etc.), and strings. All other objects will be converted
to strings.
Example:
type_safe_add(True, True) = 2
type_safe_add(True, 3) = 4
type_safe_add(3, 'abc') = '3abc'
type_safe_add(True, 3, 'abc') = '4abc'
type_safe_add('abc', True, 3) = 'abcTrue3'
Args:
*args: List of parameters for addition/coercion.
Returns:
The result of the addition. The return type will be based on the most
'complex' type passed in. Typically an integer or a string.
"""
if not args:
return None
# JS operators can sometimes work as unary operators. So, we fall back to the
# initial value here in those cases to prevent ambiguous output.
if len(args) == 1:
return args[0]
is_string = isinstance(args[0], six.string_types)
result = args[0]
for arg in args[1:]:
try:
if is_string:
arg = _convert_to_js_string(arg)
result += arg
except TypeError:
# Special case for None which can be converted to bool but is not
# autocoerced. This can result in a conversion of result from a boolean to
# a number (which can affect later string conversion) and should be
# retained.
if arg is None:
result += False
else:
result = _convert_to_js_string(result) + _convert_to_js_string(arg)
is_string = True
return result
def list_contains(l, item):
return list_indexof(l, item) >= 0
def list_indexof(l, item):
"""Equivalent getting the index of `item in l` but using soy's equality algorithm."""
for i in range(len(l)):
if type_safe_eq(l[i], item):
return i
return -1
def concat_maps(d1, d2):
"""Merges two maps together."""
d3 = dict(d1)
d3.update(d2)
return d3
def map_entries(m):
"""Return map entries."""
return [{'key': k, 'value': m[k]} for k in m]
def list_slice(l, start, stop):
"""Equivalent of JavaScript Array.prototype.slice."""
return l[slice(start, stop)]
def list_reverse(l):
"""Reverses a list. The original list passed is not modified."""
return l[::-1]
def number_list_sort(l):
"""Sorts in numerical order."""
# Lists of numbers are sorted numerically by default.
return sorted(l)
def string_list_sort(l):
"""Sorts in lexicographic order."""
# Lists of strings are sorted lexicographically by default.
return sorted(l)
def type_safe_eq(first, second):
"""An equality function that does type coercion for various scenarios.
  This function emulates JavaScript's equality behavior. In JS, Objects will be
converted to strings when compared to a string primitive.
Args:
first: The first value to compare.
second: The second value to compare.
Returns:
True/False depending on the result of the comparison.
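  Example (illustrative values only):
    >>> type_safe_eq(1, '1')        # mirrors JS `1 == '1'`
    True
    >>> type_safe_eq(None, False)   # None is not coerced
    False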
"""
  # If the values are empty or of the same type, no coercion is necessary.
# TODO(dcphillips): Do a more basic type equality check if it's not slower
# (b/16661176).
if first is None or second is None or type(first) == type(second):
return first == second
try:
# TODO(dcphillips): This potentially loses precision for very large numbers.
# See b/16241488.
if isinstance(first, _NUMBER_TYPES) and not isinstance(first, bool):
return first == float(second)
if isinstance(second, _NUMBER_TYPES) and not isinstance(second, bool):
return float(first) == second
if isinstance(first, six.string_types):
return first == str(second)
if isinstance(second, six.string_types):
return str(first) == second
except ValueError:
    # Ignore type coercion failures
pass
return first == second
def check_not_null(val):
"""A helper to implement the Soy Function checkNotNull.
Args:
val: The value to test.
Returns:
val if it was not None.
Raises:
RuntimeError: If val is None.
"""
if val is None:
raise RuntimeError('Unexpected null value')
return val
def is_set(field, container):
"""A helper to implement the Soy Function isSet.
Args:
field (str): The field to test.
container (Dict[str, Any]): The container to test.
Returns:
True if the field is set in the container.
"""
return field in container
def parse_int(s):
"""A function that attempts to convert the input string into an int.
Returns None if the input is not a valid int.
Args:
s: String to convert.
Returns:
int if s is a valid int string, otherwise None.
"""
try:
return int(s)
except ValueError:
return None
def parse_float(s):
"""A function that attempts to convert the input string into a float.
Returns None if the input is not a valid float, or if the input is NaN.
Args:
s: String to convert.
Returns:
float if s is a valid float string that is not NaN, otherwise None.
"""
try:
f = float(s)
except ValueError:
return None
return None if math.isnan(f) else f
def sqrt(num):
"""Returns the square root of the given number."""
return math.sqrt(num)
def unsupported(msg):
raise Exception('unsupported feature: ' + msg)
def map_to_legacy_object_map(m):
"""Converts a Soy map to a Soy legacy_object_map.
legacy_object_maps must have string keys, but maps do not have this
restriction.
Args:
m: Map to convert.
Returns:
An equivalent legacy_object_map, with keys coerced to strings.
"""
return {str(key): m[key] for key in m}
def str_to_ascii_lower_case(s):
"""Converts the ASCII characters in the given string to lower case."""
return ''.join([c.lower() if 'A' <= c <= 'Z' else c for c in s])
def str_to_ascii_upper_case(s):
"""Converts the ASCII characters in the given string to upper case."""
return ''.join([c.upper() if 'a' <= c <= 'z' else c for c in s])
def str_starts_with(s, val):
"""Returns whether s starts with val."""
return s.startswith(val)
def str_ends_with(s, val):
"""Returns whether s ends with val."""
return s.endswith(val)
def str_replace_all(s, match, token):
"""Replaces all occurrences in s of match with token."""
return s.replace(match, token)
def str_trim(s):
"""Trims leading and trailing whitespace from s."""
return s.strip()
def str_split(s, sep):
"""Splits s into an array on sep."""
return s.split(sep) if sep else list(s)
def str_substring(s, start, end):
"""Implements the substring method according to the JavaScript spec."""
if start < 0:
start = 0
if end is not None:
if end < 0:
end = 0
if start > end:
# pylint: disable=arguments-out-of-order
return str_substring(s, end, start)
return s[start:end]
def soy_round(num, precision=0):
"""Implements the soy rounding logic for the round() function.
Python rounds ties away from 0 instead of towards infinity as JS and Java do.
So to make the behavior consistent, we add the smallest possible float amount
to break ties towards infinity.
Args:
num: the number to round
precision: the number of digits after the point to preserve
Returns:
a rounded number
"""
float_breakdown = math.frexp(num)
tweaked_number = ((float_breakdown[0] + sys.float_info.epsilon) *
2**float_breakdown[1])
rounded_number = round(tweaked_number, precision)
if not precision or precision < 0:
return int(rounded_number)
return rounded_number
######################
# Utility functions. #
######################
# pylint: disable=unused-argument
def _empty_template_function(data=None, ij_data=None):
return ''
def _cache_module(namespace, module):
"""Cache a loaded module in sys.modules.
Besides the caching of the main module itself, any parent packages that don't
exist need to be cached as well.
Args:
namespace: The python namespace.
module: The module object to be cached.
"""
sys.modules[namespace] = module
while '.' in namespace:
namespace = namespace.rsplit('.', 1)[0]
if namespace in sys.modules:
return
# TODO(dcphillips): Determine if anything's gained by having real modules
# for the packages.
sys.modules[namespace] = {}
def _convert_to_js_string(value):
"""Convert a value to a string, with the JS string values for primitives.
Args:
value: The value to stringify.
Returns:
A string representation of value. For primitives, ensure that the result
matches the string value of their JS counterparts.
"""
if value is None:
return 'null'
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value)
def _find_modules(name):
"""Walks the sys path and looks for modules that start with 'name'.
This function yields all results which match the pattern in the sys path.
  It can be treated similarly to os.walk(), but yields only files which match
the pattern. These are meant to be used for traditional import
syntax. Bad paths are ignored and skipped.
Args:
name: The name to match against the beginning of the module name.
Yields:
A tuple containing the path, the base system path, and the file name.
"""
# TODO(dcphillips): Allow for loading of compiled source once namespaces are
# limited to one file (b/16628735).
module_file_name = re.compile(r'^%s.*\.py$' % name)
# If scandir is available, it offers 5-20x improvement of walk performance.
walk = scandir.walk if scandir else os.walk
for path in sys.path:
try:
for root, _, files in walk(path):
for f in files:
if module_file_name.match(f):
yield path, root, f
except OSError:
# Ignore bad paths
pass
def _gen_delegate_id(template_id, variant=''):
return 'key_%s:%s' % (template_id, variant)
def create_template_type(template, name):
"""Returns a wrapper object for a given template function.
The wrapper object forwards calls to the underlying template, but overrides
the __str__ method.
Args:
template: The underlying template function.
name: The fully-qualified template name.
Returns:
A wrapper object that can be called like the underlying template.
"""
return _TemplateWrapper(template, name)
def bind_template_params(template, params):
"""Binds the given parameters to the given template."""
return lambda data, ij: template(dict(data, **params), ij)
class _TemplateWrapper:
"""A wrapper object that forwards to the underlying template."""
def __init__(self, template, name):
self.template = template
self.name = name
def __call__(self, *args):
return self.template(*args)
def __str__(self):
return '** FOR DEBUGGING ONLY: %s **' % self.name
| apache-2.0 | -4,886,007,463,098,833,000 | 28.446615 | 87 | 0.691576 | false |
nodakai/watchman | tests/integration/test_sock_perms.py | 1 | 10512 | # vim:ts=4:sw=4:et:
# Copyright 2016-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import random
import stat
import string
import sys
import time
import pywatchman
import WatchmanInstance
import WatchmanTestCase
try:
import grp
except ImportError:
# Windows
pass
try:
import unittest2 as unittest
except ImportError:
import unittest
@unittest.skipIf(
os.name == "nt" or sys.platform == "darwin" or os.geteuid() == 0,
"win or root or bad ldap",
)
class TestSockPerms(unittest.TestCase):
def _new_instance(self, config, expect_success=True):
if expect_success:
start_timeout = 10
else:
# If the instance is going to fail anyway then there's no point
# waiting so long
start_timeout = 5
return WatchmanInstance.InstanceWithStateDir(
config=config, start_timeout=start_timeout
)
def _get_custom_gid(self):
# This is a bit hard to do: we need to find a group the user is a member
# of that's not the effective or real gid. If there are none then we
# must skip.
groups = os.getgroups()
for gid in groups:
if gid != os.getgid() and gid != os.getegid():
return gid
self.skipTest("no usable groups found")
def _get_non_member_group(self):
"""Get a group tuple that this user is not a member of."""
user_groups = set(os.getgroups())
for group in grp.getgrall():
if group.gr_gid not in user_groups:
return group
self.skipTest("no usable groups found")
def waitFor(self, cond, timeout=10):
deadline = time.time() + timeout
res = None
while time.time() < deadline:
try:
res = cond()
if res:
return [True, res]
except Exception:
pass
time.sleep(0.03)
return [False, res]
def assertWaitFor(self, cond, timeout=10, message=None, get_debug_output=None):
status, res = self.waitFor(cond, timeout)
if status:
return res
if message is None:
message = "%s was not met in %s seconds: %s" % (cond, timeout, res)
if get_debug_output is not None:
message += "\ndebug output:\n%s" % get_debug_output()
self.fail(message)
def test_too_open_user_dir(self):
instance = self._new_instance({}, expect_success=False)
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o777)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "the permissions on %s allow others to write to it" % (
instance.user_dir
)
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_invalid_sock_group(self):
# create a random group name
while True:
group_name = "".join(
random.choice(string.ascii_lowercase) for _ in range(8)
)
try:
grp.getgrnam(group_name)
except KeyError:
break
instance = self._new_instance({"sock_group": group_name}, expect_success=False)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
# This is the error we expect to find
wanted = "group '%s' does not exist" % group_name
# But if the site uses LDAP or YP/NIS or other similar technology for
        # their password database then we might experience other infra flakiness
# so we allow for the alternative error case to be present and consider
# it a pass.
we_love_ldap = "getting gid for '%s' failed:" % group_name
self.assertWaitFor(
lambda: (wanted in instance.getCLILogContents())
or (we_love_ldap in instance.getCLILogContents()),
get_debug_output=lambda: str(ctx.exception)
+ "\n"
+ instance.getCLILogContents(),
)
def test_user_not_in_sock_group(self):
group = self._get_non_member_group()
instance = self._new_instance(
{"sock_group": group.gr_name}, expect_success=False
)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "setting up group '%s' failed" % group.gr_name
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_default_sock_group(self):
# By default the socket group should be the effective gid of the process
gid = os.getegid()
instance = self._new_instance({})
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def test_custom_sock_group(self):
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
instance = self._new_instance({"sock_group": group.gr_name})
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def test_user_previously_in_sock_group(self):
"""This tests the case where a user was previously in sock_group
(so Watchman created the directory with that group), but no longer is
(so the socket is created with a different group)."""
# Since it's hard to drop a group from a process without being
# superuser, fake it. Use a private testing-only config option to set
# up separate groups for the directory and the file.
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
non_member_group = self._get_non_member_group()
# Need to wait for the server to come up here, can't use
# expect_success=False.
instance = self._new_instance(
{"sock_group": group.gr_name, "__sock_file_group": non_member_group.gr_name}
)
with self.assertRaises(pywatchman.SocketConnectError):
instance.start()
wanted = (
"for socket '%s', gid %d doesn't match expected gid %d "
"(group name %s)."
% (
instance.getSockPath().unix_domain,
gid,
non_member_group.gr_gid,
non_member_group.gr_name,
)
)
self.assertWaitFor(lambda: wanted in instance.getServerLogContents())
def test_invalid_sock_access(self):
instance = self._new_instance({"sock_access": "bogus"}, expect_success=False)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "Expected config value sock_access to be an object"
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
instance = self._new_instance(
{"sock_access": {"group": "oui"}}, expect_success=False
)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "Expected config value sock_access.group to be a boolean"
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_default_sock_access(self):
instance = self._new_instance({})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o700 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o600)
def test_custom_sock_access_group(self):
instance = self._new_instance({"sock_access": {"group": True}})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o660)
def test_custom_sock_access_others(self):
instance = self._new_instance({"sock_access": {"group": True, "others": True}})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o666)
def test_sock_access_upgrade(self):
instance = self._new_instance({"sock_access": {"group": True, "others": True}})
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o700)
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o666)
def test_sock_access_downgrade(self):
instance = self._new_instance({"sock_access": {"group": True}})
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o755 | stat.S_ISGID)
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o660)
def test_sock_access_group_change(self):
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
instance = self._new_instance({"sock_group": group.gr_name})
os.makedirs(instance.user_dir)
# ensure that a different group is set
os.chown(instance.user_dir, -1, os.getegid())
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def assertFileMode(self, f, mode):
st = os.lstat(f)
self.assertEqual(stat.S_IMODE(st.st_mode), mode)
def assertFileGID(self, f, gid):
st = os.lstat(f)
self.assertEqual(st.st_gid, gid)
| apache-2.0 | 3,227,173,042,821,205,500 | 35.884211 | 88 | 0.612348 | false |
dgrtwo/gleam | examples/baseball.py | 1 | 2364 | import os
from collections import OrderedDict
from flask import Flask
from wtforms import fields
from ggplot import (aes, stat_smooth, geom_point, geom_text, ggtitle, ggplot,
xlab, ylab)
import numpy as np
import pandas as pd
from gleam import Page, panels
# setup
stats = ['At-Bats (AB)', 'Runs (R)', 'Hits (H)', 'Doubles (2B)',
'Triples (3B)', 'Home Runs (HR)', 'Runs Batted In (RBI)',
'Stolen Bases (SB)', 'Caught Stealing (CS)', 'Walks (BB)',
'Intentional Walk (IBB)', 'Salary', 'Attendance']
statchoices = [(s, s) for s in stats]
dir = os.path.split(__file__)[0]
players = pd.read_csv(os.path.join(dir, "baseball_data", "players.csv"))
teams = pd.read_csv(os.path.join(dir, "baseball_data", "teams.csv"))
class BaseballInput(panels.InputPanel):
xvar = fields.SelectField(label="X axis", choices=statchoices,
default="Hits (H)")
yvar = fields.SelectField(label="Y axis", choices=statchoices,
default="Runs (R)")
year = fields.IntegerField(label="Year", default=2013)
linear = fields.BooleanField(label="Linear Fit")
shownames = fields.BooleanField(label="Show Names")
class DataScatter(panels.PlotPanel):
height = 500
width = 700
def __init__(self, name, dat, ID_col):
self.name = name
self.dat = dat
self.ID_col = ID_col
panels.PlotPanel.__init__(self)
def plot(self, inputs):
"""Plot the given X and Y axes on a scatter plot"""
if inputs.year not in self.dat.Year.values:
return
if inputs.xvar not in self.dat or inputs.yvar not in self.dat:
return
subdat = self.dat[self.dat.Year == inputs.year]
p = ggplot(subdat, aes(x=inputs.xvar, y=inputs.yvar))
p = p + geom_point()
if inputs.shownames:
p = p + geom_text(aes(label=self.ID_col), vjust=1, hjust=1)
if inputs.linear:
p = p + stat_smooth(color="red", method="lm")
return p
class BaseballGleam(Page):
title = "Baseball Statistics"
input = BaseballInput()
output = panels.TabPanel([DataScatter("Teams", teams, "teamID"),
DataScatter("Players", players, "name")])
app = Flask("BaseballGleam")
BaseballGleam.add_flask(app)
app.debug = True
app.run()
| mit | 4,333,654,083,607,356,400 | 28.924051 | 77 | 0.60533 | false |
NeCTAR-RC/horizon | openstack_dashboard/dashboards/project/key_pairs/tests.py | 1 | 7827 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
import mock
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.key_pairs.forms \
import KEYPAIR_ERROR_MESSAGES
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:key_pairs:index')
class KeyPairTests(test.TestCase):
@test.create_mocks({api.nova: ('keypair_list',),
quotas: ('tenant_quota_usages',)})
def test_index(self):
keypairs = self.keypairs.list()
quota_data = self.quota_usages.first()
self.mock_tenant_quota_usages.return_value = quota_data
self.mock_keypair_list.return_value = keypairs
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
self.assertItemsEqual(res.context['keypairs_table'].data, keypairs)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_quota_usages, 4,
mock.call(test.IsHttpRequest(), targets=('key_pairs', )))
self.mock_keypair_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({api.nova: ('keypair_list',
'keypair_delete')})
def test_delete_keypair(self):
keypair = self.keypairs.first()
self.mock_keypair_list.return_value = self.keypairs.list()
self.mock_keypair_delete.return_value = None
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_keypair_list.assert_called_once_with(test.IsHttpRequest())
self.mock_keypair_delete.assert_called_once_with(test.IsHttpRequest(),
keypair.name)
@test.create_mocks({api.nova: ('keypair_list',
'keypair_delete')})
def test_delete_keypair_exception(self):
keypair = self.keypairs.first()
self.mock_keypair_list.return_value = self.keypairs.list()
self.mock_keypair_delete.side_effect = self.exceptions.nova
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_keypair_list.assert_called_once_with(test.IsHttpRequest())
self.mock_keypair_delete.assert_called_once_with(test.IsHttpRequest(),
keypair.name)
@test.create_mocks({api.nova: ('keypair_get',)})
def test_keypair_detail_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secret"
self.mock_keypair_get.return_value = keypair
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:key_pairs:detail',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertContains(res, "<dd>%s</dd>" % keypair.name, 1, 200)
self.mock_keypair_get.assert_called_once_with(test.IsHttpRequest(),
keypair.name)
@test.create_mocks({api.nova: ('keypair_import',)})
def test_import_keypair(self):
key1_name = "new_key_pair"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
key_type = "ssh"
self.mock_keypair_import.return_value = None
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key,
'key_type': key_type}
url = reverse('horizon:project:key_pairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
self.mock_keypair_import.assert_called_once_with(
test.IsHttpRequest(), key1_name,
public_key.replace("\r", "").replace("\n", ""),
key_type)
@test.create_mocks({api.nova: ('keypair_import',)})
def test_import_keypair_invalid_key(self):
key_name = "new_key_pair"
public_key = "ABCDEF"
key_type = "ssh"
self.mock_keypair_import.side_effect = self.exceptions.nova
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key,
'key_type': key_type}
url = reverse('horizon:project:key_pairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = 'Unable to import key pair.'
self.assertFormErrors(res, count=1, message=msg)
self.mock_keypair_import.assert_called_once_with(
test.IsHttpRequest(), key_name, public_key, key_type)
def test_import_keypair_invalid_key_name(self):
key_name = "invalid#key?name=!"
public_key = "ABCDEF"
key_type = "ssh"
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key,
'key_type': key_type}
url = reverse('horizon:project:key_pairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = six.text_type(KEYPAIR_ERROR_MESSAGES['invalid'])
self.assertFormErrors(res, count=1, message=msg)
def test_import_keypair_space_key_name(self):
key_name = " "
public_key = "ABCDEF"
key_type = "ssh"
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key,
'key_type': key_type}
url = reverse('horizon:project:key_pairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = six.text_type(KEYPAIR_ERROR_MESSAGES['invalid'])
self.assertFormErrors(res, count=1, message=msg)
@test.create_mocks({api.nova: ('keypair_import',)})
def test_import_keypair_with_regex_defined_name(self):
key1_name = "new-key-pair with_regex"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
key_type = "ssh"
self.mock_keypair_import.return_value = None
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key,
'key_type': key_type}
url = reverse('horizon:project:key_pairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
self.mock_keypair_import.assert_called_once_with(
test.IsHttpRequest(), key1_name,
public_key.replace("\r", "").replace("\n", ""),
key_type)
| apache-2.0 | 6,792,557,284,657,845,000 | 39.345361 | 78 | 0.60138 | false |
gangadhar-kadam/mic-erpnext | stock/report/delivery_note_trends/delivery_note_trends.py | 1 | 1058 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from controllers.trends import get_columns,get_data
def execute(filters=None):
if not filters: filters ={}
data = []
trans = "Delivery Note"
conditions = get_columns(filters, trans)
data = get_data(filters, conditions)
return conditions["columns"], data | agpl-3.0 | -2,810,063,489,910,439,000 | 36.821429 | 71 | 0.751418 | false |
sevikkk/django-sql-explorer | explorer/forms.py | 1 | 3003 | from django import forms
from django.forms import ModelForm, Field, ValidationError
from explorer.models import Query, MSG_FAILED_BLACKLIST
from django.db import DatabaseError, connections
from crontab import CronTab
from explorer.utils import get_connections_list
_ = lambda x: x
class SqlField(Field):
def validate(self, value):
"""
Ensure that the SQL passes the blacklist and executes. Execution check is skipped if params are present.
:param value: The SQL for this Query model.
"""
query = Query(sql=value)
error = MSG_FAILED_BLACKLIST if not query.passes_blacklist() else None
#if not error and not query.available_params():
# try:
# query.try_execute()
# except DatabaseError as e:
# error = str(e)
if error:
raise ValidationError(
_(error),
code="InvalidSql"
)
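# Example of the check above (values are illustrative): sql="DELETE FROM auth_user"
# trips the blacklist and raises InvalidSql, while a plain "SELECT 1" passes; the
# try_execute() block is commented out, so runtime errors only surface when the query runs.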
class CrontabField(Field):
def validate(self, value):
"""
Ensure that the field is valid crontab entry
:param value: The schedule entry for this Query model.
"""
error = None
if not value:
return
if value.startswith('#'):
return
try:
cron = CronTab(value)
except ValueError, e:
error = str(e)
if error:
raise ValidationError(
_(error),
code="InvalidCrontabEntry"
)
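# Example of the schedule validation (illustrative; assumes the 'crontab' package's
# CronTab parser): an empty value or a line starting with '#' is accepted, "0 2 * * *"
# parses, and a value such as "every night" raises ValueError -> InvalidCrontabEntry.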
class DatabaseField(forms.ChoiceField):
def __init__(self, *args, **kwargs):
super(DatabaseField, self).__init__(choices=get_connections_list(), *args, **kwargs)
def validate(self, value):
"""
Ensure that the field is valid crontab entry
:param value: The schedule entry for this Query model.
"""
error = None
if not value:
return
if value not in connections._databases:
error = "Connection is not configured, known connections: %s" % (", ".join(connections._databases.keys()))
if error:
raise ValidationError(
_(error),
code="InvalidDatabase"
)
class QueryForm(ModelForm):
sql = SqlField()
schedule = CrontabField()
database = DatabaseField()
def clean(self):
if self.instance and self.data.get('created_by_user', None):
self.cleaned_data['created_by_user'] = self.instance.created_by_user
return super(QueryForm, self).clean()
@property
def created_by_user_email(self):
return self.instance.created_by_user.email if self.instance.created_by_user else '--'
@property
def created_by_user_id(self):
return self.instance.created_by_user.id if self.instance.created_by_user else '--'
class Meta:
model = Query
fields = ['title', 'sql', 'description', 'created_by_user', 'database', 'cache_table', 'schedule', 'groups'] | mit | 8,805,063,488,350,659,000 | 25.584071 | 118 | 0.586414 | false |
ywang037/delta-ntu-slerp4 | Training/train_mobilenet_casia_1771.py | 1 | 7420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 14:47:47 2017
@author: slerp4
Compared with _debug version, this version excludes RMSprop optimizer
"""
#import tensorflow as tf
from keras import backend as K
from keras.applications.mobilenet import MobileNet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.callbacks import LearningRateScheduler, CSVLogger
import os, importlib
from timeit import default_timer as timer
import datetime
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
# check and set tensorflow as backend
if K.backend() != 'tensorflow':
os.environ['KERAS_BACKEND'] = 'tensorflow'
importlib.reload(K)
assert K.backend() == 'tensorflow'
print('{} backend is sucessfully set'.format(K.backend()))
elif K.backend() == 'tensorflow':
print('{} backend has already been set'.format(K.backend()))
# force to use gpu:0 tesla k20c
# Creates a graph.
with tf.device('/device:GPU:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
# training hyper parameters
train_data_dir = '.\Datasets\casia-1771'
numclass = 1771
num_train_samples = 233505
batch_size = 64
#epochs = 100
alpha = 0.5 # choices=[0.25, 0.5, 0.75, 1.0]
inputsize = 224 # choices=[128, 160, 192, 224, 224], >=32 is ok
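# With these settings each epoch covers num_train_samples // batch_size = 233505 // 64 = 3648 batches.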
'''
# define step decay function - used to visualize learning rate change
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.lr.append(step_decay(len(self.losses)))
print('Current learning rate:', step_decay(len(self.losses)))
'''
# learning rate schedule
def step_decay(epoch):
# initial_lrate = 0.01
drop = 0.5
epochs_drop = 20.0
lrate = init_lr * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
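# Worked example of the schedule above: with init_lr = 0.01 the rate is 0.01 for
# epochs 0-18, 0.005 for epochs 19-38, 0.0025 for epochs 39-58, and so on
# (the halving happens whenever floor((1+epoch)/20) increments).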
# Setup the model
# using CASIA-WebFaces dataset for training, 10575 identities in total
model = MobileNet(alpha=alpha, depth_multiplier=1, dropout=1e-3,
include_top=True, weights=None, input_tensor=None, pooling=None, classes=numclass)
model.summary()
print('\nPrepare to train cnn model {}-MobileNet-224 with top layer included'.format(alpha))
#print('Total classes: {}'.format(numclass))
#print('Training samples: {}'.format(num_train_samples))
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
while optimizer_chosen not in ['A', 'B']:
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
epochs = int(input('Number of epochs? '))
while epochs < 0:
epochs = int(input('Use a positive integer as the number of epochs: '))
init_lr = float(input('Initial learning rate? '))
while init_lr < 0 or init_lr>0.2:
init_lr = float(input('Use a learning rate in [0, 0.2]: '))
# preparing training data
print('\nDataset path: '+ train_data_dir)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# load training and testing data
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(224, 224),
batch_size=batch_size)
# define the format of names of several files
stamp = str(alpha)+'-mobilenet-'+str(inputsize)+'-c{}-'.format(numclass)+'b{}-'.format(batch_size)+'e{}-'.format(epochs)
if optimizer_chosen == 'A':
# using step-decaying sgd
method = 'SGD'
print('\nUsing step-decaying stochastic gradient descent')
print('learning rate folds every 20 epochs')
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
'''
# use following scripts to have learning rate displayed
# learning schedule callback
loss_history = LossHistory()
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [loss_history, lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=2)
'''
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=1)
elif optimizer_chosen == 'B':
# using adam update as adaptive learning rate method
method = 'Adam'
print('\nUsing using adam update as adaptive learning rate method')
adam = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # original lr=0.001
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
epochs=epochs, validation_data=None, callbacks=[csv_logger], verbose=1)
train_end = timer()
mins, secs = divmod(train_end-train_start,60)
hour, mins = divmod(mins,60)
print('Training process took %d:%02d:%02d' % (hour,mins,secs))
# set a stamp of file name for saving the record and weights
now = datetime.datetime.now() #current date and time
save_name = record +'-'+now.strftime("%Y%m%d-%H%M")
#print(history.history)
print(history.history.keys())
# print plots of acc and loss in one pdf
pp = PdfPages(save_name +'.pdf')
# summarize history for accuracy
plt.plot(history.history['acc']) # plt.plot(history.history['val_acc'])
plt_title = str(alpha)+'-mobilenet-'+str(inputsize)+' trained on small dataset'
plt_legend = method + ', {} classes'.format(numclass)+', batch size ={}'.format(batch_size)
plt.title(plt_title)
plt.ylabel('Model accuracy')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='lower right')
pp.savefig()
plt.show()
# summarize history for loss
plt.plot(history.history['loss']) #plt.plot(history.history['val_loss'])
plt.title(plt_title)
plt.ylabel('Model loss')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='upper left') #plt.legend(['train', 'test'], loc='upper left')
pp.savefig()
plt.show()
pp.close()
# save trained weights
model.save_weights(save_name +'.h5')
| mit | -1,016,345,155,239,914,100 | 35.732673 | 120 | 0.684097 | false |
gslab-econ/gslab_python | gslab_make/get_externals.py | 1 | 4632 | #! /usr/bin/env python
import os
import private.preliminaries as prelim
import private.metadata as metadata
import private.messages as messages
from private.getexternalsdirectives import SystemDirective
def get_externals(externals_file,
external_dir = '@DEFAULTVALUE@',
makelog = '@DEFAULTVALUE@',
quiet = False):
'''Fetch external files
Description:
This function interprets a formatted text document listing files
to be exported via SVN or a system copy command.
Syntax:
get_externals(externals_file [, externals_dir [, makelog [, quiet]]])
Usage:
The `externals_file` argument should be the path of a tab-delimited text
file containing information on the external files that the function call
should retrieve. This file needs to rows of numbers or characters, delimited
by either tabs or 4 spaces,one for each file to be exported via svn.
The proper format is: rev dir file outdir outfile notes
### Column descriptions:
* rev
* Revision number of the file/directory in integer format.
If left blank along with directory column, get_externals.py will
read the last specified revision number. If copying from a shared
drive rather than the repository, list revision number as COPY.
* dir
* Directory of the file/directory requested. As described above,
%xxx% placemarkers are substituted in from predefined values in
metadata.py. If left blank along with revision column,
get_externals.py will read the last specified directory.
* file
* Name of the file requested. If entire directory is required, leave
column as a single *. If a file name wildcard is required place
single * within filename. get_externals.py will attempt to screen
out bad file names. Cannot be left blank.
* outdir
* Desired output directory of the exported file/directory.
Typically of the form ./subdir/. If left blank, will be
filled with the first level of the externals relative path.
* outfile
* Desired output name of the exported file/directory. If left as
double quotes, indicates that it should have the same name.
Adding a directory name that is different from the default """"
will place this subdirectory within the outdir. Additionally,
get_externals can assign a prefix tag to exported file collections,
either through a folder export, or a wildcard call; it does so
when the outfile column contains text of the pattern '[prefix]*',
where the prefix [prefix] will be attached to exported files.
* notes
* Optional column with notes on the export. get_externals.py ignores this,
but logs it.
Example of externals.txt:
```
rev dir file outdir outfile notes
2 %svn%/directory/ * ./destination_directory/ """"
COPY %svn%/other_directory/ my_file.txt . """"
```
The destination directory is specified by an optional second
parameter whose default value is "../external". The log file produced by
get_externals is automatically added to an optional third parameter
whose default value is '../output/make.log'.
The fourth argument, quiet, is by default False. Setting this argument to
True suppresses standard output and errors from SVN.
'''
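    # Illustrative call (paths are hypothetical and depend on the repository layout):
    #   get_externals('./externals.txt', external_dir='../external',
    #                 makelog='../output/make.log', quiet=False)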
try:
LOGFILE = prelim.start_logging(metadata.settings['externalslog_file'], 'get_externals.py')
makelog, externals, last_dir, last_rev = \
prelim.externals_preliminaries(makelog, externals_file, LOGFILE)
for line in externals:
try:
directive = SystemDirective(line, LOGFILE, last_dir, last_rev)
directive.error_check()
directive.clean(external_dir)
directive.issue_sys_command(quiet)
# Save rev/dir for next line
last_dir = directive.dir
last_rev = directive.rev
except:
prelim.print_error(LOGFILE)
prelim.end_logging(LOGFILE, makelog, 'get_externals.py')
except Exception as errmsg:
print "Error with get_external: \n", errmsg
| mit | -6,831,750,537,376,042,000 | 45.265306 | 106 | 0.618523 | false |
anselmobd/fo2 | script/TussorBipaRolo.py | 1 | 1346 | import sys
import android
import os
import json
import urllib.request
print('\n'*10)
print('='*30)
print(' Tussor')
print(' Coletor de códigos de barras')
print('='*30)
droid = android.Android()
print('\nCelular: "{}"'.format(os.environ['QPY_USERNO']))
# Main loop: pressing only "Enter" scans a barcode; any other input exits (UI strings are Portuguese).
choice = ' '
while choice == ' ':
print('')
print('='*30)
print(' "Enter" para bipar um código')
print(' Para sair: qq tecla + "Enter"')
print('='*30)
c = input('')
if c != '':
sys.exit()
code = droid.scanBarcode()
if code.result is None:
print('Nenhum código bipado!')
else:
barcode = code.result['extras']['SCAN_RESULT']
print('Código de barras: "{}"\n'.format(barcode))
        data = {}
        # Look the roll up on the intranet web service; the response body is JSON.
        url = 'http://intranet.tussor.com.br:88/insumo/rolo/{}/{}/'.format(
barcode, os.environ['QPY_USERNO'])
webURL = urllib.request.urlopen(url)
data = webURL.read()
encoding = webURL.info().get_content_charset('utf-8')
rolo = json.loads(data.decode(encoding))
if rolo == {}:
print('Rolo não encontrado!')
else:
print(' Rolo: {:09}'.format(rolo['ROLO']))
print('Referência: {}'.format(rolo['REF']))
print(' Cor: {}'.format(rolo['COR']))
print(' Tamanho: {}'.format(rolo['TAM']))
| mit | 2,331,994,624,095,525,400 | 25.27451 | 75 | 0.54403 | false |
facelessuser/TabsExtra | tabs_extra.py | 1 | 32989 | """
TabsExtra.
Copyright (c) 2014 - 2016 Isaac Muse <[email protected]>
License: MIT
"""
import sublime_plugin
import sublime
import time
import sys
from TabsExtra import tab_menu
import os
import functools
from operator import itemgetter
import sublime_api
from urllib.parse import urljoin
from urllib.request import pathname2url
SETTINGS = "tabs_extra.sublime-settings"
PREFS = "Preferences.sublime-settings"
LEFT = 0
RIGHT = 1
LAST = 2
LAST_ACTIVE = None
OVERRIDE_CONFIRM = '''TabsExtra will overwrite the entire "Tab Context.sublime-menu" file in "Packages/Default" with a new one. ST3 keeps an unmodified copy in the archive.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
RESTORE_CONFIRM = '''In ST3 TabsExtra will simply delete the override "Tab Context.sublime-menu" from "Packages/Default" to allow the archived menu to take effect.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
###############################
# Helpers
###############################
def log(msg, status=False):
"""Log message."""
string = str(msg)
print("TabsExtra: %s" % string)
if status:
sublime.status_message(string)
def debug(s):
"""Debug message."""
if sublime.load_settings(SETTINGS).get("debug", False):
log(s)
def sublime_format_path(pth):
"""Format path for sublime."""
import re
m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
if sublime.platform() == "windows" and m is not None:
pth = m.group(1) + "/" + m.group(2)
return pth.replace("\\", "/")
def is_persistent():
"""Check if sticky tabs should be persistent."""
return sublime.load_settings(SETTINGS).get("persistent_sticky", False)
def sort_on_load_save():
"""Sort on save."""
return (
sublime.load_settings(SETTINGS).get("sort_on_load_save", False) and
not sublime.load_settings(PREFS).get("preview_on_click")
)
def timestamp_view(window, sheet):
"""Timestamp view."""
global LAST_ACTIVE
view = window.active_view()
if view is None:
return
# Detect if this focus is due to the last active tab being moved
if (
LAST_ACTIVE is not None and
not LAST_ACTIVE.settings().get("tabs_extra_is_closed", False) and
LAST_ACTIVE.window() is None
):
# Flag last active tab as being moved
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
LAST_ACTIVE.settings().set("tabs_extra_moving", [window.id(), active_group])
# Skip if moving a tab
LAST_ACTIVE = None
allow = False
else:
allow = True
if allow:
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
# Add time stamp of last activation
view.settings().set('tabs_extra_last_activated', time.time())
# Track the tabs last position to help with focusing after a tab is moved
view.settings().set('tabs_extra_last_activated_sheet_index', active_index)
LAST_ACTIVE = view
debug("activated - %s" % view.file_name())
else:
debug("skipping - %s" % view.file_name())
def get_group_view(window, group, index):
"""Get the view at the given index in the given group."""
sheets = window.sheets_in_group(int(group))
sheet = sheets[index] if -1 < index < len(sheets) else None
view = sheet.view() if sheet is not None else None
return view
class Focus(object):
"""View focus handler."""
win = None
obj = None
@classmethod
def cancel(cls):
"""Cancel focus."""
cls.win = None
cls.obj = None
@classmethod
def defer(cls, win, obj):
"""Defer focus."""
if cls.win is None and cls.obj is None:
cls.win = win
cls.obj = obj
sublime.set_timeout(cls.on_focus, 100)
else:
cls.win = win
cls.obj = obj
@classmethod
def on_focus(cls):
"""On focus event."""
cls._focus()
@classmethod
def focus(cls, win, obj):
"""Set the win and obj before calling focus."""
cls.win = win
cls.obj = obj
cls._focus()
@classmethod
def _focus(cls):
"""Perform view focus."""
try:
if cls.win is not None and cls.obj is not None:
if isinstance(cls.obj, sublime.View):
cls.win.focus_view(cls.obj)
timestamp_view(cls.win, cls.obj)
elif isinstance(cls.obj, sublime.Sheet):
cls.win.focus_sheet(cls.obj)
timestamp_view(cls.win, cls.obj)
except Exception:
pass
cls.cancel()
###############################
# Sticky Tabs
###############################
class TabsExtraClearAllStickyCommand(sublime_plugin.WindowCommand):
"""Clear all sticky tabs."""
def run(self, group=-1, force=False):
"""Clear all tab sticky states of current active group."""
if group == -1:
group = self.window.active_group()
if group >= 0:
persistent = is_persistent()
views = self.window.views_in_group(int(group))
if not persistent or force:
for v in views:
v.settings().erase("tabs_extra_sticky")
def is_visible(self, group=-1, force=False):
"""Show command if any tabs in active group are sticky."""
if group == -1:
group = self.window.active_group()
marked = False
views = self.window.views_in_group(int(group))
for v in views:
if v.settings().get("tabs_extra_sticky", False):
marked = True
break
return marked
class TabsExtraToggleStickyCommand(sublime_plugin.WindowCommand):
"""Toggle sticky state for tab."""
def run(self, group=-1, index=-1):
"""Toggle a tabs sticky state."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
if not view.settings().get("tabs_extra_sticky", False):
view.settings().set("tabs_extra_sticky", True)
else:
view.settings().erase("tabs_extra_sticky")
def is_checked(self, group=-1, index=-1):
"""Show in menu whether the tab is sticky."""
checked = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
checked = view.settings().get("tabs_extra_sticky", False)
return checked
class TabsExtraSetStickyCommand(sublime_plugin.TextCommand):
"""Set sticky value for the tab."""
def run(self, edit, value):
"""Set the sticky command to the specific value."""
if self.is_enabled(value):
self.view.settings().set("tabs_extra_sticky", bool(value))
def is_enabled(self, value):
"""Check if sticky value is already set to desired value."""
enabled = False
if self.view is not None:
current_value = self.view.settings().get("tabs_extra_sticky", False)
if current_value != value:
enabled = True
return enabled
###############################
# Close
###############################
class TabsExtraCloseMenuCommand(sublime_plugin.WindowCommand):
"""Close tabs via a quick panel menu."""
close_types = [
("Close", "single"),
("Close Other Tabs", "other"),
("Close Tabs to Right", "right"),
("Close Tabs to Left", "left"),
("Close All Tabs", "all")
]
def run(self, mode="normal", close_type=None):
"""Run command."""
self.mode = mode
self.group = -1
self.index = -1
sheet = self.window.active_sheet()
if sheet is not None:
self.group, self.index = self.window.get_sheet_index(sheet)
if self.group != -1 and self.index != -1:
value = None
if close_type is not None:
index = 0
for ct in self.close_types:
if ct[1] == close_type:
value = index
index += 1
if value is None:
self.window.show_quick_panel(
[x[0] for x in self.close_types],
self.check_selection
)
else:
self.check_selection(value)
def check_selection(self, value):
"""Check the user's selection."""
if value != -1:
close_unsaved = True
unsaved_prompt = True
if self.mode == "skip_unsaved":
close_unsaved = False
if self.mode == "dismiss_unsaved":
unsaved_prompt = False
close_type = self.close_types[value][1]
self.window.run_command(
"tabs_extra_close",
{
"group": int(self.group),
"index": int(self.index),
"close_type": close_type,
"unsaved_prompt": unsaved_prompt,
"close_unsaved": close_unsaved
}
)
def is_enabled(self, mode="normal"):
"""Check if command is enabled."""
group = -1
index = -1
sheet = self.window.active_sheet()
if sheet is not None:
group, index = self.window.get_sheet_index(sheet)
return group != -1 and index != -1 and mode in ["normal", "skip_unsaved", "dismiss_unsaved"]
class TabsExtraCloseAllCommand(sublime_plugin.WindowCommand):
"""Close all tabs in the whole window."""
def run(self):
"""Close all tabs in window; not just the tabs in the active group."""
for group in range(0, self.window.num_groups()):
sheet = self.window.active_sheet_in_group(group)
if sheet is not None:
index = self.window.get_sheet_index(sheet)[1]
self.window.run_command("tabs_extra_close", {"close_type": "all", "group": group, "index": index})
class TabsExtraCloseCommand(sublime_plugin.WindowCommand):
"""Close tab command."""
def init(self, close_type, group, index):
"""
Determine which views will be targeted by close command.
Also determine which tab states need to be cleaned up.
"""
self.persistent = is_persistent()
self.sheets = self.window.sheets_in_group(int(group))
assert(close_type in ["single", "left", "right", "other", "all"])
# Setup active index and group
active_sheet = self.window.active_sheet()
active_index = None
self.active_index = index
self.active_group = None
if active_sheet is not None:
active_group, active_index = self.window.get_sheet_index(active_sheet)
if group != active_group:
active_index = None
if active_index is not None:
self.active_index = active_index
# Determine targeted sheets to close and sheets to cleanup
if close_type == "single":
self.targets = [self.sheets[index]]
self.cleanup = bool(len(self.sheets[:index] + self.sheets[index + 1:]))
elif close_type == "left":
self.targets = self.sheets[:index]
self.cleanup = bool(len(self.sheets[index:]))
elif close_type == "right":
self.targets = self.sheets[index + 1:]
self.cleanup = bool(len(self.sheets[:index + 1]))
elif close_type == "other":
self.targets = self.sheets[:index] + self.sheets[index + 1:]
self.cleanup = True
elif close_type == "all":
self.targets = self.sheets[:]
self.cleanup = False
def can_close(self, is_sticky, is_single):
"""Prompt user in certain scenarios if okay to close."""
is_okay = True
if is_sticky:
if not is_single:
is_okay = False
return is_okay
def run(
self, group=-1, index=-1,
close_type="single", unsaved_prompt=True, close_unsaved=True
):
"""Close the specified tabs and cleanup sticky states."""
TabsExtraListener.extra_command_call = True
try:
if group >= 0 and index >= 0:
self.init(close_type, group, index)
if (
len(self.targets) and
not unsaved_prompt and
not all(not target.view().is_dirty() for target in self.targets) and
not sublime.ok_cancel_dialog(
"Are you sure you want to dismiss all targeted unsaved buffers?"
)
):
TabsExtraListener.extra_command_call = False
return
for s in self.targets:
v = s.view()
if v is not None:
if self.can_close(v.settings().get("tabs_extra_sticky", False), close_type == "single"):
if not self.persistent:
v.settings().erase("tabs_extra_sticky")
self.window.focus_view(v)
if not v.is_dirty() or close_unsaved:
if not unsaved_prompt:
v.set_scratch(True)
sublime_api.window_close_file(self.window.id(), v.id())
elif not self.persistent:
v.settings().erase("tabs_extra_sticky")
else:
self.window.focus_sheet(s)
self.window.run_command('close_file')
if not self.persistent and self.cleanup:
self.window.run_command("tabs_extra_clear_all_sticky", {"group": group})
except Exception:
pass
TabsExtraListener.extra_command_call = False
###############################
# Listener
###############################
class TabsExtraListener(sublime_plugin.EventListener):
"""Listener command to handle tab focus, closing, moving events."""
extra_command_call = False
def on_window_command(self, window, command_name, args):
"""Intercept and override specific close tab commands."""
extra_command_call = TabsExtraListener.extra_command_call
cmd = None
if args is None:
view = window.active_view()
if view is None:
return cmd
# Mark all actual file closes done from TabsExtra
# This helps us know when file close was called outside of TabsExtra commands
if extra_command_call and command_name == "close_file":
view.settings().set("tabs_extra_closing", True)
return cmd
group, index = window.get_view_index(view)
args = {"group": group, "index": index}
if command_name in ["close_by_index", "close"]:
command_name = "tabs_extra_close"
args["close_type"] = "single"
cmd = (command_name, args)
elif command_name == "close_all":
command_name = "tabs_extra_close_all"
args = {}
cmd = (command_name, args)
elif command_name == "close_others_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "other"
cmd = (command_name, args)
elif command_name == "close_to_right_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "right"
cmd = (command_name, args)
return cmd
def on_load(self, view):
"""Handle load focus or spawning."""
Focus.cancel()
if sort_on_load_save():
if not self.on_sort(view):
view.settings().set('tabsextra_to_sort', True)
else:
self.on_spawn(view)
def on_post_save(self, view):
"""On save sorting."""
if sort_on_load_save():
self.on_sort(view)
def on_sort(self, view):
"""Sort views."""
sorted_views = False
window = view.window()
if window and window.get_view_index(view)[1] != -1:
cmd = sublime.load_settings(SETTINGS).get("sort_on_load_save_command", {})
module = str(cmd.get("module", ""))
reverse = bool(cmd.get("reverse", False))
if module != "":
window.run_command(
"tabs_extra_sort",
{"sort_by": module, "reverse": reverse}
)
sorted_views = True
return sorted_views
def on_pre_close(self, view):
"""
If a view is closing without being marked, we know it was done outside of TabsExtra.
Attach view and window info so we can focus the right view after close.
"""
Focus.cancel()
view.settings().set("tabs_extra_is_closed", True)
if not view.settings().get("tabs_extra_closing", False):
TabsExtraListener.extra_command_call = True
window = view.window()
if window is not None:
view.settings().set("tabs_extra_view_info", view.window().get_view_index(view))
view.settings().set("tabs_extra_window_info", view.window().id())
else:
TabsExtraListener.extra_command_call = False
def on_close(self, view):
"""
Handle focusing the correct view in window group.
Close command was initiated outside of TabsExtra, so a focus is required.
"""
view_info = view.settings().get("tabs_extra_view_info", None)
window_info = view.settings().get("tabs_extra_window_info", None)
if view_info is not None and window_info is not None:
TabsExtraListener.extra_command_call = False
def on_activated(self, view):
"""
Timestamp each view when activated.
Detect if on_move event should be executed.
"""
if not TabsExtraListener.extra_command_call:
window = view.window()
if window is None:
return
s = window.active_sheet()
timestamp_view(window, s)
# Detect if tab was moved to a new group
# Run on_move event if it has.
moving = view.settings().get("tabs_extra_moving", None)
if moving is not None:
win_id, group_id = moving
window = view.window()
if window is None:
return
active_group = window.get_view_index(view)[0]
if window.id() != win_id or int(group_id) != int(active_group):
view.settings().erase("tabs_extra_moving")
elif sort_on_load_save() and view.settings().get('tabsextra_to_sort'):
view.settings().erase('tabsextra_to_sort')
self.on_sort(view)
###############################
# Wrappers
###############################
class TabsExtraViewWrapperCommand(sublime_plugin.WindowCommand):
"""Wrapper for for executing certain commands from the tab context menu."""
def run(self, command, group=-1, index=-1, args=None):
"""Wrap command in order to ensure view gets focused first."""
if args is None:
args = {}
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
self.window.run_command(command, args)
###############################
# File Management Commands
###############################
class TabsExtraDeleteCommand(sublime_plugin.WindowCommand):
"""Delete the file."""
def run(self, group=-1, index=-1):
"""Delete the tab's file."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
if sublime.ok_cancel_dialog("Delete %s?" % file_name, "Delete"):
if not view.close():
return
import Default.send2trash as send2trash # noqa: N813
send2trash.send2trash(file_name)
def is_visible(self, group=-1, index=-1):
"""Check if command should be visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraDuplicateCommand(sublime_plugin.WindowCommand):
"""Duplicate tab."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"Duplicate:", file_name,
lambda x: self.on_done(file_name, x),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the tab duplication when the user is done with the input panel."""
new_path = os.path.dirname(new)
if os.path.exists(new_path) and os.path.isdir(new_path):
if not os.path.exists(new) or sublime.ok_cancel_dialog("Overwrite %s?" % new, "Replace"):
try:
with open(old, 'rb') as f:
text = f.read()
with open(new, 'wb') as f:
f.write(text)
self.window.open_file(new)
except Exception:
sublime.status_message("Unable to duplicate")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRenameCommand(sublime_plugin.WindowCommand):
"""Rename the tab's file."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
branch, leaf = os.path.split(file_name)
v = self.window.show_input_panel(
"New Name:", leaf,
functools.partial(self.on_done, file_name, branch),
None, None
)
name = os.path.splitext(leaf)[0]
v.sel().clear()
v.sel().add(sublime.Region(0, len(name)))
def on_done(self, old, branch, leaf):
"""Handle the renaming when user is done with the input panel."""
new = os.path.join(branch, leaf)
try:
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to rename")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraMoveCommand(sublime_plugin.WindowCommand):
"""Move the tab's file."""
def run(self, group=-1, index=-1):
"""Move the file in the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"New Location:", file_name,
functools.partial(self.on_done, file_name),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the moving when user is done with the input panel."""
try:
directory = os.path.dirname(new)
if not os.path.exists(directory):
os.makedirs(directory)
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to move")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRevertCommand(TabsExtraViewWrapperCommand):
"""Revert changes in file."""
def is_visible(self, command, group=-1, index=-1, args=None):
"""Determine if command should be visible in menu."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None:
enabled = view.is_dirty()
return enabled
class TabsExtraFileCommand(TabsExtraViewWrapperCommand):
"""Wrapper for file commands."""
def is_enabled(self, command, group=-1, index=-1, args=None):
"""Determine if command should be enabled."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
class TabsExtraFilePathCommand(sublime_plugin.WindowCommand):
"""Get file paths."""
def run(self, group=-1, index=-1, path_type='path'):
"""Run the command."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
view.run_command('copy_path')
pth = sublime.get_clipboard()
if path_type == 'name':
pth = os.path.basename(pth)
elif path_type == 'path_uri':
pth = urljoin('file:', pathname2url(pth))
sublime.set_clipboard(pth)
def is_enabled(self, group=-1, index=-1, path_type='path'):
"""Determine if command should be enabled."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
###############################
# Sort
###############################
class TabsExtraSortMenuCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self):
"""Using "sort_layout" setting, construct a quick panel sort menu."""
sort_layout = sublime.load_settings(SETTINGS).get("sort_layout", [])
if len(sort_layout):
self.sort_commands = []
sort_menu = []
for sort_entry in sort_layout:
caption = str(sort_entry.get("caption", ""))
module = str(sort_entry.get("module", ""))
reverse = bool(sort_entry.get("reverse", False))
if module != "":
self.sort_commands.append((module, reverse))
sort_menu.append(caption)
if len(sort_menu):
self.window.show_quick_panel(sort_menu, self.check_selection)
def check_selection(self, value):
"""Launch the selected sort command."""
if value != -1:
command = self.sort_commands[value]
self.window.run_command("tabs_extra_sort", {"sort_by": command[0], "reverse": command[1]})
class TabsExtraSortCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self, group=-1, sort_by=None, reverse=False):
"""Sort Tabs."""
if sort_by is not None:
if group == -1:
group = self.window.active_group()
self.group = group
self.reverse = reverse
views = self.window.views_in_group(int(group))
if len(views):
sort_module = self.get_sort_module(sort_by)
if sort_module is not None:
view_data = []
sort_module.run(views, view_data)
self.sort(view_data)
self.window.focus_view(self.window.active_view())
def sort(self, view_data):
"""Sort the views."""
indexes = tuple([x for x in range(0, len(view_data[0]) - 1)])
sorted_views = sorted(view_data, key=itemgetter(*indexes))
if self.reverse:
sorted_views = sorted_views[::-1]
if sorted_views != view_data:
for index in range(0, len(sorted_views)):
self.window.set_view_index(sorted_views[index][-1], self.group, index)
def get_sort_module(self, module_name):
"""Import the sort_by module."""
import imp
path_name = os.path.join("Packages", os.path.normpath(module_name.replace('.', '/')))
path_name += ".py"
module = imp.new_module(module_name)
sys.modules[module_name] = module
exec(
compile(
sublime.load_resource(sublime_format_path(path_name)),
module_name, 'exec'
),
sys.modules[module_name].__dict__
)
return module
###############################
# Menu Installation
###############################
class TabsExtraInstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Install TabsExtra menu overriding the default tab context menu."""
def run(self):
"""Install/upgrade the override tab menu."""
msg = OVERRIDE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.upgrade_override_menu()
class TabsExtraUninstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Uninstall the TabsExtra override menu."""
def run(self):
"""Uninstall the override tab menu."""
msg = RESTORE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.uninstall_override_menu()
class TabsExtraInstallMenuCommand(sublime_plugin.ApplicationCommand):
"""Install the TabsExtra menu by appending it to the existing tab context menu."""
def run(self):
"""Install/upgrade the standard tab menu."""
tab_menu.upgrade_default_menu()
###############################
# Plugin Loading
###############################
def plugin_loaded():
"""Handle plugin setup."""
win = sublime.active_window()
if win is not None:
sheet = win.active_sheet()
if sheet is not None:
timestamp_view(win, sheet)
| mit | 4,686,407,101,717,471,000 | 32.593686 | 173 | 0.5363 | false |
annayqho/TheCannon | presentations/for_michigan/make_talk_plots.py | 1 | 4618 | import pickle
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *  # the bare plot()/title()/savefig() calls below use the pyplot namespace
# make sample spectra ("dataset" is assumed to be an already-loaded TheCannon dataset object)
plot(dataset.wl, dataset.tr_flux[2,:], alpha=0.7, c='k')
title(r"Typical High-S/N LAMOST Spectrum", fontsize=27)
xlim(3500, 9500)
tick_params(axis='x', labelsize=27)
tick_params(axis='y', labelsize=27)
xlabel("Wavelength ($\AA$)", fontsize=27)
ylabel("Flux", fontsize=27)
savefig("typical_spec_snr186.png")
ID = "spec-55938-B5593806_sp04-159.fits"
# now find it in APOGEE...
ID = "aspcapStar-r5-v603-2M12252154+2732475.fits"
import pyfits
fits_file = ID
file_in = pyfits.open(fits_file)
flux = np.array(file_in[1].data)
npixels = len(flux)
start_wl = file_in[1].header['CRVAL1']
diff_wl = file_in[1].header['CDELT1']
val = diff_wl * (npixels) + start_wl
wl_full_log = np.arange(start_wl,val, diff_wl)
wl_full = [10 ** aval for aval in wl_full_log]
wl = np.array(wl_full)
bad = flux == 0
wl = np.ma.array(wl, mask=bad)
flux = np.ma.array(flux, mask=bad)
plot(wl, flux, alpha=0.7, c='k')
xlim(15100, 17000)
ylim(0.6, 1.15)
title(r"Typical High-S/N APOGEE Spectrum", fontsize=27)
tight_layout()
savefig("typical_spec_snr186_apogee.png")
label_file = 'reference_labels.csv'
(test_ID, test_SNR) = pickle.load(open("test_ID_SNR.p", "r"))
# for each test ID, find its index in label_file IDs
ids = np.loadtxt(label_file, usecols=(0,), dtype=str, delimiter=',')
inds = [np.where(ids==test_ID_val) for test_ID_val in test_ID]
names = ['T_{eff}', '\log g', '[Fe/H]', '[\\alpha/Fe]']
lims = [[3900,6000], [0,5], [-2, 1], [-0.1,0.4]]
#id,teff,logg,feh,alpha,snr
teff = np.loadtxt(label_file, usecols=(2,), dtype=float, delimiter=',')
logg = np.loadtxt(label_file, usecols=(3,), dtype=float, delimiter=',')
feh = np.loadtxt(label_file, usecols=(4,), dtype=float, delimiter=',')
alpha = np.loadtxt(label_file, usecols=(5,), dtype=float, delimiter=',')
apogee_label_vals = np.vstack(
(teff[inds].flatten(), logg[inds].flatten(), feh[inds].flatten(), alpha[inds].flatten())).T
test_labels = pickle.load(open("test_labels.p", "r"))
for i in range(0, len(names)):
name = names[i]
cannon = np.array(test_labels[:,i])
orig = np.array(apogee_label_vals[:,i], dtype=float)
snr = test_SNR
#bad = orig < -8000
#good = snr > 50
#orig = np.ma.array(orig, mask=bad)
#cannon = np.ma.array(cannon, mask=bad)
#snr = np.ma.array(snr, mask=bad)
#orig = orig[good]
#cannon = cannon[good]
#snr = snr[good]
scatter = np.round(np.std(orig-cannon),3)
scatter = int(scatter)
bias = np.round(np.mean(orig-cannon),4)
bias = np.round(bias, 3)
low = np.minimum(min(orig), min(cannon))
high = np.maximum(max(orig), max(cannon))
fig = plt.figure(figsize=(10,6))
gs = gridspec.GridSpec(1,2,width_ratios=[2,1], wspace=0.3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot([low, high], [low, high], 'k-', linewidth=2.0, label="x=y")
low = lims[i][0]
high = lims[i][1]
ax1.set_xlim(low, high)
ax1.set_ylim(low, high)
c = np.zeros(len(snr))
take = snr < 100
ax1.scatter(orig[take], cannon[take], marker='x', c='0.10', alpha=0.3, label="snr < 100")
take = snr > 100
ax1.scatter(orig[take], cannon[take], marker='x', c='k', label="snr > 100", alpha=0.7)
ax1.legend(fontsize=14, loc='lower right')
textstr = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,
fontsize=14, verticalalignment='top')
ax1.tick_params(axis='x', labelsize=14)
ax1.tick_params(axis='y', labelsize=14)
ax1.set_xlabel("APOGEE $%s$" %name, fontsize=14)
ax1.set_ylabel("Cannon-LAMOST $%s$" %name, fontsize=14)
ax1.set_title("Cannon-LAMOST Output vs. APOGEE $%s$ " %name, fontsize=14)
diff = cannon - orig
npoints = len(diff)
mu = np.mean(diff)
sig = np.std(diff)
ax2.hist(diff, range=[-3*sig,3*sig], color='k', bins=np.sqrt(npoints),
orientation='horizontal', alpha=0.3, histtype='stepfilled')
textstr = r"$\sigma=%s$" %(np.round(sig,2))
ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes,
fontsize=14, verticalalignment='top')
ax2.tick_params(axis='x', labelsize=14)
ax2.tick_params(axis='y', labelsize=14)
ax2.set_xlabel("Count", fontsize=14)
ax2.set_ylabel("Difference", fontsize=14)
ax2.axhline(y=0, c='k', lw=3, label='Difference=0')
ax2.set_title("Cannon-LAMOST Output Minus \n APOGEE Labels for $%s$" %name,
fontsize=14)
ax2.legend(fontsize=14, loc='lower center')
plt.savefig('1to1_%s.png'%i)
| mit | -7,311,588,117,771,364,000 | 35.650794 | 99 | 0.639454 | false |
Justaphf/BitcoinUnlimited | qa/rpc-tests/mempool_push.py | 1 | 9835 | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# This is a template to make creating new QA tests easy.
# You can also use this template to quickly start and connect a few regtest nodes.
import time
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
BCH_UNCONF_DEPTH = 25
BCH_UNCONF_SIZE_KB = 101
BCH_UNCONF_SIZE = BCH_UNCONF_SIZE_KB*1000
DELAY_TIME = 45
class MyTest (BitcoinTestFramework):
def setup_chain(self,bitcoinConfDict=None, wallets=None):
logging.info("Initializing test directory " + self.options.tmpdir)
initialize_chain(self.options.tmpdir, bitcoinConfDict, wallets)
def setup_network(self, split=False):
mempoolConf = [
["-blockprioritysize=2000000", "-limitdescendantcount=25", "-limitancestorcount=25",
"-limitancestorsize=101", "-limitdescendantsize=101"],
["-blockprioritysize=2000000",
"-maxmempool=8080",
"-limitancestorsize=%d" % (BCH_UNCONF_SIZE_KB*2),
"-limitdescendantsize=%d" % (BCH_UNCONF_SIZE_KB*2),
"-limitancestorcount=%d" % (BCH_UNCONF_DEPTH*2),
"-limitdescendantcount=%d" % (BCH_UNCONF_DEPTH*2),
"-net.unconfChainResendAction=2",
"-net.restrictInputs=0"],
["-blockprioritysize=2000000", "-limitdescendantcount=1000", "-limitancestorcount=1000",
"-limitancestorsize=1000", "-limitdescendantsize=1000", "-net.unconfChainResendAction=2",
"-net.restrictInputs=0"],
["-blockprioritysize=2000000", "-limitdescendantcount=25", "-limitancestorcount=25",
"-limitancestorsize=150","-limitdescendantsize=101", "-net.unconfChainResendAction=2"]
]
self.nodes = start_nodes(4, self.options.tmpdir, mempoolConf)
connect_nodes_full(self.nodes)
self.is_network_split=False
self.sync_blocks()
def run_test (self):
# kick us out of IBD mode since the cached blocks will be old time so it'll look like our blockchain isn't up to date
# if we are in IBD mode, we don't request incoming tx.
self.nodes[0].generate(1)
logging.info("ancestor count test")
bal = self.nodes[1].getbalance()
addr = self.nodes[1].getnewaddress()
txhex = []
for i in range(0,BCH_UNCONF_DEPTH*2):
try:
txhex.append(self.nodes[1].sendtoaddress(addr, bal-1)) # enough so that it uses all UTXO, but has fee left over
logging.info("tx depth %d" % i) # Keep travis from timing out
except JSONRPCException as e: # an exception you don't catch is a testing error
print(str(e))
raise
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH*2)
# Set small to commit just a few tx so we can see if the missing ones get pushed
self.nodes[0].set("mining.blockSize=6000")
blk = self.nodes[0].generate(1)[0]
blkhex = self.nodes[0].getblock(blk)
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
# generate the block somewhere else and see if the tx get pushed
self.nodes[2].set("mining.blockSize=4000")
blk2 = self.nodes[2].generate(1)[0]
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getbestblockhash() == blk2) # make sure its settled so we can get a good leftover count for the next test.
unconfLeftOver = self.nodes[1].getmempoolinfo()["size"]
assert(unconfLeftOver >= BCH_UNCONF_DEPTH) # if someone bumps the BCH network unconfirmed depth, you need to build a bigger unconf chain
# Let's consume all BCH_UNCONF_DEPTH tx
self.nodes[0].set("mining.blockSize=8000000")
waitFor(DELAY_TIME, lambda: len(self.nodes[0].getblocktemplate()["transactions"])>=BCH_UNCONF_DEPTH)
blk3 = self.nodes[0].generate(1)[0]
blk3data = self.nodes[0].getblock(blk3)
# this would be ideal, but a particular block is not guaranteed to contain all tx in the mempool
# assert_equal(len(blk3data["tx"]), BCH_UNCONF_DEPTH + 1) # chain of BCH_UNCONF_DEPTH unconfirmed + coinbase
committedTxCount = len(blk3data["tx"])-1 # -1 to remove coinbase
waitFor(DELAY_TIME, lambda: self.nodes[1].getbestblockhash() == blk3)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == unconfLeftOver - committedTxCount)
# make sure that everything that can be pushed is pushed
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == min(unconfLeftOver - committedTxCount, BCH_UNCONF_DEPTH))
# clean up: confirm all the left over tx from the prior test
self.nodes[1].generate(1)
logging.info("ancestor size test")
# Grab existing addresses on all the nodes to create destinations for sendmany
# Grabbing existing addrs is a lot faster than creating new ones
addrlist = []
for node in self.nodes:
tmpaddrs = node.listaddressgroupings()
for axx in tmpaddrs:
addrlist.append(axx[0][0])
amounts = {}
for a in addrlist:
amounts[a] = "0.00001"
bal = self.nodes[1].getbalance()
amounts[addr] = bal - Decimal("5.0")
# Wait for sync before issuing the tx chain so that no txes are rejected as nonfinal
self.sync_blocks()
logging.info("Block heights: %s" % str([x.getblockcount() for x in self.nodes]))
# Create an unconfirmed chain that exceeds what node 0 allows
cumulativeTxSize = 0
while cumulativeTxSize < BCH_UNCONF_SIZE:
txhash = self.nodes[1].sendmany("",amounts,0)
tx = self.nodes[1].getrawtransaction(txhash)
txinfo = self.nodes[1].gettransaction(txhash)
logging.info("fee: %s fee sat/byte: %s" % (str(txinfo["fee"]), str(txinfo["fee"]*100000000/Decimal(len(tx)/2)) ))
cumulativeTxSize += len(tx)/2 # /2 because tx is a hex representation of the tx
logging.info("total size: %d" % cumulativeTxSize)
txCommitted = self.nodes[1].getmempoolinfo()["size"]
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == txCommitted-1) # nodes[0] will eliminate 1 tx because ancestor size too big
waitFor(DELAY_TIME, lambda: self.nodes[2].getmempoolinfo()["size"] == txCommitted) # nodes[2] should have gotten everything because its ancestor size conf is large
self.nodes[0].generate(1)
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == 1) # node 1 should push the tx that's now acceptable to node 0
self.nodes[0].generate(1) # clean up
self.sync_blocks() # Wait for sync before issuing the tx chain so that no txes are rejected as nonfinal
logging.info("Block heights: %s" % str([x.getblockcount() for x in self.nodes]))
# Now let's run a more realistic test with 2 mining nodes of varying mempool depth, and one application node with a huge depth
logging.info("deep unconfirmed chain test")
# Because the TX push races the block, connect the network in a special way to avoid this race.
# This is undesirable for a test, but in the real network will likely result in a faster dispersal of the TX because the miners are interconnected
for n in self.nodes:
disconnect_all(n)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
bal = self.nodes[2].getbalance()
addr = self.nodes[2].getnewaddress()
txhex = []
for i in range(0,51):
try:
txhex.append(self.nodes[2].sendtoaddress(addr, bal-1)) # enough so that it uses all UTXO, but has fee left over
logging.info("send depth %d" % i) # Keep travis from timing out
except JSONRPCException as e: # an exception you don't catch is a testing error
print(str(e))
raise
count = 0
while self.nodes[2].getmempoolinfo()["size"] != 0:
# these checks aren't going to work at the end when I run out of tx so check for that
if self.nodes[2].getmempoolinfo()["size"] >= BCH_UNCONF_DEPTH*2:
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH*2)
logging.info("%d: sizes %d, %d, %d" % (count,self.nodes[0].getmempoolinfo()["size"],self.nodes[1].getmempoolinfo()["size"],self.nodes[2].getmempoolinfo()["size"]))
blk = self.nodes[0].generate(1)[0]
waitFor(DELAY_TIME, lambda: self.nodes[2].getbestblockhash() == blk)
count+=1
if __name__ == '__main__':
t = MyTest()
t.main (None, { "blockprioritysize": 2000000, "keypool":5 })
# Create a convenient function for an interactive python debugging session
def Test():
t = MyTest()
t.drop_to_pdb = True
bitcoinConf = {
"debug": [ "net", "blk", "thin", "mempool", "req", "bench", "evict"],
"blockprioritysize": 2000000 # we don't want any transactions rejected due to insufficient fees...
}
flags = standardFlags()
t.main(flags, bitcoinConf, None)
| mit | -8,783,700,860,843,381,000 | 51.037037 | 175 | 0.642501 | false |
tonioo/modoboa | modoboa/core/app_settings.py | 1 | 16059 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.auth import password_validation
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.password_hashers import get_dovecot_schemes
from modoboa.core.password_hashers.base import PasswordHasher
from modoboa.lib import fields as lib_fields
from modoboa.lib.form_utils import (
HorizontalRadioSelect, SeparatorField, YesNoField
)
from modoboa.parameters import forms as param_forms, tools as param_tools
from . import constants
def enabled_applications():
"""Return the list of installed extensions."""
from modoboa.core.extensions import exts_pool
result = [("user", _("User profile"))]
for extension in exts_pool.list_all():
if "topredirection_url" not in extension:
continue
result.append((extension["name"], extension["label"]))
return sorted(result, key=lambda e: e[0])
class GeneralParametersForm(param_forms.AdminParametersForm):
"""General parameters."""
app = "core"
sep1 = SeparatorField(label=ugettext_lazy("Authentication"))
authentication_type = forms.ChoiceField(
label=ugettext_lazy("Authentication type"),
choices=[("local", ugettext_lazy("Local")),
("ldap", "LDAP")],
initial="local",
help_text=ugettext_lazy("The backend used for authentication"),
widget=HorizontalRadioSelect()
)
password_scheme = forms.ChoiceField(
label=ugettext_lazy("Default password scheme"),
choices=[(hasher.name, ugettext_lazy(hasher.label))
for hasher in PasswordHasher.get_password_hashers()
if hasher().scheme in get_dovecot_schemes()],
initial="sha512crypt",
help_text=ugettext_lazy("Scheme used to crypt mailbox passwords"),
widget=forms.Select(attrs={"class": "form-control"})
)
rounds_number = forms.IntegerField(
label=ugettext_lazy("Rounds"),
initial=70000,
help_text=ugettext_lazy(
"Number of rounds to use (only used by sha256crypt and "
"sha512crypt). Must be between 1000 and 999999999, inclusive."
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
update_scheme = YesNoField(
label=ugettext_lazy("Update password scheme at login"),
initial=True,
help_text=ugettext_lazy(
"Update user password at login to use the default password scheme"
)
)
default_password = forms.CharField(
label=ugettext_lazy("Default password"),
initial="password",
help_text=ugettext_lazy(
"Default password for automatically created accounts.")
)
random_password_length = forms.IntegerField(
label=ugettext_lazy("Random password length"),
min_value=8,
initial=8,
help_text=ugettext_lazy(
"Length of randomly generated passwords.")
)
# LDAP specific settings
ldap_sep = SeparatorField(label=ugettext_lazy("LDAP settings"))
ldap_server_address = forms.CharField(
label=ugettext_lazy("Server address"),
initial="localhost",
help_text=ugettext_lazy(
"The IP address or the DNS name of the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_server_port = forms.IntegerField(
label=ugettext_lazy("Server port"),
initial=389,
help_text=ugettext_lazy("The TCP port number used by the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_secured = forms.ChoiceField(
label=ugettext_lazy("Use a secured connection"),
choices=constants.LDAP_SECURE_MODES,
initial="none",
help_text=ugettext_lazy(
"Use an SSL/STARTTLS connection to access the LDAP server")
)
ldap_auth_method = forms.ChoiceField(
label=ugettext_lazy("Authentication method"),
choices=[("searchbind", ugettext_lazy("Search and bind")),
("directbind", ugettext_lazy("Direct bind"))],
initial="searchbind",
help_text=ugettext_lazy("Choose the authentication method to use"),
widget=forms.Select(attrs={"class": "form-control"})
)
ldap_bind_dn = forms.CharField(
label=ugettext_lazy("Bind DN"),
initial="",
help_text=ugettext_lazy(
"The distinguished name to use when binding to the LDAP server. "
"Leave empty for an anonymous bind"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_bind_password = forms.CharField(
label=ugettext_lazy("Bind password"),
initial="",
help_text=ugettext_lazy(
"The password to use when binding to the LDAP server "
"(with 'Bind DN')"
),
widget=forms.PasswordInput(
attrs={"class": "form-control"}, render_value=True),
required=False
)
ldap_search_base = forms.CharField(
label=ugettext_lazy("Users search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find users"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_search_filter = forms.CharField(
label=ugettext_lazy("Search filter"),
initial="(mail=%(user)s)",
help_text=ugettext_lazy(
"An optional filter string (e.g. '(objectClass=person)'). "
"In order to be valid, it must be enclosed in parentheses."
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_user_dn_template = forms.CharField(
label=ugettext_lazy("User DN template"),
initial="",
help_text=ugettext_lazy(
"The template used to construct a user's DN. It should contain "
"one placeholder (ie. %(user)s)"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_password_attribute = forms.CharField(
label=ugettext_lazy("Password attribute"),
initial="userPassword",
help_text=ugettext_lazy("The attribute used to store user passwords"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_is_active_directory = YesNoField(
label=ugettext_lazy("Active Directory"),
initial=False,
help_text=ugettext_lazy(
"Tell if the LDAP server is an Active Directory one")
)
ldap_admin_groups = forms.CharField(
label=ugettext_lazy("Administrator groups"),
initial="",
help_text=ugettext_lazy(
"Members of those LDAP Posix groups will be created as domain "
"administrators. Use ';' characters to separate groups."
),
required=False
)
ldap_group_type = forms.ChoiceField(
label=ugettext_lazy("Group type"),
initial="posixgroup",
choices=constants.LDAP_GROUP_TYPES,
help_text=ugettext_lazy(
"The LDAP group type to use with your directory."
)
)
ldap_groups_search_base = forms.CharField(
label=ugettext_lazy("Groups search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find groups"
),
required=False
)
dash_sep = SeparatorField(label=ugettext_lazy("Dashboard"))
rss_feed_url = forms.URLField(
label=ugettext_lazy("Custom RSS feed"),
required=False,
help_text=ugettext_lazy(
"Display custom RSS feed to resellers and domain administrators"
)
)
hide_features_widget = YesNoField(
label=ugettext_lazy("Hide features widget"),
initial=False,
help_text=ugettext_lazy(
"Hide features widget for resellers and domain administrators"
)
)
notif_sep = SeparatorField(label=ugettext_lazy("Notifications"))
sender_address = lib_fields.UTF8EmailField(
label=_("Sender address"),
initial="[email protected]",
help_text=_(
"Email address used to send notifications."
)
)
api_sep = SeparatorField(label=ugettext_lazy("Public API"))
enable_api_communication = YesNoField(
label=ugettext_lazy("Enable communication"),
initial=True,
help_text=ugettext_lazy(
"Enable communication with Modoboa public API")
)
check_new_versions = YesNoField(
label=ugettext_lazy("Check new versions"),
initial=True,
help_text=ugettext_lazy(
"Automatically checks if a newer version is available")
)
send_statistics = YesNoField(
label=ugettext_lazy("Send statistics"),
initial=True,
help_text=ugettext_lazy(
"Send statistics to Modoboa public API "
"(counters and used extensions)")
)
sep3 = SeparatorField(label=ugettext_lazy("Miscellaneous"))
inactive_account_threshold = forms.IntegerField(
label=_("Inactive account threshold"),
initial=30,
help_text=_(
"An account with a last login date greater than this threshold "
"(in days) will be considered as inactive"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
top_notifications_check_interval = forms.IntegerField(
label=_("Top notifications check interval"),
initial=30,
help_text=_(
"Interval between two top notification checks (in seconds)"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
log_maximum_age = forms.IntegerField(
label=ugettext_lazy("Maximum log record age"),
initial=365,
help_text=ugettext_lazy("The maximum age in days of a log record"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
items_per_page = forms.IntegerField(
label=ugettext_lazy("Items per page"),
initial=30,
help_text=ugettext_lazy("Number of displayed items per page"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
default_top_redirection = forms.ChoiceField(
label=ugettext_lazy("Default top redirection"),
choices=[],
initial="user",
help_text=ugettext_lazy(
"The default redirection used when no application is specified"
),
widget=forms.Select(attrs={"class": "form-control"})
)
# Visibility rules
visibility_rules = {
"ldap_sep": "authentication_type=ldap",
"ldap_server_address": "authentication_type=ldap",
"ldap_server_port": "authentication_type=ldap",
"ldap_secured": "authentication_type=ldap",
"ldap_auth_method": "authentication_type=ldap",
"ldap_bind_dn": "ldap_auth_method=searchbind",
"ldap_bind_password": "ldap_auth_method=searchbind",
"ldap_search_base": "ldap_auth_method=searchbind",
"ldap_search_filter": "ldap_auth_method=searchbind",
"ldap_user_dn_template": "ldap_auth_method=directbind",
"ldap_password_attribute": "authentication_type=ldap",
"ldap_is_active_directory": "authentication_type=ldap",
"ldap_admin_groups": "authentication_type=ldap",
"ldap_group_type": "authentication_type=ldap",
"ldap_groups_search_base": "authentication_type=ldap",
"check_new_versions": "enable_api_communication=True",
"send_statistics": "enable_api_communication=True",
}
def __init__(self, *args, **kwargs):
super(GeneralParametersForm, self).__init__(*args, **kwargs)
self.fields["default_top_redirection"].choices = enabled_applications()
def clean_ldap_user_dn_template(self):
tpl = self.cleaned_data["ldap_user_dn_template"]
try:
tpl % {"user": "toto"}
except (KeyError, ValueError):
raise forms.ValidationError(_("Invalid syntax"))
return tpl
def clean_rounds_number(self):
value = self.cleaned_data["rounds_number"]
if value < 1000 or value > 999999999:
raise forms.ValidationError(_("Invalid rounds number"))
return value
def clean_default_password(self):
"""Check password complexity."""
value = self.cleaned_data["default_password"]
password_validation.validate_password(value)
return value
def clean(self):
"""Custom validation method
Depending on 'ldap_auth_method' value, we check for different
required parameters.
"""
super(GeneralParametersForm, self).clean()
cleaned_data = self.cleaned_data
if cleaned_data["authentication_type"] != "ldap":
return cleaned_data
if cleaned_data["ldap_auth_method"] == "searchbind":
required_fields = ["ldap_search_base", "ldap_search_filter"]
else:
required_fields = ["ldap_user_dn_template"]
for f in required_fields:
if f not in cleaned_data or cleaned_data[f] == u'':
self.add_error(f, _("This field is required"))
return cleaned_data
def to_django_settings(self):
"""Apply LDAP related parameters to Django settings.
Doing so, we can use the django_auth_ldap module.
"""
try:
import ldap
from django_auth_ldap.config import (
LDAPSearch, PosixGroupType, GroupOfNamesType)
ldap_available = True
except ImportError:
ldap_available = False
values = dict(param_tools.get_global_parameters("core"))
if not ldap_available or values["authentication_type"] != "ldap":
return
if not hasattr(settings, "AUTH_LDAP_USER_ATTR_MAP"):
setattr(settings, "AUTH_LDAP_USER_ATTR_MAP", {
"first_name": "givenName",
"email": "mail",
"last_name": "sn"
})
ldap_uri = "ldaps://" if values["ldap_secured"] == "ssl" else "ldap://"
ldap_uri += "%s:%s" % (
values["ldap_server_address"], values["ldap_server_port"])
setattr(settings, "AUTH_LDAP_SERVER_URI", ldap_uri)
if values["ldap_secured"] == "starttls":
setattr(settings, "AUTH_LDAP_START_TLS", True)
if values["ldap_group_type"] == "groupofnames":
setattr(settings, "AUTH_LDAP_GROUP_TYPE", GroupOfNamesType())
searchfilter = "(objectClass=groupOfNames)"
else:
setattr(settings, "AUTH_LDAP_GROUP_TYPE", PosixGroupType())
searchfilter = "(objectClass=posixGroup)"
setattr(settings, "AUTH_LDAP_GROUP_SEARCH", LDAPSearch(
values["ldap_groups_search_base"], ldap.SCOPE_SUBTREE,
searchfilter
))
if values["ldap_auth_method"] == "searchbind":
setattr(settings, "AUTH_LDAP_BIND_DN", values["ldap_bind_dn"])
setattr(
settings, "AUTH_LDAP_BIND_PASSWORD",
values["ldap_bind_password"]
)
search = LDAPSearch(
values["ldap_search_base"], ldap.SCOPE_SUBTREE,
values["ldap_search_filter"]
)
setattr(settings, "AUTH_LDAP_USER_SEARCH", search)
else:
setattr(
settings, "AUTH_LDAP_USER_DN_TEMPLATE",
values["ldap_user_dn_template"]
)
if values["ldap_is_active_directory"]:
if not hasattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS"):
setattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS", {
ldap.OPT_REFERRALS: False
})
else:
settings.AUTH_LDAP_GLOBAL_OPTIONS[ldap.OPT_REFERRALS] = False
| isc | -478,008,678,375,216,060 | 34.845982 | 79 | 0.609067 | false |
luzfcb/saefacto | saefacto/config/settings.py | 1 | 24164 | # -*- coding: utf-8 -*-
"""
Django settings for saefacto project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# 'suit',
# Admin
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'sitetree',
'sitetree_smartadmin',
'django_user_agents',
'statici18n', # javascript
'parsley',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
'core',
'main',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Fábio C. Barrionuevo da Luz', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/saefacto')
########## END DATABASE CONFIGURATION
########## CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Araguaina'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pt-br'
LANGUAGES = (
('pt-br', u'Português do Brasil'),
('en', 'English'),
('es', u'Español'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_PASSWORD_MIN_LENGTH = 1
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## Your common stuff: Below this line define 3rd party libary settings
class Local(Common):
########## DEBUG
DEBUG = values.BooleanValue(True)
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## Mail settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
########## End mail settings
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
#DATABASES = values.DatabaseURLValue('postgres://localhost/projetosgt')
DATABASES = values.DatabaseURLValue('sqlite:////{0}.sqlite'.format(join(BASE_DIR, 'sae_db')))
########## END DATABASE CONFIGURATION
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
########## Your local stuff: Below this line define 3rd party libary settings
#SITETREE_MODEL_TREE = 'sitetree_smartadmin.SmartTree'
SITETREE_MODEL_TREE_ITEM = 'sitetree_smartadmin.SmartTreeItem'
class Production(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
INSTALLED_APPS += ('allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.github', )
########## END INSTALLED_APPS
########## SECRET KEY
SECRET_KEY = values.SecretValue()
########## END SECRET KEY
########## django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
########## end django-secure
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
########## END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## EMAIL
DEFAULT_FROM_EMAIL = values.Value(
'saefacto <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[saefacto] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## END TEMPLATE CONFIGURATION
########## CACHING
# Only do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
########## END CACHING
########## Your production stuff: Below this line define 3rd party libary settings
########## DEBUG
DEBUG = values.BooleanValue(True)
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': ['debug_toolbar.panels.redirects.RedirectsPanel'],
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
#######################################################################################
# hack terrivelmente feio para fazer o Pycharm identificar as bibliotecas
# o codigo abaixo nunca sera executado
if 1 == 2:
INSTALLED_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'sitetree',
'sitetree_smartadmin',
'django_user_agents',
'statici18n', # javascript
'users', # custom users app
'core',
'main',
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Fábio C. Barrionuevo da Luz', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
########## END DATABASE CONFIGURATION
########## CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc memcacheify is painful to install on windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## Your common stuff: Below this line define 3rd party libary settings
| bsd-3-clause | 4,225,165,152,637,254,000 | 34.167394 | 119 | 0.63005 | false |
gbd-consult/CartoCSS-Export | CartoCSSExport/ce/cartocss.py | 1 | 66477 | """CartoCSS properties."""
# extracted from https://raw.githubusercontent.com/mapbox/carto/master/docs/latest.md
Properties = {
"background-color": {
"default": None,
"description": "Map Background color.",
"type": "color"
},
"background-image": {
"default": "",
"description": "An image that is repeated below all features on a map as a background. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"background-image-comp-op": {
"default": "src-over",
"description": "Set the compositing operation used to blend the image into the background.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"background-image-opacity": {
"default": 1,
"description": "Set the opacity of the image.",
"type": "float"
},
"base": {
"default": "",
"description": "Any relative paths used to reference files will be understood as relative to this directory path if the map is loaded from an in memory object rather than from the filesystem. If the map is loaded from the filesystem and this option is not provided it will be set to the directory of the stylesheet.",
"type": "string"
},
"buffer-size": {
"default": 0,
"description": "Extra tolerance around the map (in pixels) used to ensure labels crossing tile boundaries are equally rendered in each tile (e.g. cut in each tile). Not intended to be used in combination with \"avoid-edges\".",
"type": "float"
},
"building-fill": {
"default": "The color gray will be used for fill.",
"description": "The color of the buildings fill. Note: 0.8 will be used to multiply each color component to auto-generate a darkened wall color.",
"type": "color"
},
"building-fill-opacity": {
"default": 1,
"description": "The opacity of the building as a whole, including all walls.",
"type": "float"
},
"building-height": {
"default": 0,
"description": "The height of the building in pixels.",
"type": "float"
},
"comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this layer should behave relative to layers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"debug-mode": {
"default": "collision",
"description": "The mode for debug rendering.",
"type": "string"
},
"direct-image-filters": {
"default": None,
"description": "A list of image filters to apply to the main canvas (see the image-filters doc for how they work on a separate canvas).",
"type": "functions",
"values": [
"agg-stack-blur",
"emboss",
"blur",
"gray",
"sobel",
"edge-detect",
"x-gradient",
"y-gradient",
"invert",
"sharpen",
"color-blind-protanope",
"color-blind-deuteranope",
"color-blind-tritanope",
"colorize-alpha",
"color-to-alpha",
"scale-hsla"
]
},
"dot-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this layer should behave relative to layers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"dot-fill": {
"default": "gray",
"description": "The color of the area of the dot.",
"type": "color"
},
"dot-height": {
"default": 1,
"description": "The height of the dot in pixels.",
"type": "float"
},
"dot-opacity": {
"default": 1,
"description": "The overall opacity of the dot.",
"type": "float"
},
"dot-width": {
"default": 1,
"description": "The width of the dot in pixels.",
"type": "float"
},
"font-directory": {
"default": None,
"description": "Path to a directory which holds fonts which should be registered when the Map is loaded (in addition to any fonts that may be automatically registered).",
"type": "uri"
},
"image-filters": {
"default": None,
"description": "A list of image filters that will be applied to the active rendering canvas for a given style. The presence of one more image-filters will trigger a new canvas to be created before starting to render a style and then this canvas will be composited back into the main canvas after rendering all features and after all image-filters have been applied. See direct-image-filters if you want to apply a filter directly to the main canvas.",
"type": "functions",
"values": [
"agg-stack-blur",
"emboss",
"blur",
"gray",
"sobel",
"edge-detect",
"x-gradient",
"y-gradient",
"invert",
"sharpen",
"color-blind-protanope",
"color-blind-deuteranope",
"color-blind-tritanope",
"colorize-alpha",
"color-to-alpha",
"scale-hsla"
]
},
"image-filters-inflate": {
"default": False,
"description": "A property that can be set to True to enable using an inflated image internally for seamless blurring across tiles (requires buffered data).",
"type": "boolean"
},
"line-cap": {
"default": "butt",
"description": "The display of line endings.",
"type": "keyword",
"values": [
"butt",
"round",
"square"
]
},
"line-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"line-color": {
"default": "black",
"description": "The color of a drawn line.",
"type": "color"
},
"line-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"line-dash-offset": {
"default": None,
"description": "Valid parameter but not currently used in renderers (only exists for experimental svg support in Mapnik which is not yet enabled).",
"type": "numbers"
},
"line-dasharray": {
"default": None,
"description": "A pair of length values [a,b], where (a) is the dash length and (b) is the gap length respectively. More than two values are supported for more complex patterns.",
"type": "numbers"
},
"line-gamma": {
"default": 1,
"description": "Level of antialiasing of stroke line.",
"type": "float"
},
"line-gamma-method": {
"default": "power",
"description": "An Antigrain Geometry specific rendering hint to control the quality of antialiasing. Under the hood in Mapnik this method is used in combination with the 'gamma' value (which defaults to 1). The methods are in the AGG source at https://github.com/mapnik/mapnik/blob/master/deps/agg/include/agg_gamma_functions.",
"type": "keyword",
"values": [
"power",
"linear",
"none",
"threshold",
"multiply"
]
},
"line-geometry-transform": {
"default": None,
"description": "Transform line geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-join": {
"default": "miter",
"description": "The behavior of lines when joining.",
"type": "keyword",
"values": [
"miter",
"miter-revert",
"round",
"bevel"
]
},
"line-miterlimit": {
"default": 4,
"description": "The limit on the ratio of the miter length to the stroke-width. Used to automatically convert miter joins to bevel joins for sharp angles to avoid the miter extending beyond the thickness of the stroking path. Normally will not need to be set, but a larger value can sometimes help avoid jaggy artifacts.",
"type": "float"
},
"line-offset": {
"default": 0,
"description": "Offsets a line a number of pixels parallel to its actual path. Positive values move the line left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"line-opacity": {
"default": 1,
"description": "The opacity of a line.",
"type": "float"
},
"line-pattern-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"line-pattern-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"line-pattern-file": {
"default": None,
"description": "An image file to be repeated and warped along a line. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"line-pattern-geometry-transform": {
"default": None,
"description": "Transform line geometry with specified function and apply pattern to transformed geometry.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-pattern-offset": {
"default": 0,
"description": "Offsets a line a number of pixels parallel to its actual path. Positive values move the line left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"line-pattern-opacity": {
"default": 1,
"description": "Apply an opacity level to the image used for the pattern.",
"type": "float"
},
"line-pattern-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"line-pattern-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"line-pattern-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"line-pattern-transform": {
"default": None,
"description": "Transform line pattern instance with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-rasterizer": {
"default": "full",
"description": "Exposes an alternate AGG rendering method that sacrifices some accuracy for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"line-simplify": {
"default": 0,
"description": "Simplify geometries by the given tolerance.",
"type": "float"
},
"line-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify geometries by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"line-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"line-width": {
"default": 1,
"description": "The width of a line in pixels.",
"type": "float"
},
"marker-allow-overlap": {
"default": False,
"description": "Control whether overlapping markers are shown or hidden.",
"type": "boolean"
},
"marker-avoid-edges": {
"default": False,
"description": "Avoid placing markers that intersect with tile boundaries.",
"type": "boolean"
},
"marker-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"marker-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"marker-direction": {
"default": "right",
"description": "How markers should be placed along lines. With the \"auto\" setting when marker is upside down the marker is automatically rotated by 180 degrees to keep it upright. The \"auto-down\" value places marker in the opposite orientation to \"auto\". The \"left\" or \"right\" settings can be used to force marker to always be placed along a line in a given direction and therefore disables rotating if marker appears upside down. The \"left-only\" or \"right-only\" properties also force a given direction but will discard upside down markers rather than trying to flip it. The \"up\" and \"down\" settings don't adjust marker's orientation to the line direction.",
"type": "keyword",
"values": [
"auto",
"auto-down",
"left",
"right",
"left-only",
"right-only",
"up",
"down"
]
},
"marker-file": {
"default": None,
"description": "A file that this marker shows at each placement. If no file is given, the marker will show an ellipse. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"marker-fill": {
"default": "blue",
"description": "The color of the area of the marker. This property will also set the fill of elements in an SVG loaded from a file.",
"type": "color"
},
"marker-fill-opacity": {
"default": 1,
"description": "The fill opacity of the marker. This property will also set the fill-opacity of elements in an SVG loaded from a file.",
"type": "float"
},
"marker-geometry-transform": {
"default": None,
"description": "Transform marker geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"marker-height": {
"default": 10,
"description": "The height of the marker, if using one of the default types.",
"type": "float"
},
"marker-ignore-placement": {
"default": False,
"description": "Value to control whether the placement of the feature will prevent the placement of other features.",
"type": "boolean"
},
"marker-line-color": {
"default": "black",
"description": "The color of the stroke around the marker. This property will also set the stroke of elements in an SVG loaded from a file.",
"type": "color"
},
"marker-line-opacity": {
"default": 1,
"description": "The opacity of a line.",
"type": "float"
},
"marker-line-width": {
"default": 0.5,
"description": "The width of the stroke around the marker, in pixels. This is positioned on the boundary, so high values can cover the area itself. This property will also set the stroke-width of elements in an SVG loaded from a file.",
"type": "float"
},
"marker-max-error": {
"default": 0.2,
"description": "N/A: not intended to be changed.",
"type": "float"
},
"marker-multi-policy": {
"default": "each",
"description": "A special setting to allow the user to control rendering behavior for 'multi-geometries' (when a feature contains multiple geometries). This setting does not apply to markers placed along lines. The 'each' policy is default and means all geometries will get a marker. The 'whole' policy means that the aggregate centroid between all geometries will be used. The 'largest' policy means that only the largest (by bounding box areas) feature will get a rendered marker (this is how text labeling behaves by default).",
"type": "keyword",
"values": [
"each",
"whole",
"largest"
]
},
"marker-offset": {
"default": 0,
"description": "Offsets a marker from a line a number of pixels parallel to its actual path. Positive values move the marker left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"marker-opacity": {
"default": 1,
"description": "The overall opacity of the marker, if set, overrides both the opacity of the fill and the opacity of the stroke.",
"type": "float"
},
"marker-placement": {
"default": "point",
"description": "Attempt to place markers on a point, in the center of a polygon, or if markers-placement:line, then multiple times along a line. 'interior' placement can be used to ensure that points placed on polygons are forced to be inside the polygon interior. The 'vertex-first' and 'vertex-last' options can be used to place markers at the first or last vertex of lines or polygons.",
"type": "keyword",
"values": [
"point",
"line",
"interior",
"vertex-first",
"vertex-last"
]
},
"marker-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"marker-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"marker-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"marker-spacing": {
"default": 100,
"description": "Space between repeated markers in pixels. If the spacing is less than the marker size or larger than the line segment length then no marker will be placed. Any value less than 1 will be ignored and the default will be used instead.",
"type": "float"
},
"marker-transform": {
"default": None,
"description": "Transform marker instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"marker-type": {
"default": "ellipse",
"description": "The default marker-type. If a SVG file is not given as the marker-file parameter, the renderer provides either an arrow or an ellipse (a circle if height is equal to width).",
"type": "keyword",
"values": [
"arrow",
"ellipse"
]
},
"marker-width": {
"default": 10,
"description": "The width of the marker, if using one of the default types.",
"type": "float"
},
"maximum-extent": {
"default": "-20037508.34,-20037508.34,20037508.34,20037508.34",
"description": "An extent to be used to limit the bounds used to query all layers during rendering. Should be minx, miny, maxx, maxy in the coordinates of the Map.",
"type": "string"
},
"opacity": {
"default": 1,
"description": "An alpha value for the style (which means an alpha applied to all features in separate buffer and then composited back to main buffer).",
"type": "float"
},
"point-allow-overlap": {
"default": False,
"description": "Control whether overlapping points are shown or hidden.",
"type": "boolean"
},
"point-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"point-file": {
"default": None,
"description": "Image file to represent a point. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"point-ignore-placement": {
"default": False,
"description": "Control whether the placement of the feature will prevent the placement of other features.",
"type": "boolean"
},
"point-opacity": {
"default": 1,
"description": "A value from 0 to 1 to control the opacity of the point.",
"type": "float"
},
"point-placement": {
"default": "centroid",
"description": "Control how this point should be placed. Centroid calculates the geometric center of a polygon, which can be outside of it, while interior always places inside of a polygon.",
"type": "keyword",
"values": [
"centroid",
"interior"
]
},
"point-transform": {
"default": None,
"description": "Transform point instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extend outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"polygon-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"polygon-fill": {
"default": "The color gray will be used for fill.",
"description": "Fill color to assign to a polygon.",
"type": "color"
},
"polygon-gamma": {
"default": 1,
"description": "Level of antialiasing of polygon edges.",
"type": "float"
},
"polygon-gamma-method": {
"default": "power",
"description": "An Antigrain Geometry specific rendering hint to control the quality of antialiasing. Under the hood in Mapnik this method is used in combination with the 'gamma' value (which defaults to 1). The methods are in the AGG source at https://github.com/mapnik/mapnik/blob/master/deps/agg/include/agg_gamma_functions.",
"type": "keyword",
"values": [
"power",
"linear",
"none",
"threshold",
"multiply"
]
},
"polygon-geometry-transform": {
"default": None,
"description": "Transform polygon geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-opacity": {
"default": 1,
"description": "The opacity of the polygon.",
"type": "float"
},
"polygon-pattern-alignment": {
"default": "global",
"description": "Specify whether to align pattern fills to the layer's geometry (local) or to the map (global).",
"type": "keyword",
"values": [
"global",
"local"
]
},
"polygon-pattern-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"polygon-pattern-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"polygon-pattern-file": {
"default": None,
"description": "Image to use as a repeated pattern fill within a polygon. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"polygon-pattern-gamma": {
"default": 1,
"description": "Level of antialiasing of polygon pattern edges.",
"type": "float"
},
"polygon-pattern-geometry-transform": {
"default": None,
"description": "Transform polygon geometry with specified function and apply pattern to transformed geometry.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-pattern-opacity": {
"default": 1,
"description": "Apply an opacity level to the image used for the pattern.",
"type": "float"
},
"polygon-pattern-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"polygon-pattern-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"polygon-pattern-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"polygon-pattern-transform": {
"default": None,
"description": "Transform polygon pattern instance with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-simplify": {
"default": 0,
"description": "Simplify geometries by the given tolerance.",
"type": "float"
},
"polygon-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify geometries by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"polygon-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"raster-colorizer-default-color": {
"default": "transparent",
"description": "This can be any color. Sets the color that is applied to all values outside of the range of the colorizer-stops. If not supplied pixels will be fully transparent.",
"type": "color"
},
"raster-colorizer-default-mode": {
"default": "linear",
"description": "This can be either discrete, linear or exact. If it is not specified then the default is linear.",
"type": "keyword",
"values": [
"discrete",
"linear",
"exact"
]
},
"raster-colorizer-epsilon": {
"default": 1.1920928955078125e-07,
"description": "This can be any positive floating point value and will be used as a tolerance in floating point comparisions. The higher the value the more likely a stop will match and color data.",
"type": "float"
},
"raster-colorizer-stops": {
"default": "",
"description": "Assigns raster data values to colors. Stops must be listed in ascending order, and contain at a minimum the value and the associated color. You can also include the color-mode as a third argument, like stop(100,#fff,exact).",
"type": "tags"
},
"raster-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"raster-filter-factor": {
"default": -1,
"description": "This is used by the Raster or Gdal datasources to pre-downscale images using overviews. Higher numbers can sometimes cause much better scaled image output, at the cost of speed.",
"type": "float"
},
"raster-mesh-size": {
"default": 16,
"description": "A reduced resolution mesh is used for raster reprojection, and the total image size is divided by the mesh-size to determine the quality of that mesh. Values for mesh-size larger than the default will result in faster reprojection but might lead to distortion.",
"type": "unsigned"
},
"raster-opacity": {
"default": 1,
"description": "The opacity of the raster symbolizer on top of other symbolizers.",
"type": "float"
},
"raster-scaling": {
"default": "near",
"description": "The scaling algorithm used to making different resolution versions of this raster layer. Bilinear is a good compromise between speed and accuracy, while lanczos gives the highest quality.",
"type": "keyword",
"values": [
"near",
"fast",
"bilinear",
"bicubic",
"spline16",
"spline36",
"hanning",
"hamming",
"hermite",
"kaiser",
"quadric",
"catrom",
"gaussian",
"bessel",
"mitchell",
"sinc",
"lanczos",
"blackman"
]
},
"shield-allow-overlap": {
"default": False,
"description": "Control whether overlapping shields are shown or hidden.",
"type": "boolean"
},
"shield-avoid-edges": {
"default": False,
"description": "Avoid placing shields that intersect with tile boundaries.",
"type": "boolean"
},
"shield-character-spacing": {
"default": 0,
"description": "Horizontal spacing between characters (in pixels). Currently works for point placement only, not line placement.",
"type": "unsigned"
},
"shield-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"shield-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"shield-dx": {
"default": 0,
"description": "Displace shield by fixed amount, in pixels, +/- along the X axis. A positive value will shift the text right.",
"type": "float"
},
"shield-dy": {
"default": 0,
"description": "Displace shield by fixed amount, in pixels, +/- along the Y axis. A positive value will shift the text down.",
"type": "float"
},
"shield-face-name": {
"default": None,
"description": "Font name and style to use for the shield text.",
"type": "string"
},
"shield-file": {
"default": None,
"description": "Image file to render behind the shield text. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"shield-fill": {
"default": "black",
"description": "The color of the shield text.",
"type": "color"
},
"shield-halo-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"shield-halo-fill": {
"default": "white",
"description": "Specifies the color of the halo around the text.",
"type": "color"
},
"shield-halo-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text halo.",
"type": "float"
},
"shield-halo-radius": {
"default": 0,
"description": "Specify the radius of the halo in pixels.",
"type": "float"
},
"shield-halo-rasterizer": {
"default": "full",
"description": "Exposes an alternate text halo rendering method that sacrifices quality for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"shield-halo-transform": {
"default": "",
"description": "Transform shield halo relative to the actual text with specified function. Allows for shadow or embossed effects. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"shield-horizontal-alignment": {
"default": "auto",
"description": "The shield's horizontal alignment from its centerpoint.",
"type": "keyword",
"values": [
"left",
"middle",
"right",
"auto"
]
},
"shield-justify-alignment": {
"default": "auto",
"description": "Define how text in a shield's label is justified.",
"type": "keyword",
"values": [
"left",
"center",
"right",
"auto"
]
},
"shield-label-position-tolerance": {
"default": "shield-spacing/2.0",
"description": "Allows the shield to be displaced from its ideal position by a number of pixels (only works with placement:line).",
"type": "float"
},
"shield-line-spacing": {
"default": 0,
"description": "Vertical spacing between lines of multiline labels (in pixels).",
"type": "float"
},
"shield-margin": {
"default": 0,
"description": "Minimum distance that a shield can be placed from any other text, shield, or marker.",
"type": "float"
},
"shield-min-distance": {
"default": 0,
"description": "Minimum distance to the next shield with the same text. Only works for line placement.",
"type": "float"
},
"shield-min-padding": {
"default": 0,
"description": "Minimum distance a shield will be placed from the edge of a tile. This option is similar to shield-avoid-edges:True except that the extra margin is used to discard cases where the shield+margin are not fully inside the tile.",
"type": "float"
},
"shield-name": {
"default": "",
"description": "Value to use for a shield\"s text label. Data columns are specified using brackets like [column_name].",
"type": "string"
},
"shield-opacity": {
"default": 1,
"description": "The opacity of the image used for the shield.",
"type": "float"
},
"shield-placement": {
"default": "point",
"description": "How this shield should be placed. Point placement places one shield on top of a point geometry and at the centroid of a polygon or the middle point of a line, line places along lines multiple times per feature, vertex places on the vertexes of polygons, and interior attempts to place inside of a polygon.",
"type": "keyword",
"values": [
"point",
"line",
"vertex",
"interior"
]
},
"shield-placement-type": {
"default": "dummy",
"description": "Re-position and/or re-size shield to avoid overlaps. \"simple\" for basic algorithm (using shield-placements string,) \"dummy\" to turn this feature off.",
"type": "keyword",
"values": [
"dummy",
"simple",
"list"
]
},
"shield-placements": {
"default": "",
"description": "If \"placement-type\" is set to \"simple\", use this \"POSITIONS,[SIZES]\" string. An example is shield-placements: \"E,NE,SE,W,NW,SW\";.",
"type": "string"
},
"shield-repeat-distance": {
"default": 0,
"description": "Minimum distance between repeated shields. If set this will prevent shields being rendered nearby each other that contain the same text. Similar to shield-min-distance with the difference that it works the same no matter what placement strategy is used.",
"type": "float"
},
"shield-simplify": {
"default": 0,
"description": "Simplify the geometries used for shield placement by the given tolerance.",
"type": "float"
},
"shield-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify the geometries used for shield placement by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"shield-size": {
"default": 10,
"description": "The size of the shield text in pixels.",
"type": "float"
},
"shield-smooth": {
"default": 0,
"description": "Smooths out the angles of the geometry used for shield placement. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"shield-spacing": {
"default": 0,
"description": "Distance the renderer should use to try to place repeated shields on a line.",
"type": "float"
},
"shield-text-dx": {
"default": 0,
"description": "Displace text within shield by fixed amount, in pixels, +/- along the X axis. A positive value will shift the shield right.",
"type": "float"
},
"shield-text-dy": {
"default": 0,
"description": "Displace text within shield by fixed amount, in pixels, +/- along the Y axis. A positive value will shift the shield down.",
"type": "float"
},
"shield-text-opacity": {
"default": 1,
"description": "The opacity of the text placed on top of the shield.",
"type": "float"
},
"shield-text-transform": {
"default": None,
"description": "Transform the case of the characters.",
"type": "keyword",
"values": [
"none",
"uppercase",
"lowercase",
"capitalize",
"reverse"
]
},
"shield-transform": {
"default": None,
"description": "Transform shield instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"shield-unlock-image": {
"default": False,
"description": "This parameter should be set to True if you are trying to position text beside rather than on top of the shield image.",
"type": "boolean"
},
"shield-vertical-alignment": {
"default": "middle",
"description": "The shield's vertical alignment from its centerpoint.",
"type": "keyword",
"values": [
"top",
"middle",
"bottom",
"auto"
]
},
"shield-wrap-before": {
"default": False,
"description": "Wrap text before wrap-width is reached.",
"type": "boolean"
},
"shield-wrap-character": {
"default": None,
"description": "Use this character instead of a space to wrap long names.",
"type": "string"
},
"shield-wrap-width": {
"default": 0,
"description": "Length of a chunk of text in pixels before wrapping text. If set to zero, text doesn't wrap.",
"type": "unsigned"
},
"srs": {
"default": "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs",
"description": "Map spatial reference (proj4 string).",
"type": "string"
},
"text-align": {
"default": "auto",
"description": "Define how text is justified.",
"type": "keyword",
"values": [
"left",
"right",
"center",
"auto"
]
},
"text-allow-overlap": {
"default": False,
"description": "Control whether overlapping text is shown or hidden.",
"type": "boolean"
},
"text-avoid-edges": {
"default": False,
"description": "Avoid placing labels that intersect with tile boundaries.",
"type": "boolean"
},
"text-character-spacing": {
"default": 0,
"description": "Horizontal spacing adjustment between characters in pixels. This value is ignored when horizontal-alignment is set to adjust. Typographic ligatures are turned off when this value is greater than zero.",
"type": "float"
},
"text-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"text-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"text-dx": {
"default": 0,
"description": "Displace text by fixed amount, in pixels, +/- along the X axis. With \"dummy\" placement-type, a positive value displaces to the right. With \"simple\" placement-type, it is either left, right or unchanged, depending on the placement selected. Any non-zero value implies \"horizontal-alignment\" changes to \"left\" by default. Has no effect with 'line' text-placement-type.",
"type": "float"
},
"text-dy": {
"default": 0,
"description": "Displace text by fixed amount, in pixels, +/- along the Y axis. With \"dummy\" placement-type, a positive value displaces downwards. With \"simple\" placement-type, it is either up, down or unchanged, depending on the placement selected. With \"line\" placement-type, a positive value displaces above the path.",
"type": "float"
},
"text-face-name": {
"default": None,
"description": "Font name and style to render a label in.",
"type": "string"
},
"text-fill": {
"default": "black",
"description": "Specifies the color for the text.",
"type": "color"
},
"text-font-feature-settings": {
"default": "",
"description": "Comma separated list of OpenType typographic features. The syntax and semantics conforms to font-feature-settings from W3C CSS.",
"type": "string"
},
"text-halo-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"text-halo-fill": {
"default": "white",
"description": "Specifies the color of the halo around the text.",
"type": "color"
},
"text-halo-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text halo.",
"type": "float"
},
"text-halo-radius": {
"default": 0,
"description": "Specify the radius of the halo in pixels.",
"type": "float"
},
"text-halo-rasterizer": {
"default": "full",
"description": "Exposes an alternate text halo rendering method that sacrifices quality for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"text-halo-transform": {
"default": "",
"description": "Transform text halo relative to the actual text with specified function. Allows for shadow or embossed effects. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"text-horizontal-alignment": {
"default": "auto",
"description": "The text's horizontal alignment from it's centerpoint. If placement is set to line, then adjust can be set to auto-fit the text to the length of the path by dynamically calculating character-spacing.",
"type": "keyword",
"values": [
"left",
"middle",
"right",
"auto",
"adjust"
]
},
"text-label-position-tolerance": {
"default": "text-spacing/2.0",
"description": "Allows the label to be displaced from its ideal position by a number of pixels (only works with placement:line).",
"type": "float"
},
"text-largest-bbox-only": {
"default": True,
"description": "Controls default labeling behavior on multipolygons. The default is True and means that only the polygon with largest bbox is labeled.",
"type": "boolean"
},
"text-line-spacing": {
"default": 0,
"description": "Vertical spacing adjustment between lines in pixels.",
"type": "float"
},
"text-margin": {
"default": 0,
"description": "Minimum distance that a label can be placed from any other text, shield, or marker.",
"type": "float"
},
"text-max-char-angle-delta": {
"default": 22.5,
"description": "The maximum angle change, in degrees, allowed between adjacent characters in a label. This value internally is converted to radians to the default is 22.5*math.pi/180.0. The higher the value the fewer labels will be placed around around sharp corners.",
"type": "float"
},
"text-min-distance": {
"default": 0,
"description": "Minimum distance to the next label with the same text. Only works for line placement. Deprecated: replaced by text-repeat-distance and text-margin",
"type": "float"
},
"text-min-padding": {
"default": 0,
"description": "Minimum distance a text label will be placed from the edge of a tile. This option is similar to shield-avoid-edges:True except that the extra margin is used to discard cases where the shield+margin are not fully inside the tile.",
"type": "float"
},
"text-min-path-length": {
"default": 0,
"description": "Place labels only on polygons and lines with a bounding width longer than this value (in pixels).",
"type": "float"
},
"text-name": {
"default": None,
"description": "Value to use for a text label. Data columns are specified using brackets like [column_name].",
"type": "string"
},
"text-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text.",
"type": "float"
},
"text-orientation": {
"default": 0,
"description": "Rotate the text. (only works with text-placement:point).",
"type": "float"
},
"text-placement": {
"default": "point",
"description": "How this label should be placed. Point placement places one label on top of a point geometry and at the centroid of a polygon or the middle point of a line, line places along lines multiple times per feature, vertex places on the vertexes of polygons, and interior attempts to place inside of a polygon.",
"type": "keyword",
"values": [
"point",
"line",
"vertex",
"interior"
]
},
"text-placement-type": {
"default": "dummy",
"description": "Re-position and/or re-size text to avoid overlaps. \"simple\" for basic algorithm (using text-placements string,) \"dummy\" to turn this feature off.",
"type": "keyword",
"values": [
"dummy",
"simple",
"list"
]
},
"text-placements": {
"default": "",
"description": "If \"placement-type\" is set to \"simple\", use this \"POSITIONS,[SIZES]\" string. An example is text-placements: \"E,NE,SE,W,NW,SW\";.",
"type": "string"
},
"text-ratio": {
"default": 0,
"description": "Define the amount of text (of the total) present on successive lines when wrapping occurs.",
"type": "unsigned"
},
"text-repeat-distance": {
"default": 0,
"description": "Minimum distance between repeated text. If set this will prevent text labels being rendered nearby each other that contain the same text. Similar to text-min-distance with the difference that it works the same no matter what placement strategy is used.",
"type": "float"
},
"text-repeat-wrap-character": {
"default": False,
"description": "Keep the character used to wrap a line instead of removing it, and repeat it on the new line.",
"type": "boolean"
},
"text-rotate-displacement": {
"default": False,
"description": "Rotates the displacement around the placement origin by the angle given by \"orientation\".",
"type": "boolean"
},
"text-simplify": {
"default": 0,
"description": "Simplify the geometries used for text placement by the given tolerance.",
"type": "float"
},
"text-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify the geometries used for text placement by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"text-size": {
"default": 10,
"description": "Text size in pixels.",
"type": "float"
},
"text-smooth": {
"default": 0,
"description": "Smooths out the angles of the geometry used for text placement. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"text-spacing": {
"default": 0,
"description": "Distance the renderer should use to try to place repeated text labels on a line.",
"type": "unsigned"
},
"text-transform": {
"default": None,
"description": "Transform the case of the characters.",
"type": "keyword",
"values": [
"none",
"uppercase",
"lowercase",
"capitalize",
"reverse"
]
},
"text-upright": {
"default": "auto",
"description": "How this label should be placed along lines. By default when more than half of a label's characters are upside down the label is automatically flipped to keep it upright. By changing this parameter you can prevent this \"auto-upright\" behavior. The \"auto-down\" value places text in the opposite orientation to \"auto\". The \"left\" or \"right\" settings can be used to force text to always be placed along a line in a given direction and therefore disables flipping if text appears upside down. The \"left-only\" or \"right-only\" properties also force a given direction but will discard upside down text rather than trying to flip it.",
"type": "keyword",
"values": [
"auto",
"auto-down",
"left",
"right",
"left-only",
"right-only"
]
},
"text-vertical-alignment": {
"default": "auto",
"description": "Position of label relative to point position.",
"type": "keyword",
"values": [
"top",
"middle",
"bottom",
"auto"
]
},
"text-wrap-before": {
"default": False,
"description": "Wrap text before wrap-width is reached.",
"type": "boolean"
},
"text-wrap-character": {
"default": None,
"description": "Use this character instead of a space to wrap long text.",
"type": "string"
},
"text-wrap-width": {
"default": 0,
"description": "Length of a chunk of text in pixels before wrapping text. If set to zero, text doesn't wrap.",
"type": "unsigned"
}
}
| gpl-2.0 | 491,658,651,293,921,540 | 33.713838 | 684 | 0.521985 | false |
Linaro/lava-dispatcher | lava_dispatcher/actions/deploy/mps.py | 1 | 5714 | # Copyright (C) 2018 Linaro Limited
#
# Author: Dean Arnold <[email protected]>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
# List just the subclasses supported for this base strategy
# imported by the parser to populate the list of subclasses.
import os
import shutil
from lava_dispatcher.action import (
Action,
InfrastructureError,
Pipeline,
)
from lava_dispatcher.logical import Deployment
from lava_dispatcher.actions.deploy import DeployAction
from lava_dispatcher.actions.deploy.download import DownloaderAction
from lava_dispatcher.connections.serial import ConnectDevice
from lava_dispatcher.power import ResetDevice
from lava_dispatcher.utils.udev import WaitUSBMassStorageDeviceAction
from lava_dispatcher.actions.deploy.vemsd import (
MountVExpressMassStorageDevice,
ExtractVExpressRecoveryImage,
DeployVExpressRecoveryImage,
UnmountVExpressMassStorageDevice,
)
class Mps(Deployment):
"""
    Strategy class for booting Arm MPS devices.
    Downloads the board recovery image and deploys it to the target.
"""
compatibility = 1
name = 'mps'
def __init__(self, parent, parameters):
super(Mps, self).__init__(parent)
self.action = MpsAction()
self.action.section = self.action_type
self.action.job = self.job
parent.add_action(self.action, parameters)
@classmethod
def accepts(cls, device, parameters):
if 'mps' not in device['actions']['deploy']['methods']:
return False, '"mps" was not in the device configuration deploy methods'
if 'to' not in parameters:
return False, '"to" was not in parameters'
if parameters['to'] != 'mps':
return False, '"to" was not "mps"'
if 'usb_filesystem_label' not in device:
return False, '"usb_filesystem_label" is not in the device configuration'
return True, 'accepted'
class MpsAction(DeployAction):
"""
Action for deploying firmware to a MPS board in the form
of a board recovery image. Recovery images must have AUTORUN
set to true in config.txt in order for the device to come to
a prompt after reboot.
"""
def __init__(self):
super(MpsAction, self).__init__()
self.name = "mps-deploy"
self.description = "deploy image to MPS device"
self.summary = "MPS device image deployment"
def validate(self):
super(MpsAction, self).validate()
if not self.valid:
return
if not self.parameters.get('recovery_image', None):
return
def populate(self, parameters):
download_dir = self.mkdtemp()
self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
self.internal_pipeline.add_action(ConnectDevice())
self.internal_pipeline.add_action(ResetDevice())
        self.internal_pipeline.add_action(WaitUSBMassStorageDeviceAction())
for image in parameters['images'].keys():
if image != 'yaml_line':
self.internal_pipeline.add_action(DownloaderAction(image, path=download_dir))
self.internal_pipeline.add_action(MountVExpressMassStorageDevice())
if 'recovery_image' in parameters['images'].keys():
self.internal_pipeline.add_action(ExtractVExpressRecoveryImage())
self.internal_pipeline.add_action(DeployVExpressRecoveryImage())
if 'test_binary' in parameters['images'].keys():
self.internal_pipeline.add_action(DeployMPSTestBinary())
self.internal_pipeline.add_action(UnmountVExpressMassStorageDevice())
class DeployMPSTestBinary(Action):
"""
Copies test binary to MPS device and renames if required
"""
def __init__(self):
super(DeployMPSTestBinary, self).__init__()
self.name = "deploy-mps-test-binary"
self.description = "deploy test binary to usb msd"
self.summary = "copy test binary to MPS device and rename if required"
self.param_key = "test_binary"
def validate(self):
super(DeployMPSTestBinary, self).validate()
if not self.valid:
return
if not self.parameters['images'].get(self.param_key, None):
return
def run(self, connection, max_end_time, args=None):
connection = super(DeployMPSTestBinary, self).run(connection, max_end_time, args)
mount_point = self.get_namespace_data(action='mount-vexpress-usbmsd', label='vexpress-fw', key='mount-point')
        if not os.path.exists(mount_point):
            raise InfrastructureError("Unable to locate mount point: %s" % mount_point)
dest = os.path.join(mount_point, self.parameters['images'][self.param_key].get('rename', ''))
test_binary = self.get_namespace_data(action='download-action', label=self.param_key, key='file')
self.logger.debug("Copying %s to %s", test_binary, dest)
shutil.copy(test_binary, dest)
return connection
| gpl-2.0 | -8,694,243,085,509,229,000 | 38.958042 | 117 | 0.685159 | false |
nikodtbVf/aima-si | search.py | 1 | 41693 | """Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from utils import (
is_in, argmin, argmax, argmax_random_tie, probability,
weighted_sample_with_replacement, memoize, print_table, DataFile, Stack,
FIFOQueue, PriorityQueue, name
)
#from grid import distance
def distance(a, b):
"""The distance between two (x, y) points."""
return math.hypot((a[0] - b[0]), (a[1] - b[1]))
from collections import defaultdict
import math
import random
import sys
import bisect
infinity = float('inf')
# ______________________________________________________________________________
class Problem(object):
"""The abstract class for a formal problem. You should subclass
this and implement the methods actions and result, and possibly
__init__, goal_test, and path_cost. Then you will create instances
of your subclass and solve them with the various search functions."""
def __init__(self, initial, goal=None):
"""The constructor specifies the initial state, and possibly a goal
state, if there is a unique goal. Your subclass's constructor can add
other arguments."""
self.initial = initial
self.goal = goal
def actions(self, state):
"""Return the actions that can be executed in the given
state. The result would typically be a list, but if there are
many actions, consider yielding them one at a time in an
iterator, rather than building them all at once."""
raise NotImplementedError
def result(self, state, action):
"""Return the state that results from executing the given
action in the given state. The action must be one of
self.actions(state)."""
raise NotImplementedError
def goal_test(self, state):
"""Return True if the state is a goal. The default method compares the
state to self.goal or checks for state in self.goal if it is a
list, as specified in the constructor. Override this method if
checking against a single self.goal is not enough."""
if isinstance(self.goal, list):
return is_in(state, self.goal)
else:
return state == self.goal
def path_cost(self, c, state1, action, state2):
"""Return the cost of a solution path that arrives at state2 from
state1 via action, assuming cost c to get up to state1. If the problem
is such that the path doesn't matter, this function will only look at
state2. If the path does matter, it will consider c and maybe state1
and action. The default method costs 1 for every step in the path."""
        # Alternative cost functions left over from experimentation:
        # costo = self.goal / (c + abs(state1 - state2) + 1)
        # costo = c + abs(state1 - state2)
        # costo = 2 * abs(state1 - state2)
        costo = c + 1
        # print("state 1: %d %s state 2: %d cost: %d" % (state1, action, state2, c))
        return costo
def value(self, state):
"""For optimization problems, each state has a value. Hill-climbing
and related algorithms try to maximize this value."""
raise NotImplementedError
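# Illustrative sketch (not part of the original module): a minimal Problem
# subclass over integer states whose only actions are '+1' and '-1'. The class
# and its name are hypothetical, added only to show the subclassing pattern.
class _ExampleCountProblem(Problem):
    "Reach the goal integer from the initial integer by adding or subtracting 1."
    def actions(self, state):
        return ['+1', '-1']
    def result(self, state, action):
        return state + 1 if action == '+1' else state - 1
    def value(self, state):
        # Higher is better; used by hill_climbing and simulated_annealing below.
        return -abs(state - self.goal)
# For example, breadth_first_search(_ExampleCountProblem(0, 3)).solution()
# returns ['+1', '+1', '+1'].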
# ______________________________________________________________________________
class Node:
"""A node in a search tree. Contains a pointer to the parent (the node
that this is a successor of) and to the actual state for this node. Note
that if a state is arrived at by two paths, then there are two nodes with
the same state. Also includes the action that got us to this state, and
the total path_cost (also known as g) to reach the node. Other functions
may add an f and h value; see best_first_graph_search and astar_search for
an explanation of how the f and h values are handled. You will not need to
subclass this class."""
def __init__(self, state, parent=None, action=None, path_cost=0):
"Create a search tree Node, derived from a parent by an action."
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
self.depth = 0
if parent:
self.depth = parent.depth + 1
def __repr__(self):
return "<Node %s>" % (self.state,)
def __lt__(self, node):
return self.state < node.state
def expand(self, problem):
"List the nodes reachable in one step from this node."
return [self.child_node(problem, action)
for action in problem.actions(self.state)]
def child_node(self, problem, action):
"[Figure 3.10]"
next = problem.result(self.state, action)
return Node(next, self, action,
problem.path_cost(self.path_cost, self.state,
action, next))
def solution(self):
"Return the sequence of actions to go from the root to this node."
return [node.action for node in self.path()[1:]]
def path(self):
"Return a list of nodes forming the path from the root to this node."
node, path_back = self, []
while node:
path_back.append(node)
node = node.parent
return list(reversed(path_back))
# We want for a queue of nodes in breadth_first_search or
# astar_search to have no duplicated states, so we treat nodes
# with the same state as equal. [Problem: this may not be what you
# want in other contexts.]
def __eq__(self, other):
return isinstance(other, Node) and self.state == other.state
def __hash__(self):
return hash(self.state)
# ______________________________________________________________________________
class SimpleProblemSolvingAgentProgram:
"""Abstract framework for a problem-solving agent. [Figure 3.1]"""
def __init__(self, initial_state=None):
self.state = initial_state
self.seq = []
def __call__(self, percept):
self.state = self.update_state(self.state, percept)
if not self.seq:
goal = self.formulate_goal(self.state)
problem = self.formulate_problem(self.state, goal)
self.seq = self.search(problem)
if not self.seq:
return None
return self.seq.pop(0)
def update_state(self, percept):
raise NotImplementedError
def formulate_goal(self, state):
raise NotImplementedError
def formulate_problem(self, state, goal):
raise NotImplementedError
def search(self, problem):
raise NotImplementedError
# ______________________________________________________________________________
# Uninformed Search algorithms
def tree_search(problem, frontier):
"""Search through the successors of a problem to find a goal.
The argument frontier should be an empty queue.
Don't worry about repeated paths to a state. [Figure 3.7]"""
frontier.append(Node(problem.initial))
while frontier:
node = frontier.pop()
if problem.goal_test(node.state):
return node
frontier.extend(node.expand(problem))
return None
def graph_search(problem, frontier):
"""Search through the successors of a problem to find a goal.
The argument frontier should be an empty queue.
If two paths reach a state, only use the first one. [Figure 3.7]"""
frontier.append(Node(problem.initial))
explored = set()
while frontier:
node = frontier.pop()
if problem.goal_test(node.state):
return node
explored.add(node.state)
frontier.extend(child for child in node.expand(problem)
if child.state not in explored and
child not in frontier)
return None
def breadth_first_tree_search(problem):
"Search the shallowest nodes in the search tree first."
return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
"Search the deepest nodes in the search tree first."
return tree_search(problem, Stack())
def depth_first_graph_search(problem):
"Search the deepest nodes in the search tree first."
return graph_search(problem, Stack())
def breadth_first_search(problem):
"[Figure 3.11]"
node = Node(problem.initial)
if problem.goal_test(node.state):
return node
frontier = FIFOQueue()
frontier.append(node)
explored = set()
while frontier:
node = frontier.pop()
explored.add(node.state)
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
#print("state child" + str(child.state))
if problem.goal_test(child.state):
return child
frontier.append(child)
return None
def best_first_graph_search(problem, f):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have breadth-first search.
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
f = memoize(f, 'f')
node = Node(problem.initial)
if problem.goal_test(node.state):
return node
frontier = PriorityQueue(min, f)
frontier.append(node)
explored = set()
while frontier:
node = frontier.pop()
# print(node)
if problem.goal_test(node.state):
return node
explored.add(node.state)
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
frontier.append(child)
elif child in frontier:
incumbent = frontier[child]
if f(child) < f(incumbent):
del frontier[incumbent]
frontier.append(child)
return None
def uniform_cost_search(problem):
"[Figure 3.14]"
return best_first_graph_search(problem, lambda node: node.path_cost)
def depth_limited_search(problem, limit=50):
"[Figure 3.17]"
def recursive_dls(node, problem, limit):
if problem.goal_test(node.state):
return node
elif limit == 0:
return 'cutoff'
else:
cutoff_occurred = False
for child in node.expand(problem):
result = recursive_dls(child, problem, limit - 1)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
return 'cutoff' if cutoff_occurred else None
# Body of depth_limited_search:
return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
"[Figure 3.18]"
for depth in range(sys.maxsize):
result = depth_limited_search(problem, depth)
if result != 'cutoff':
return result
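# Illustrative sketch (not part of the original module): the uninformed
# searchers above all consume the same Problem interface; on the hypothetical
# _ExampleCountProblem defined earlier they find the same 3-step plan.
def _example_uninformed_comparison():
    "Return the plans found by breadth-first and iterative deepening search."
    return (breadth_first_search(_ExampleCountProblem(0, 3)).solution(),
            iterative_deepening_search(_ExampleCountProblem(0, 3)).solution())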
# ______________________________________________________________________________
# Informed (Heuristic) Search
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None):
"""A* search is best-first graph search with f(n) = g(n)+h(n).
You need to specify the h function when you call astar_search, or
else in your Problem subclass."""
h = memoize(h or problem.h, 'h')
return best_first_graph_search(problem, lambda n: 1.0*n.path_cost + 1.0*h(n))
# ______________________________________________________________________________
# Other search algorithms
def recursive_best_first_search(problem, h=None):
"[Figure 3.26]"
h = memoize(h or problem.h, 'h')
def RBFS(problem, node, flimit):
if problem.goal_test(node.state):
return node, 0 # (The second value is immaterial)
successors = node.expand(problem)
if len(successors) == 0:
return None, infinity
for s in successors:
s.f = max(s.path_cost + h(s), node.f)
while True:
# Order by lowest f value
successors.sort(key=lambda x: x.f)
best = successors[0]
if best.f > flimit:
return None, best.f
if len(successors) > 1:
alternative = successors[1].f
else:
alternative = infinity
result, best.f = RBFS(problem, best, min(flimit, alternative))
if result is not None:
return result, best.f
node = Node(problem.initial)
node.f = h(node)
result, bestf = RBFS(problem, node, infinity)
return result
def hill_climbing(problem):
"""From the initial node, keep choosing the neighbor with highest value,
stopping when no neighbor is better. [Figure 4.2]"""
current = Node(problem.initial)
while True:
neighbors = current.expand(problem)
if not neighbors:
break
neighbor = argmax_random_tie(neighbors,
key=lambda node: problem.value(node.state))
if problem.value(neighbor.state) <= problem.value(current.state):
break
current = neighbor
return current.state
def exp_schedule(k=20, lam=0.005, limit=100):
"One possible schedule function for simulated annealing"
return lambda t: (k * math.exp(-lam * t) if t < limit else 0)
def simulated_annealing(problem, schedule=exp_schedule()):
"[Figure 4.5]"
current = Node(problem.initial)
for t in range(sys.maxsize):
T = schedule(t)
if T == 0:
return current
neighbors = current.expand(problem)
if not neighbors:
return current
next = random.choice(neighbors)
delta_e = problem.value(next.state) - problem.value(current.state)
if delta_e > 0 or probability(math.exp(delta_e / T)):
current = next
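# Illustrative sketch (not part of the original module): local search over the
# hypothetical _ExampleCountProblem defined earlier; value() is the quantity
# these routines try to maximize.
def _example_local_search():
    "hill_climbing returns a state, simulated_annealing returns a Node."
    best_state = hill_climbing(_ExampleCountProblem(0, 3))
    best_node = simulated_annealing(_ExampleCountProblem(0, 3))
    return best_state, best_node.state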
def and_or_graph_search(problem):
"""Used when the environment is nondeterministic and completely observable
Contains OR nodes where the agent is free to choose any action
After every action there is an AND node which contains all possible states
the agent may reach due to stochastic nature of environment
The agent must be able to handle all possible states of the AND node(as it
may end up in any of them) returns a conditional plan to reach goal state,
or failure if the former is not possible"""
"[Figure 4.11]"
# functions used by and_or_search
def or_search(state, problem, path):
if problem.goal_test(state):
return []
if state in path:
return None
for action in problem.actions(state):
plan = and_search(problem.result(state, action),
problem, path + [state, ])
if plan is not None:
return [action, plan]
def and_search(states, problem, path):
"returns plan in form of dictionary where we take action plan[s] if we reach state s" # noqa
plan = {}
for s in states:
plan[s] = or_search(s, problem, path)
if plan[s] is None:
return None
return plan
# body of and or search
return or_search(problem.initial, problem, [])
class OnlineDFSAgent:
"""The abstract class for an OnlineDFSAgent. Override update_state
method to convert percept to state. While initializing the subclass
a problem needs to be provided which is an instance of a subclass
of the Problem class. [Figure 4.21] """
def __init__(self, problem):
self.problem = problem
self.s = None
self.a = None
self.untried = defaultdict(list)
self.unbacktracked = defaultdict(list)
self.result = {}
def __call__(self, percept):
s1 = self.update_state(percept)
if self.problem.goal_test(s1):
self.a = None
else:
if s1 not in self.untried.keys():
self.untried[s1] = self.problem.actions(s1)
if self.s is not None:
if s1 != self.result[(self.s, self.a)]:
self.result[(self.s, self.a)] = s1
                    self.unbacktracked[s1].insert(0, self.s)
if len(self.untried[s1]) == 0:
if len(self.unbacktracked[s1]) == 0:
self.a = None
else:
# else a <- an action b such that result[s', b] = POP(unbacktracked[s']) # noqa
unbacktracked_pop = self.unbacktracked[s1].pop(0) # noqa
for (s, b) in self.result.keys():
if self.result[(s, b)] == unbacktracked_pop:
self.a = b
break
else:
self.a = self.untried[s1].pop(0)
self.s = s1
return self.a
def update_state(self, percept):
'''To be overridden in most cases. The default case
assumes the percept to be of type state.'''
return percept
# ______________________________________________________________________________
class OnlineSearchProblem(Problem):
"""
A problem which is solved by an agent executing
actions, rather than by just computation.
Carried in a deterministic and a fully observable environment.
"""
def __init__(self, initial, goal, graph):
self.initial = initial
self.goal = goal
self.graph = graph
def actions(self, state):
return self.graph.dict[state].keys()
def output(self, state, action):
return self.graph.dict[state][action]
def h(self, state):
"""
Returns least possible cost to reach a goal for the given state.
"""
return self.graph.least_costs[state]
def c(self, s, a, s1):
"""
Returns a cost estimate for an agent to move from state 's' to state 's1'
"""
return 1
def update_state(self, percept):
raise NotImplementedError
def goal_test(self, state):
if state == self.goal:
return True
return False
class LRTAStarAgent:
""" [Figure 4.24]
Abstract class for LRTA*-Agent. A problem needs to be
    provided which is an instance of a subclass of the Problem class.
    Takes an OnlineSearchProblem [Figure 4.23] as a problem.
"""
def __init__(self, problem):
self.problem = problem
# self.result = {} # no need as we are using problem.result
self.H = {}
self.s = None
self.a = None
def __call__(self, s1): # as of now s1 is a state rather than a percept
if self.problem.goal_test(s1):
self.a = None
return self.a
else:
if s1 not in self.H:
self.H[s1] = self.problem.h(s1)
if self.s is not None:
# self.result[(self.s, self.a)] = s1 # no need as we are using problem.output
# minimum cost for action b in problem.actions(s)
self.H[self.s] = min(self.LRTA_cost(self.s, b, self.problem.output(self.s, b), self.H)
for b in self.problem.actions(self.s))
# costs for action b in problem.actions(s1)
costs = [self.LRTA_cost(s1, b, self.problem.output(s1, b), self.H)
for b in self.problem.actions(s1)]
# an action b in problem.actions(s1) that minimizes costs
self.a = list(self.problem.actions(s1))[costs.index(min(costs))]
self.s = s1
return self.a
def LRTA_cost(self, s, a, s1, H):
"""
Returns cost to move from state 's' to state 's1' plus
estimated cost to get to goal from s1
"""
print(s, a, s1)
if s1 is None:
return self.problem.h(s)
else:
# sometimes we need to get H[s1] which we haven't yet added to H
# to replace this try, except: we can initialize H with values from problem.h
try:
return self.problem.c(s, a, s1) + self.H[s1]
except:
return self.problem.c(s, a, s1) + self.problem.h(s1)
# ______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.1, n=20):
"""
Call genetic_algorithm on the appropriate parts of a problem.
This requires the problem to have states that can mate and mutate,
plus a value method that scores states."""
s = problem.initial_state
states = [problem.result(s, a) for a in problem.actions(s)]
random.shuffle(states)
return genetic_algorithm(states[:n], problem.value, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.1):
"[Figure 4.8]"
for i in range(ngen):
new_population = []
        for i in range(len(population)):
fitnesses = map(fitness_fn, population)
p1, p2 = weighted_sample_with_replacement(population, fitnesses, 2)
child = p1.mate(p2)
if random.uniform(0, 1) < pmut:
child.mutate()
new_population.append(child)
population = new_population
return argmax(population, key=fitness_fn)
class GAState:
"Abstract class for individuals in a genetic search."
def __init__(self, genes):
self.genes = genes
def mate(self, other):
"Return a new individual crossing self and other."
c = random.randrange(len(self.genes))
return self.__class__(self.genes[:c] + other.genes[c:])
def mutate(self):
"Change a few of my genes."
raise NotImplementedError
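# Illustrative sketch (not part of the original module): a "one-max" run of
# genetic_algorithm above, assuming the utils helpers behave as its calls
# expect. _OneMaxState and _example_one_max are hypothetical names; individuals
# are bit lists and fitness counts the ones.
class _OneMaxState(GAState):
    "Individuals whose genes are a list of bits."
    def mutate(self):
        # Flip one randomly chosen bit in place.
        i = random.randrange(len(self.genes))
        self.genes[i] = 1 - self.genes[i]
def _example_one_max(n_bits=8, pop_size=10, ngen=50):
    "Evolve a population of random bit strings towards all ones."
    population = [_OneMaxState([random.randint(0, 1) for _ in range(n_bits)])
                  for _ in range(pop_size)]
    best = genetic_algorithm(population, lambda s: sum(s.genes), ngen=ngen)
    return best.genes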
# _____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
# ______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
"""A graph connects nodes (vertices) by edges (links). Each edge can also
have a length associated with it. The constructor call is something like:
g = Graph({'A': {'B': 1, 'C': 2}})
this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
A to B, and an edge of length 2 from A to C. You can also do:
    g = Graph({'A': {'B': 1, 'C': 2}}, directed=False)
This makes an undirected graph, so inverse links are also added. The graph
stays undirected; if you add more links with g.connect('B', 'C', 3), then
inverse link is also added. You can use g.nodes() to get a list of nodes,
g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
length of the link from A to B. 'Lengths' can actually be any object at
all, and nodes can be any hashable object."""
def __init__(self, dict=None, directed=True):
self.dict = dict or {}
self.directed = directed
if not directed:
self.make_undirected()
def make_undirected(self):
"Make a digraph into an undirected graph by adding symmetric edges."
for a in list(self.dict.keys()):
for (b, distance) in self.dict[a].items():
self.connect1(b, a, distance)
def connect(self, A, B, distance=1):
"""Add a link from A and B of given distance, and also add the inverse
link if the graph is undirected."""
self.connect1(A, B, distance)
if not self.directed:
self.connect1(B, A, distance)
def connect1(self, A, B, distance):
"Add a link from A to B of given distance, in one direction only."
self.dict.setdefault(A, {})[B] = distance
def get(self, a, b=None):
"""Return a link distance or a dict of {node: distance} entries.
.get(a,b) returns the distance or None;
.get(a) returns a dict of {node: distance} entries, possibly {}."""
links = self.dict.setdefault(a, {})
if b is None:
return links
else:
return links.get(b)
def nodes(self):
"Return a list of nodes in the graph."
return list(self.dict.keys())
def UndirectedGraph(dict=None):
"Build a Graph where every edge (including future ones) goes both ways."
return Graph(dict=dict, directed=False)
def RandomGraph(nodes=list(range(10)), min_links=2, width=400, height=300,
curvature=lambda: random.uniform(1.1, 1.5)):
"""Construct a random graph, with the specified nodes, and random links.
The nodes are laid out randomly on a (width x height) rectangle.
Then each node is connected to the min_links nearest neighbors.
Because inverse links are added, some nodes will have more connections.
The distance between nodes is the hypotenuse times curvature(),
where curvature() defaults to a random number between 1.1 and 1.5."""
g = UndirectedGraph()
g.locations = {}
# Build the cities
for node in nodes:
g.locations[node] = (random.randrange(width), random.randrange(height))
# Build roads from each city to at least min_links nearest neighbors.
for i in range(min_links):
for node in nodes:
if len(g.get(node)) < min_links:
here = g.locations[node]
def distance_to_node(n):
if n is node or g.get(node, n):
return infinity
return distance(g.locations[n], here)
neighbor = argmin(nodes, key=distance_to_node)
d = distance(g.locations[neighbor], here) * curvature()
g.connect(node, neighbor, int(d))
return g
""" [Figure 3.2]
Simplified road map of Romania
"""
romania_map = UndirectedGraph(dict(
Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),
Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),
Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),
Drobeta=dict(Mehadia=75),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99),
Hirsova=dict(Urziceni=98),
Iasi=dict(Vaslui=92, Neamt=87),
Lugoj=dict(Timisoara=111, Mehadia=70),
Oradea=dict(Zerind=71, Sibiu=151),
Pitesti=dict(Rimnicu=97),
Rimnicu=dict(Sibiu=80),
Urziceni=dict(Vaslui=142)))
romania_map.locations = dict(
Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),
Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),
Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),
Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),
Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),
Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),
Vaslui=(509, 444), Zerind=(108, 531))
""" [Figure 4.9]
Eight possible states of the vacuum world
Each state is represented as
* "State of the left room" "State of the right room" "Room in which the agent is present"
1 - DDL Dirty Dirty Left
2 - DDR Dirty Dirty Right
3 - DCL Dirty Clean Left
4 - DCR Dirty Clean Right
5 - CDL Clean Dirty Left
6 - CDR Clean Dirty Right
7 - CCL Clean Clean Left
8 - CCR Clean Clean Right
"""
vacumm_world = Graph(dict(
State_1 = dict(Suck = ['State_7', 'State_5'], Right = ['State_2']),
    State_2 = dict(Suck = ['State_8', 'State_4'], Left = ['State_1']),
State_3 = dict(Suck = ['State_7'], Right = ['State_4']),
State_4 = dict(Suck = ['State_4', 'State_2'], Left = ['State_3']),
State_5 = dict(Suck = ['State_5', 'State_1'], Right = ['State_6']),
State_6 = dict(Suck = ['State_8'], Left = ['State_5']),
State_7 = dict(Suck = ['State_7', 'State_3'], Right = ['State_8']),
State_8 = dict(Suck = ['State_8', 'State_6'], Left = ['State_7'])
))
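# Illustrative sketch (not part of the original module): the graph above can be
# solved with and_or_graph_search via GraphProblemStochastic (defined further
# below); the goal is any state with both rooms clean (State_7 or State_8).
def _example_erratic_vacuum_plan():
    "Return a conditional plan for the erratic vacuum world."
    problem = GraphProblemStochastic('State_1', ['State_7', 'State_8'], vacumm_world)
    return and_or_graph_search(problem)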
""" [Figure 4.23]
One-dimensional state space Graph
"""
one_dim_state_space = Graph(dict(
State_1=dict(Right='State_2'),
State_2=dict(Right='State_3', Left='State_1'),
State_3=dict(Right='State_4', Left='State_2'),
State_4=dict(Right='State_5', Left='State_3'),
State_5=dict(Right='State_6', Left='State_4'),
State_6=dict(Left='State_5')
))
one_dim_state_space.least_costs = dict(
State_1=8,
State_2=9,
State_3=2,
State_4=2,
State_5=4,
State_6=3)
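# Illustrative sketch (not part of the original module): driving LRTAStarAgent
# on the one-dimensional space above by feeding problem.output(state, action)
# back as the next percept. _example_lrta_run is a hypothetical helper name.
def _example_lrta_run(start='State_3', limit=20):
    "Run the LRTA* agent until it returns None (goal reached) or limit steps."
    problem = OnlineSearchProblem(start, 'State_5', one_dim_state_space)
    agent = LRTAStarAgent(problem)
    state, actions = start, []
    for _ in range(limit):
        action = agent(state)
        if action is None:
            break
        actions.append(action)
        state = problem.output(state, action)
    return actions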
""" [Figure 6.1]
Principal states and territories of Australia
"""
australia_map = UndirectedGraph(dict(
T=dict(),
SA=dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=dict(WA=1, Q=1),
NSW=dict(Q=1, V=1)))
australia_map.locations = dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42),
V=(145, 37))
class GraphProblem(Problem):
"The problem of searching a graph from one node to another."
def __init__(self, initial, goal, graph):
Problem.__init__(self, initial, goal)
self.graph = graph
def actions(self, A):
"The actions at a graph node are just its neighbors."
return list(self.graph.get(A).keys())
def result(self, state, action):
"The result of going to a neighbor is just that neighbor."
return action
def path_cost(self, cost_so_far, A, action, B):
return cost_so_far + (self.graph.get(A, B) or infinity)
def h(self, node):
"h function is straight-line distance from a node's state to goal."
locs = getattr(self.graph, 'locations', None)
if locs:
return int(distance(locs[node.state], locs[self.goal]))
else:
return infinity
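# Illustrative sketch (not part of the original module): the Romania map
# defined above wrapped in a GraphProblem and solved with A*; GraphProblem.h
# supplies the straight-line-distance heuristic that astar_search uses here.
def _example_romania_route():
    "Find the optimal Arad -> Bucharest route: ['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']."
    problem = GraphProblem('Arad', 'Bucharest', romania_map)
    return astar_search(problem).solution()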
class GraphProblemStochastic(GraphProblem):
"""
A version of GraphProblem where an action can lead to
nondeterministic output i.e. multiple possible states
Define the graph as dict(A = dict(Action = [[<Result 1>, <Result 2>, ...], <cost>], ...), ...)
    As the dictionary format is different, make sure the graph is created as a directed graph.
"""
def result(self, state, action):
return self.graph.get(state, action)
def path_cost():
raise NotImplementedError
# ______________________________________________________________________________
class NQueensProblem(Problem):
"""The problem of placing N queens on an NxN board with none attacking
each other. A state is represented as an N-element array, where
a value of r in the c-th entry means there is a queen at column c,
row r, and a value of None means that the c-th column has not been
filled in yet. We fill in columns left to right.
>>> depth_first_tree_search(NQueensProblem(8))
<Node [7, 3, 0, 2, 5, 1, 6, 4]>
"""
def __init__(self, N):
self.N = N
self.initial = [None] * N
def actions(self, state):
"In the leftmost empty column, try all non-conflicting rows."
if state[-1] is not None:
return [] # All columns filled; no successors
else:
col = state.index(None)
return [row for row in range(self.N)
if not self.conflicted(state, row, col)]
def result(self, state, row):
"Place the next queen at the given row."
col = state.index(None)
new = state[:]
new[col] = row
return new
def conflicted(self, state, row, col):
"Would placing a queen at (row, col) conflict with anything?"
return any(self.conflict(row, col, state[c], c)
for c in range(col))
def conflict(self, row1, col1, row2, col2):
"Would putting two queens in (row1, col1) and (row2, col2) conflict?"
return (row1 == row2 or # same row
col1 == col2 or # same column
row1 - col1 == row2 - col2 or # same \ diagonal
row1 + col1 == row2 + col2) # same / diagonal
def goal_test(self, state):
"Check if all columns filled, no conflicts."
if state[-1] is None:
return False
return not any(self.conflicted(state, state[col], col)
for col in range(len(state)))
# ______________________________________________________________________________
# Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
# iterative-repair and related search techniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
"""Return a random Boggle board of size n x n.
We represent a board as a linear list of letters."""
cubes = [cubes16[i % 16] for i in range(n * n)]
random.shuffle(cubes)
return list(map(random.choice, cubes))
# The best 5x5 board found by Boyan, with our word list this board scores
# 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
"Print the board in a 2-d array."
n2 = len(board)
n = exact_sqrt(n2)
for i in range(n2):
if i % n == 0 and i > 0:
print()
if board[i] == 'Q':
print('Qu', end=' ')
else:
print(str(board[i]) + ' ', end=' ')
print()
def boggle_neighbors(n2, cache={}):
"""Return a list of lists, where the i-th element is the list of indexes
for the neighbors of square i."""
if cache.get(n2):
return cache.get(n2)
n = exact_sqrt(n2)
neighbors = [None] * n2
for i in range(n2):
neighbors[i] = []
on_top = i < n
on_bottom = i >= n2 - n
on_left = i % n == 0
on_right = (i+1) % n == 0
if not on_top:
neighbors[i].append(i - n)
if not on_left:
neighbors[i].append(i - n - 1)
if not on_right:
neighbors[i].append(i - n + 1)
if not on_bottom:
neighbors[i].append(i + n)
if not on_left:
neighbors[i].append(i + n - 1)
if not on_right:
neighbors[i].append(i + n + 1)
if not on_left:
neighbors[i].append(i - 1)
if not on_right:
neighbors[i].append(i + 1)
cache[n2] = neighbors
return neighbors
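# For illustration (assuming a 4x4 board): square 0 is the top-left corner, so
# boggle_neighbors(16)[0] contains exactly the indexes 1, 4 and 5.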
def exact_sqrt(n2):
"If n2 is a perfect square, return its square root, else raise error."
n = int(math.sqrt(n2))
assert n * n == n2
return n
# _____________________________________________________________________________
class Wordlist:
"""This class holds a list of words. You can use (word in wordlist)
to check if a word is in the list, or wordlist.lookup(prefix)
to see if prefix starts any of the words in the list."""
def __init__(self, file, min_len=3):
lines = file.read().upper().split()
self.words = [word for word in lines if len(word) >= min_len]
self.words.sort()
self.bounds = {}
for c in ALPHABET:
c2 = chr(ord(c) + 1)
self.bounds[c] = (bisect.bisect(self.words, c),
bisect.bisect(self.words, c2))
def lookup(self, prefix, lo=0, hi=None):
"""See if prefix is in dictionary, as a full word or as a prefix.
Return two values: the first is the lowest i such that
words[i].startswith(prefix), or is None; the second is
True iff prefix itself is in the Wordlist."""
words = self.words
if hi is None:
hi = len(words)
i = bisect.bisect_left(words, prefix, lo, hi)
if i < len(words) and words[i].startswith(prefix):
return i, (words[i] == prefix)
else:
return None, False
def __contains__(self, word):
return self.lookup(word)[1]
def __len__(self):
return len(self.words)
# _____________________________________________________________________________
class BoggleFinder:
"""A class that allows you to find all the words in a Boggle board. """
wordlist = None # A class variable, holding a wordlist
def __init__(self, board=None):
if BoggleFinder.wordlist is None:
BoggleFinder.wordlist = Wordlist(DataFile("EN-text/wordlist.txt"))
self.found = {}
if board:
self.set_board(board)
def set_board(self, board=None):
"Set the board, and find all the words in it."
if board is None:
board = random_boggle()
self.board = board
self.neighbors = boggle_neighbors(len(board))
self.found = {}
for i in range(len(board)):
lo, hi = self.wordlist.bounds[board[i]]
self.find(lo, hi, i, [], '')
return self
def find(self, lo, hi, i, visited, prefix):
"""Looking in square i, find the words that continue the prefix,
considering the entries in self.wordlist.words[lo:hi], and not
revisiting the squares in visited."""
if i in visited:
return
wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
if wordpos is not None:
if is_word:
self.found[prefix] = True
visited.append(i)
c = self.board[i]
if c == 'Q':
c = 'QU'
prefix += c
for j in self.neighbors[i]:
self.find(wordpos, hi, j, visited, prefix)
visited.pop()
def words(self):
"The words found."
return list(self.found.keys())
scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()])
def __len__(self):
"The number of words found."
return len(self.found)
# _____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, verbose=True):
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it."""
finder = BoggleFinder()
if board is None:
board = random_boggle()
best = len(finder.set_board(board))
for _ in range(ntimes):
i, oldc = mutate_boggle(board)
new = len(finder.set_board(board))
if new > best:
best = new
if verbose:
print(best, _, board)
else:
board[i] = oldc # Change back
if verbose:
print_boggle(board)
return board, best
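# Usage sketch (results vary between runs, and the EN-text/wordlist.txt data
# file must be available for BoggleFinder to load):
#   board, score = boggle_hill_climbing(ntimes=50, verbose=False)
#   print_boggle(board)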
def mutate_boggle(board):
i = random.randrange(len(board))
oldc = board[i]
# random.choice(boyan_best)
board[i] = random.choice(random.choice(cubes16))
return i, oldc
# ______________________________________________________________________________
# Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
"""Delegates to a problem, and keeps statistics."""
def __init__(self, problem):
self.problem = problem
self.succs = self.goal_tests = self.states = 0
self.found = None
def actions(self, state):
self.succs += 1
return self.problem.actions(state)
def result(self, state, action):
self.states += 1
return self.problem.result(state, action)
def goal_test(self, state):
self.goal_tests += 1
result = self.problem.goal_test(state)
if result:
self.found = state
return result
def path_cost(self, c, state1, action, state2):
return self.problem.path_cost(c, state1, action, state2)
def value(self, state):
return self.problem.value(state)
def __getattr__(self, attr):
return getattr(self.problem, attr)
def __repr__(self):
return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
self.states, str(self.found)[:4])
def compare_searchers(problems, header,
searchers=[breadth_first_tree_search,
breadth_first_search,
depth_first_graph_search,
iterative_deepening_search,
depth_limited_search,
recursive_best_first_search]):
def do(searcher, problem):
p = InstrumentedProblem(problem)
searcher(p)
return p
table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
print_table(table, header)
def compare_graph_searchers():
"""Prints a table of search results."""
compare_searchers(problems=[GraphProblem('Arad', 'Bucharest', romania_map),
GraphProblem('Oradea', 'Neamt', romania_map),
GraphProblem('Q', 'WA', australia_map)],
header=['Searcher', 'romania_map(Arad, Bucharest)',
'romania_map(Oradea, Neamt)', 'australia_map'])
| mit | 3,359,287,629,173,186,000 | 34.604611 | 105 | 0.571223 | false |
manxueitp/cozmo-test | cozmo_sdk_examples/tutorials/02_cozmo_face/03_alarm_clock.py | 1 | 9691 | #!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Cozmo Alarm Clock
Use Cozmo's face to display the current time
Play an alarm (Cozmo tells you to wake up) at a set time
NOTE: This is an example program. Anki takes no responsibility
if Cozmo fails to wake you up on time!
'''
import datetime
import math
import sys
import time
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
sys.exit("Cannot import from PIL. Do `pip3 install --user Pillow` to install")
import cozmo
#: bool: Set to True to display the clock as analog
#: (with a small digital readout below)
SHOW_ANALOG_CLOCK = False
def make_text_image(text_to_draw, x, y, font=None):
'''Make a PIL.Image with the given text printed on it
Args:
text_to_draw (string): the text to draw to the image
x (int): x pixel location
y (int): y pixel location
font (PIL.ImageFont): the font to use
Returns:
:class:(`PIL.Image.Image`): a PIL image with the text drawn on it
'''
# make a blank image for the text, initialized to opaque black
text_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))
# get a drawing context
dc = ImageDraw.Draw(text_image)
# draw the text
dc.text((x, y), text_to_draw, fill=(255, 255, 255, 255), font=font)
return text_image
# get a font - location depends on OS so try a couple of options
# failing that the default of None will just use a default font
_clock_font = None
try:
_clock_font = ImageFont.truetype("arial.ttf", 20)
except IOError:
try:
_clock_font = ImageFont.truetype("/Library/Fonts/Arial.ttf", 20)
except IOError:
pass
def draw_clock_hand(dc, cen_x, cen_y, circle_ratio, hand_length):
'''Draw a single clock hand (hours, minutes or seconds)
Args:
dc: (:class:`PIL.ImageDraw.ImageDraw`): drawing context to use
cen_x (float): x coordinate of center of hand
cen_y (float): y coordinate of center of hand
circle_ratio (float): ratio (from 0.0 to 1.0) that hand has travelled
hand_length (float): the length of the hand
'''
hand_angle = circle_ratio * math.pi * 2.0
vec_x = hand_length * math.sin(hand_angle)
vec_y = -hand_length * math.cos(hand_angle)
# x_scalar doubles the x size to compensate for the interlacing
# in y that would otherwise make the screen appear 2x tall
x_scalar = 2.0
# pointy end of hand
hand_end_x = int(cen_x + (x_scalar * vec_x))
hand_end_y = int(cen_y + vec_y)
# 2 points, perpendicular to the direction of the hand,
# to give a triangle with some width
hand_width_ratio = 0.1
hand_end_x2 = int(cen_x - ((x_scalar * vec_y) * hand_width_ratio))
hand_end_y2 = int(cen_y + (vec_x * hand_width_ratio))
hand_end_x3 = int(cen_x + ((x_scalar * vec_y) * hand_width_ratio))
hand_end_y3 = int(cen_y - (vec_x * hand_width_ratio))
dc.polygon([(hand_end_x, hand_end_y), (hand_end_x2, hand_end_y2),
(hand_end_x3, hand_end_y3)], fill=(255, 255, 255, 255))
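# Worked example of the ratios used below: at 3:30:00 the minute hand has made
# half a revolution (min_ratio = 0.5), so the hour hand sits at
# (3 + 0.5) / 12 ~= 0.29 of a revolution -- the values make_clock_image() computes.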
def make_clock_image(current_time):
'''Make a PIL.Image with the current time displayed on it
Args:
        current_time (:class:`datetime.time`): the time to display
Returns:
:class:(`PIL.Image.Image`): a PIL image with the time displayed on it
'''
time_text = time.strftime("%I:%M:%S %p")
if not SHOW_ANALOG_CLOCK:
return make_text_image(time_text, 8, 6, _clock_font)
# make a blank image for the text, initialized to opaque black
clock_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))
# get a drawing context
dc = ImageDraw.Draw(clock_image)
# calculate position of clock elements
text_height = 9
screen_width, screen_height = cozmo.oled_face.dimensions()
analog_width = screen_width
analog_height = screen_height - text_height
cen_x = analog_width * 0.5
cen_y = analog_height * 0.5
# calculate size of clock hands
sec_hand_length = (analog_width if (analog_width < analog_height) else analog_height) * 0.5
min_hand_length = 0.85 * sec_hand_length
hour_hand_length = 0.7 * sec_hand_length
# calculate rotation for each hand
sec_ratio = current_time.second / 60.0
min_ratio = (current_time.minute + sec_ratio) / 60.0
hour_ratio = (current_time.hour + min_ratio) / 12.0
# draw the clock hands
draw_clock_hand(dc, cen_x, cen_y, hour_ratio, hour_hand_length)
draw_clock_hand(dc, cen_x, cen_y, min_ratio, min_hand_length)
draw_clock_hand(dc, cen_x, cen_y, sec_ratio, sec_hand_length)
# draw the digital time_text at the bottom
x = 32
y = screen_height - text_height
dc.text((x, y), time_text, fill=(255, 255, 255, 255), font=None)
return clock_image
def convert_to_time_int(in_value, time_unit):
'''Convert in_value to an int and ensure it is in the valid range for that time unit
(e.g. 0..23 for hours)'''
max_for_time_unit = {'hours': 23, 'minutes': 59, 'seconds': 59}
max_val = max_for_time_unit[time_unit]
try:
int_val = int(in_value)
except ValueError:
raise ValueError("%s value '%s' is not an int" % (time_unit, in_value))
if int_val < 0:
raise ValueError("%s value %s is negative" % (time_unit, int_val))
if int_val > max_val:
raise ValueError("%s value %s exceeded %s" % (time_unit, int_val, max_val))
return int_val
def extract_time_from_args():
''' Extract a (24-hour-clock) user-specified time from the command-line
Supports colon and space separators - e.g. all 3 of "11 22 33", "11:22:33" and "11 22:33"
would map to the same time.
The seconds value is optional and defaults to 0 if not provided.'''
# split sys.argv further for any args that contain a ":"
split_time_args = []
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
split_args = arg.split(':')
for split_arg in split_args:
split_time_args.append(split_arg)
if len(split_time_args) >= 2:
try:
hours = convert_to_time_int(split_time_args[0], 'hours')
minutes = convert_to_time_int(split_time_args[1], 'minutes')
seconds = 0
if len(split_time_args) >= 3:
seconds = convert_to_time_int(split_time_args[2], 'seconds')
return datetime.time(hours, minutes, seconds)
except ValueError as e:
print("ValueError %s" % e)
# Default to no alarm
return None
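# For example (a sketch of the parsing above, not an exhaustive list):
#   "alarm_clock.py 7:30"     -> datetime.time(7, 30, 0)
#   "alarm_clock.py 7 30 15"  -> datetime.time(7, 30, 15)
#   missing or invalid values -> None (no alarm is set)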
def get_in_position(robot: cozmo.robot.Robot):
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face'''
if (robot.lift_height.distance_mm > 45) or (robot.head_angle.degrees < 40):
with robot.perform_off_charger():
robot.set_lift_height(0.0).wait_for_completed()
robot.set_head_angle(cozmo.robot.MAX_HEAD_ANGLE).wait_for_completed()
def alarm_clock(robot: cozmo.robot.Robot):
'''The core of the alarm_clock program'''
alarm_time = extract_time_from_args()
if alarm_time:
print("Alarm set for %s" % alarm_time)
else:
print("No Alarm time provided. Usage example: 'alarm_clock.py 17:23' to set alarm for 5:23 PM. (Input uses the 24-hour clock.)")
print("Press CTRL-C to quit")
get_in_position(robot)
was_before_alarm_time = False
last_displayed_time = None
while True:
# Check the current time, and see if it's time to play the alarm
current_time = datetime.datetime.now().time()
do_alarm = False
if alarm_time:
is_before_alarm_time = current_time < alarm_time
do_alarm = was_before_alarm_time and not is_before_alarm_time # did we just cross the alarm time
was_before_alarm_time = is_before_alarm_time
if do_alarm:
# Cancel the latest image display action so that the alarm actions can play
robot.abort_all_actions()
# Speak The Time (off the charger as it's an animation)
with robot.perform_off_charger():
short_time_string = str(current_time.hour) + ":" + str(current_time.minute)
robot.say_text("Wake up lazy human! it's " + short_time_string).wait_for_completed()
else:
# See if the displayed time needs updating
if (last_displayed_time is None) or (current_time.second != last_displayed_time.second):
# Create the updated image with this time
clock_image = make_clock_image(current_time)
oled_face_data = cozmo.oled_face.convert_image_to_screen_data(clock_image)
# display for 1 second
robot.display_oled_face_image(oled_face_data, 1000.0)
last_displayed_time = current_time
# only sleep for a fraction of a second to ensure we update the seconds as soon as they change
time.sleep(0.1)
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger for this example
cozmo.run_program(alarm_clock)
| mit | -5,040,915,958,097,453,000 | 33.610714 | 136 | 0.640594 | false |
dnaextrim/django_adminlte_x | adminlte/static/plugins/ckeditor/samples/old/api.html.py | 1 | 6969 | XXXXXXXXX XXXXX
[masked content -- every character of this CKEditor sample page (api.html) is replaced with 'X' in the source; nothing is recoverable]
| mit | 2,327,210,525,161,143,300 | 32.185714 | 216 | 0.841584 | false |
JioCloud/tempest-lib | tempest_lib/services/identity/v3/token_client.py | 1 | 6711 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest_lib.common import rest_client
from tempest_lib import exceptions
class V3TokenClient(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
dscv = disable_ssl_certificate_validation
super(V3TokenClient, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests)
if 'auth/tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/auth/tokens'
self.auth_url = auth_url
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
domain_name=None, token=None):
"""Obtains a token from the authentication service
:param user_id: user id
:param username: user name
:param user_domain_id: the user domain id
:param user_domain_name: the user domain name
:param project_domain_id: the project domain id
:param project_domain_name: the project domain name
:param domain_id: a domain id to scope to
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
:param token: a token to re-scope.
Accepts different combinations of credentials.
        Sample valid combinations:
- token
- token, project_name, project_domain_id
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
Validation is left to the server side.
"""
creds = {
'auth': {
'identity': {
'methods': [],
}
}
}
id_obj = creds['auth']['identity']
if token:
id_obj['methods'].append('token')
id_obj['token'] = {
'id': token
}
if (user_id or username) and password:
id_obj['methods'].append('password')
id_obj['password'] = {
'user': {
'password': password,
}
}
if user_id:
id_obj['password']['user']['id'] = user_id
else:
id_obj['password']['user']['name'] = username
_domain = None
if user_domain_id is not None:
_domain = dict(id=user_domain_id)
elif user_domain_name is not None:
_domain = dict(name=user_domain_name)
if _domain:
id_obj['password']['user']['domain'] = _domain
if (project_id or project_name):
_project = dict()
if project_id:
_project['id'] = project_id
elif project_name:
_project['name'] = project_name
if project_domain_id is not None:
_project['domain'] = {'id': project_domain_id}
elif project_domain_name is not None:
_project['domain'] = {'name': project_domain_name}
creds['auth']['scope'] = dict(project=_project)
elif domain_id:
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(201, resp.status)
return rest_client.ResponseBody(resp, body)
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201, 204]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
return resp, json.loads(resp_body)
def get_token(self, **kwargs):
"""Returns (token id, token data) for supplied credentials"""
auth_data = kwargs.pop('auth_data', False)
if not (kwargs.get('user_domain_id') or
kwargs.get('user_domain_name')):
kwargs['user_domain_name'] = 'Default'
if not (kwargs.get('project_domain_id') or
kwargs.get('project_domain_name')):
kwargs['project_domain_name'] = 'Default'
body = self.auth(**kwargs)
token = body.response.get('x-subject-token')
if auth_data:
return token, body['token']
else:
return token
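# Example usage (a sketch only -- the endpoint URL and credentials are
# placeholders, and a reachable Keystone v3 service is assumed):
#
#   client = V3TokenClient('http://keystone.example.com/v3')
#   token, auth_data = client.get_token(username='demo', password='secret',
#                                       project_name='demo', auth_data=True)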
class V3TokenClientJSON(V3TokenClient):
LOG = logging.getLogger(__name__)
def _warn(self):
self.LOG.warning("%s class was deprecated and renamed to %s" %
(self.__class__.__name__, 'V3TokenClient'))
def __init__(self, *args, **kwargs):
self._warn()
super(V3TokenClientJSON, self).__init__(*args, **kwargs)
| apache-2.0 | 876,780,352,556,253,000 | 36.49162 | 79 | 0.56996 | false |
spark-test/spark | python/pyspark/sql/tests/test_types.py | 1 | 43002 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import ctypes
import datetime
import os
import pickle
import sys
import unittest
from pyspark.sql import Row
from pyspark.sql.functions import col, UserDefinedFunction
from pyspark.sql.types import *
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings, \
_array_unsigned_int_typecode_ctype_mappings, _infer_type, _make_type_verifier, _merge_type
from pyspark.testing.sqlutils import ReusedSQLTestCase, ExamplePointUDT, PythonOnlyUDT, \
ExamplePoint, PythonOnlyPoint, MyObject
class TypesTests(ReusedSQLTestCase):
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
with self.tempView("test"):
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
with self.tempView("test2"):
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_specification(self):
from decimal import Decimal
class A(object):
def __init__(self):
self.a = 1
data = [
True,
1,
"a",
u"a",
datetime.date(1970, 1, 1),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
array.array("d", [1]),
[1],
(1, ),
{"a": 1},
bytearray(1),
Decimal(1),
Row(a=1),
Row("a")(1),
A(),
]
df = self.spark.createDataFrame([data])
actual = list(map(lambda x: x.dataType.simpleString(), df.schema))
expected = [
'boolean',
'bigint',
'string',
'string',
'date',
'timestamp',
'double',
'array<double>',
'array<bigint>',
'struct<_1:bigint>',
'map<string,bigint>',
'binary',
'decimal(38,18)',
'struct<a:bigint>',
'struct<a:bigint>',
'struct<a:bigint>',
]
self.assertEqual(actual, expected)
actual = list(df.first())
expected = [
True,
1,
'a',
u"a",
datetime.date(1970, 1, 1),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
[1.0],
[1],
Row(_1=1),
{"a": 1},
bytearray(b'\x00'),
Decimal('1.000000000000000000'),
Row(a=1),
Row(a=1),
Row(a=1),
]
self.assertEqual(actual, expected)
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_negative_decimal(self):
try:
self.spark.sql("set spark.sql.legacy.allowNegativeScaleOfDecimal=true")
df = self.spark.createDataFrame([(1, ), (11, )], ["value"])
ret = df.select(col("value").cast(DecimalType(1, -1))).collect()
actual = list(map(lambda r: int(r.value), ret))
self.assertEqual(actual, [0, 10])
finally:
self.spark.sql("set spark.sql.legacy.allowNegativeScaleOfDecimal=false")
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
with self.tempView("table2"):
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
with self.tempView("test"):
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
with self.tempView("labeled_point"):
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
with self.tempView("labeled_point"):
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# this saving as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
@unittest.skipIf(sys.version < "3", "only Python 3 infers bytes as binary type")
def test_infer_binary_type(self):
binaryrow = [Row(f1='a', f2=b"abcd")]
df = self.sc.parallelize(binaryrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, BinaryType())
# this saving as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_binary_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(b"abcd", df1.first().f2)
self.assertEqual(_infer_type(b""), BinaryType())
self.assertEqual(_infer_type(b"1234"), BinaryType())
def test_merge_type(self):
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
# test for SPARK-16542
def test_array_types(self):
        # This test needs to make sure that the Scala type selected is at least
# as large as the python's types. This is necessary because python's
# array types depend on C implementation on the machine. Therefore there
# is no machine independent correspondence between python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
        # The size of C types changes with implementation, so we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# Keys in _array_type_mappings is a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` are not supported in python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
# PyPy seems not having array.typecodes.
all_types = set(['b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q', 'f', 'd'])
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
# regression test for SPARK-23299
def test_row_without_column_name(self):
row = Row("Alice", 11)
self.assertEqual(repr(row), "<Row('Alice', 11)>")
# test __repr__ with unicode values
if sys.version_info.major >= 3:
self.assertEqual(repr(Row("数", "量")), "<Row('数', '量')>")
else:
self.assertEqual(repr(Row(u"数", u"量")), r"<Row(u'\u6570', u'\u91cf')>")
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
def test_invalid_create_row(self):
row_class = Row("c1", "c2")
self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(-(2**63), LongType()),
(2**63 - 1, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(sys.version_info[:2] < (3, 6), "Create Row without sorting fields")
def test_row_without_field_sorting(self):
sorting_enabled_tmp = Row._row_field_sorting_enabled
Row._row_field_sorting_enabled = False
r = Row(b=1, a=2)
TestRow = Row("b", "a")
expected = TestRow(1, 2)
self.assertEqual(r, expected)
self.assertEqual(repr(r), "Row(b=1, a=2)")
Row._row_field_sorting_enabled = sorting_enabled_tmp
if __name__ == "__main__":
from pyspark.sql.tests.test_types import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | -5,974,779,664,881,302,000 | 42.076152 | 100 | 0.565062 | false |
okolisny/integration_tests | cfme/tests/infrastructure/test_snapshot.py | 1 | 15608 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.exceptions import NoSuchElementException
from cfme import test_requirements
from cfme.automate.explorer.domain import DomainCollection
from cfme.automate.simulation import simulate
from cfme.common.vm import VM
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.infrastructure.virtual_machines import Vm # For Vm.Snapshot
from cfme.utils import testgen
from cfme.utils.conf import credentials
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.path import data_path
from cfme.utils.ssh import SSHClient
from cfme.utils.version import current_version
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.long_running,
pytest.mark.tier(2),
test_requirements.snapshot
]
pytest_generate_tests = testgen.generate([RHEVMProvider, VMwareProvider], scope="module")
@pytest.fixture(scope="module")
def domain(request, appliance):
dom = DomainCollection(appliance).create(name=fauxfactory.gen_alpha(), enabled=True)
request.addfinalizer(dom.delete_if_exists)
return dom
def provision_vm(provider, template):
vm_name = random_vm_name(context="snpst")
vm = VM.factory(vm_name, provider, template_name=template.name)
if not provider.mgmt.does_vm_exist(vm_name):
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
return vm
@pytest.yield_fixture(scope="module")
def small_test_vm(setup_provider_modscope, provider, small_template_modscope, request):
vm = provision_vm(provider, small_template_modscope)
yield vm
try:
vm.delete_from_provider()
except Exception:
logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
@pytest.yield_fixture(scope="module")
def full_test_vm(setup_provider_modscope, provider, full_template_modscope, request):
vm = provision_vm(provider, full_template_modscope)
yield vm
try:
vm.delete_from_provider()
except Exception:
logger.exception('Exception deleting test vm "%s" on %s', vm.name, provider.name)
def new_snapshot(test_vm, has_name=True, memory=False):
return Vm.Snapshot(
name="snpshot_{}".format(fauxfactory.gen_alphanumeric(8)) if has_name else None,
description="snapshot_{}".format(fauxfactory.gen_alphanumeric(8)),
memory=memory, parent_vm=test_vm)
@pytest.mark.uncollectif(lambda provider:
(provider.one_of(RHEVMProvider) and provider.version < 4) or
current_version() < '5.8', 'Must be RHEVM provider version >= 4')
def test_memory_checkbox(small_test_vm, provider, soft_assert):
# Make sure the VM is powered on
small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_ON, cancel=False)
# Try to create snapshot with memory on powered on VM
has_name = not provider.one_of(RHEVMProvider)
snapshot1 = new_snapshot(small_test_vm, has_name=has_name, memory=True)
snapshot1.create()
assert snapshot1.exists
# Power off the VM
small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_OFF, cancel=False)
small_test_vm.wait_for_vm_state_change(desired_state=small_test_vm.STATE_OFF)
soft_assert(small_test_vm.provider.mgmt.is_vm_stopped(small_test_vm.name), "VM is not stopped!")
# Try to create snapshot with memory on powered off VM
snapshot2 = new_snapshot(small_test_vm, has_name=has_name, memory=True)
try:
snapshot2.create(force_check_memory=True)
except NoSuchElementException:
logger.info("Memory checkbox is not present on powered off VM.")
# Make sure that snapshot with memory was not created
wait_for(lambda: not snapshot2.exists, num_sec=40, delay=20, fail_func=provider.browser.refresh,
handle_exception=True)
@pytest.mark.uncollectif(lambda provider: (provider.one_of(RHEVMProvider) and provider.version < 4),
'Must be RHEVM provider version >= 4')
def test_snapshot_crud(small_test_vm, provider):
"""Tests snapshot crud
Metadata:
test_flag: snapshot, provision
"""
# has_name is false if testing RHEVMProvider
snapshot = new_snapshot(small_test_vm, has_name=(not provider.one_of(RHEVMProvider)))
snapshot.create()
snapshot.delete()
@pytest.mark.uncollectif(lambda provider: not provider.one_of(VMwareProvider),
'Not VMware provider')
def test_delete_all_snapshots(small_test_vm, provider):
"""Tests snapshot removal
Metadata:
test_flag: snapshot, provision
"""
snapshot1 = new_snapshot(small_test_vm)
snapshot1.create()
snapshot2 = new_snapshot(small_test_vm)
snapshot2.create()
snapshot2.delete_all()
# Make sure the snapshots are indeed deleted
wait_for(lambda: not snapshot1.exists, num_sec=300, delay=20, fail_func=snapshot1.refresh)
wait_for(lambda: not snapshot2.exists, num_sec=300, delay=20, fail_func=snapshot1.refresh)
@pytest.mark.uncollectif(lambda provider: (provider.one_of(RHEVMProvider) and provider.version < 4),
'Must be RHEVM provider version >= 4')
def test_verify_revert_snapshot(full_test_vm, provider, soft_assert, register_event):
"""Tests revert snapshot
Metadata:
test_flag: snapshot, provision
"""
if provider.one_of(RHEVMProvider):
snapshot1 = new_snapshot(full_test_vm, has_name=False)
else:
snapshot1 = new_snapshot(full_test_vm)
full_template = getattr(provider.data.templates, 'full_template')
ssh_kwargs = {
'hostname': snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name),
'username': credentials[full_template.creds]['username'],
'password': credentials[full_template.creds]['password']
}
ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm; it can take a while. Without
    # this wait, the ssh command would fail with a 'port 22 not available' error.
    # The easiest way to solve this is to mask the exception with 'handle_exception = True'
    # and wait for successful completion of the ssh command.
# The 'fail_func' ensures we close the connection that failed with exception.
# Without this, the connection would hang there and wait_for would fail with timeout.
wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').rc == 0, num_sec=300,
delay=20, handle_exception=True, fail_func=ssh_client.close())
snapshot1.create()
register_event(target_type='VmOrTemplate', target_name=full_test_vm.name,
event_type='vm_snapshot_complete')
register_event(target_type='VmOrTemplate', target_name=full_test_vm.name,
event_type='vm_snapshot')
ssh_client.run_command('touch snapshot2.txt')
if provider.one_of(RHEVMProvider):
snapshot2 = new_snapshot(full_test_vm, has_name=False)
else:
snapshot2 = new_snapshot(full_test_vm)
snapshot2.create()
if provider.one_of(RHEVMProvider):
full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF, cancel=False)
full_test_vm.wait_for_vm_state_change(
desired_state=full_test_vm.STATE_OFF, timeout=900)
snapshot1.revert_to()
# Wait for the snapshot to become active
logger.info('Waiting for vm %s to become active', snapshot1.name)
wait_for(lambda: snapshot1.active, num_sec=300, delay=20, fail_func=provider.browser.refresh)
full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF, timeout=720)
full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON, cancel=False)
full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON, timeout=900)
current_state = full_test_vm.find_quadicon().data['state']
soft_assert(current_state.startswith('currentstate-on'),
"Quadicon state is {}".format(current_state))
soft_assert(full_test_vm.provider.mgmt.is_vm_running(full_test_vm.name), "vm not running")
wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').rc == 0,
num_sec=400, delay=20, handle_exception=True, fail_func=ssh_client.close())
try:
result = ssh_client.run_command('test -e snapshot1.txt')
assert not result.rc
result = ssh_client.run_command('test -e snapshot2.txt')
assert result.rc
logger.info('Revert to snapshot %s successful', snapshot1.name)
except:
logger.exception('Revert to snapshot %s Failed', snapshot1.name)
ssh_client.close()
def setup_snapshot_env(test_vm, memory):
logger.info("Starting snapshot setup")
snapshot1 = new_snapshot(test_vm, memory=memory)
snapshot1.create()
snapshot2 = new_snapshot(test_vm, memory=memory)
snapshot2.create()
snapshot1.revert_to()
wait_for(lambda: snapshot1.active,
num_sec=300, delay=20, fail_func=test_vm.provider.browser.refresh)
@pytest.mark.parametrize("parent_vm", ["on_with_memory", "on_without_memory", "off"])
@pytest.mark.uncollectif(lambda provider: not provider.one_of(VMwareProvider),
'Not VMware provider')
def test_verify_vm_state_revert_snapshot(provider, parent_vm, small_test_vm):
"""
    test the vm state after reverting a snapshot, with the parent vm:
- powered on and includes memory
- powered on without memory
- powered off
vm state after revert should be:
- powered on
- powered off
- powered off
"""
power = small_test_vm.POWER_ON if parent_vm.startswith('on') else small_test_vm.POWER_OFF
memory = 'with_memory' in parent_vm
small_test_vm.power_control_from_cfme(option=power, cancel=False)
provider.mgmt.wait_vm_steady(small_test_vm.name)
setup_snapshot_env(small_test_vm, memory)
assert bool(small_test_vm.provider.mgmt.is_vm_running(small_test_vm.name)) == memory
@pytest.mark.uncollectif(lambda provider: not provider.one_of(VMwareProvider),
'Not VMware provider')
def test_operations_suspended_vm(small_test_vm, soft_assert):
"""Tests snapshot operations on suspended vm
Metadata:
test_flag: snapshot, provision
"""
# Create first snapshot when VM is running
snapshot1 = new_snapshot(small_test_vm)
snapshot1.create()
wait_for(lambda: snapshot1.active, num_sec=300, delay=20, fail_func=snapshot1.refresh)
# Suspend the VM
small_test_vm.power_control_from_cfme(option=small_test_vm.SUSPEND, cancel=False)
small_test_vm.wait_for_vm_state_change(desired_state=small_test_vm.STATE_SUSPENDED)
current_state = small_test_vm.find_quadicon().data['state']
soft_assert(current_state.startswith('currentstate-suspended'),
"Quadicon state is {}".format(current_state))
# Create second snapshot when VM is suspended
snapshot2 = new_snapshot(small_test_vm)
snapshot2.create()
wait_for(lambda: snapshot2.active, num_sec=300, delay=20, fail_func=snapshot2.refresh)
# Try to revert to first snapshot while the VM is suspended
snapshot1.revert_to()
wait_for(lambda: snapshot1.active, num_sec=300, delay=20, fail_func=snapshot1.refresh)
# Check VM state, VM should be off
current_state = small_test_vm.find_quadicon().data['state']
soft_assert(current_state.startswith('currentstate-off'),
"Quadicon state is {}".format(current_state))
assert small_test_vm.provider.mgmt.is_vm_stopped(small_test_vm.name)
# Revert back to second snapshot
snapshot2.revert_to()
wait_for(lambda: snapshot2.active, num_sec=300, delay=20, fail_func=snapshot2.refresh)
# Check VM state, VM should be suspended
current_state = small_test_vm.find_quadicon().data['state']
soft_assert(current_state.startswith('currentstate-suspended'),
"Quadicon state is {}".format(current_state))
assert small_test_vm.provider.mgmt.is_vm_suspended(small_test_vm.name)
# Try to delete both snapshots while the VM is suspended
# The delete method will make sure the snapshots are indeed deleted
snapshot1.delete()
snapshot2.delete()
@pytest.mark.uncollectif(lambda provider: not provider.one_of(VMwareProvider),
'Not VMware provider')
def test_operations_powered_off_vm(small_test_vm):
# Make sure the VM is off
small_test_vm.power_control_from_cfme(option=small_test_vm.POWER_OFF, cancel=False)
small_test_vm.wait_for_vm_state_change(desired_state=small_test_vm.STATE_OFF)
# Create first snapshot
snapshot1 = new_snapshot(small_test_vm)
snapshot1.create()
wait_for(lambda: snapshot1.active, num_sec=300, delay=20, fail_func=snapshot1.refresh)
# Create second snapshot
snapshot2 = new_snapshot(small_test_vm)
snapshot2.create()
wait_for(lambda: snapshot2.active, num_sec=300, delay=20, fail_func=snapshot2.refresh)
# Try to revert to first snapshot while the VM is off
snapshot1.revert_to()
wait_for(lambda: snapshot1.active is True, num_sec=300, delay=20, fail_func=snapshot1.refresh)
# Try to delete both snapshots while the VM is off
# The delete method will make sure the snapshots are indeed deleted
snapshot1.delete()
snapshot2.delete()
@pytest.mark.uncollectif(lambda provider: not provider.one_of(VMwareProvider),
'Not VMware provider')
def test_create_snapshot_via_ae(appliance, request, domain, small_test_vm):
"""This test checks whether the vm.create_snapshot works in AE.
    Prerequisites:
* A VMware provider
* A VM that has been discovered by CFME
Steps:
* Clone the Request class inside the System namespace into a new domain
* Add a method named ``snapshot`` and insert the provided code there.
        * Add an instance named ``snapshot`` and set the method from the previous step
as ``meth5``
* Run the simulation of the method against the VM, preferably setting
``snap_name`` to something that can be checked
* Wait until snapshot with such name appears.
"""
# PREPARE
file = data_path.join("ui").join("automate").join("test_create_snapshot_via_ae.rb")
with file.open("r") as f:
method_contents = f.read()
miq_domain = DomainCollection(appliance).instantiate(name='ManageIQ')
miq_class = miq_domain.namespaces.instantiate(name='System').classes.instantiate(name='Request')
miq_class.copy_to(domain)
request_cls = domain.namespaces.instantiate(name='System').classes.instantiate(name='Request')
request.addfinalizer(request_cls.delete)
method = request_cls.methods.create(name="snapshot", location='inline', script=method_contents)
request.addfinalizer(method.delete)
instance = request_cls.instances.create(
name="snapshot",
fields={
"meth5": {
'value': "snapshot"}})
request.addfinalizer(instance.delete)
# SIMULATE
snap_name = fauxfactory.gen_alpha()
snapshot = Vm.Snapshot(name=snap_name, parent_vm=small_test_vm)
simulate(
instance="Request",
request="snapshot",
target_type='VM and Instance',
target_object=small_test_vm.name,
execute_methods=True,
attributes_values={"snap_name": snap_name})
wait_for(lambda: snapshot.exists, timeout="2m", delay=10,
fail_func=small_test_vm.provider.browser.refresh, handle_exception=True)
# Clean up if it appeared
snapshot.delete()
| gpl-2.0 | -8,239,839,360,710,147,000 | 42.719888 | 100 | 0.695861 | false |
cprogrammer1994/ModernGL | examples/old-examples/GLWindow/particle_system_2.py | 1 | 1794 | import math
import random
import struct
import GLWindow
import ModernGL
# Window & Context
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
prog = ctx.program([
ctx.vertex_shader('''
#version 330
uniform vec2 Screen;
in vec2 vert;
void main() {
gl_Position = vec4((vert / Screen) * 2.0 - 1.0, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
out vec4 color;
void main() {
color = vec4(0.30, 0.50, 1.00, 1.0);
}
''')
])
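# The transform shader below advances each particle with a Verlet-style step:
# next position = 2 * current - previous, i.e. the previous frame's motion is
# extrapolated at constant velocity.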
tvert = ctx.vertex_shader('''
#version 330
in vec2 in_pos;
in vec2 in_prev;
out vec2 out_pos;
out vec2 out_prev;
void main() {
out_pos = in_pos * 2.0 - in_prev;
out_prev = in_pos;
}
''')
transform = ctx.program(tvert, ['out_pos', 'out_prev'])
def particle():
a = random.uniform(0.0, math.pi * 2.0)
r = random.uniform(0.0, 1.0)
cx, cy = wnd.mouse[0], wnd.mouse[1]
return struct.pack('2f2f', cx, cy, cx + math.cos(a) * r, cy + math.sin(a) * r)
vbo1 = ctx.buffer(b''.join(particle() for i in range(1024)))
vbo2 = ctx.buffer(reserve=vbo1.size)
vao1 = ctx.simple_vertex_array(transform, vbo1, ['in_pos', 'in_prev'])
vao2 = ctx.simple_vertex_array(transform, vbo2, ['in_pos', 'in_prev'])
render_vao = ctx.vertex_array(prog, [
(vbo1, '2f8x', ['vert']),
])
idx = 0
ctx.point_size = 5.0
while wnd.update():
ctx.viewport = wnd.viewport
ctx.clear(0.9, 0.9, 0.9)
prog.uniforms['Screen'].value = wnd.size
for i in range(8):
vbo1.write(particle(), offset=idx * struct.calcsize('2f2f'))
idx = (idx + 1) % 1024
render_vao.render(ModernGL.POINTS, 1024)
vao1.transform(vbo2, ModernGL.POINTS, 1024)
ctx.copy_buffer(vbo1, vbo2)
| mit | 3,263,518,089,570,220,000 | 19.386364 | 82 | 0.574136 | false |
rliskovenko/DataRobot | app/test.py | 1 | 3085 | import random
import string
import app
import json
import unittest
from datetime import datetime
App = app.app
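# Test payloads keyed by the expected outcome: entries under True should be
# accepted by the app (status 'OK'), entries under False should be rejected.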
testSet = {
True : [
'''[{"date": "2015-05-12T14:36:00.451765",
"md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
"name": "John Doe",
"uid": "1"},
{"date": "2015-05-13T14:38:00.451765",
"md5checksum": "b419795d50db2a35e94c8364978d898f",
"name": "Jane Doe",
"uid": "2"}]''',
'''{"date": "2015-05-12T14:37:00.451765",
"md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
"name": "Carl Doe",
"uid": "3"}'''
],
False : [
'''[{"date": "2015-05-12T14:36:00.451765",
"md5checksum": "fffffff32b64ce94fdd0e4539ad0d44f",
"name": "John Doe",
"uid": "11"},
{"date": "2015-05-13T14:38:00.451765",
"md5checksum": "b419795d50db2a35e94c8364978d898f",
"name": "Jane Doe",
"uid": "12"}]''',
'''{"date": "2015-05-12T14:37:00.451765",
"md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
"name": "Carl Doe",
"uid": "13"}''',
'''{"date": "2015-05-14T14:37:00.451765",
"md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
"name": "Rozalie Doe",
"uid": "14"}'''
]
}
class DataRobotTestCase( unittest.TestCase ):
def __dbNameGen(self):
return 'test' + ''.join( random.SystemRandom().choice( string.ascii_uppercase + string.digits ) for _ in range( 8 ) )
def __test_add( self, data ):
return self.app.post( '/', data )
def __test_get( self, data ):
jsonData = json.loads( data )
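        # builds the GET URL "/<uid>/<YYYY-MM-DD>" from a record's uid and the
        # date part of its ISO timestamp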
__makeGetUrl = lambda ( x ): '/' + '/'.join( [ x['uid'], datetime.strptime( x['date'], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( "%Y-%m-%d" ) ] )
if isinstance( jsonData, list ):
return [ self.app.get( __makeGetUrl( obj ) ) for obj in jsonData ]
else:
return self.app.get( __makeGetUrl( jsonData ) )
def __run_test(self, data=testSet, sub=__test_add ):
for ( expected, tests ) in data.iteritems():
for test in tests:
res = sub( test )
if isinstance( res, list ):
for subRes in res:
assert expected == ( 'OK' in json.loads( subRes.data )['status'] )
else:
assert expected == ( 'OK' in json.loads( res.data )['status'] )
def setUp(self):
app.MONGODB_HOST = 'localhost'
app.MONGODB_PORT = 27017
app.MONGODB_DB = self.__dbNameGen()
app.TESTING = True
self.app = App.test_client()
def tearDown(self):
app.connection.drop_database( app.MONGODB_DB )
def test_add(self):
self.__run_test( testSet, self.__test_add )
def test_get(self):
self.__run_test( testSet, self.__test_get )
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 1,129,024,472,684,088,200 | 33.662921 | 146 | 0.506969 | false |
tanzer/ls-emacs | scripts/lse_compile_language.py | 1 | 3736 | # -*- coding: utf-8 -*-
# Copyright (C) 1994-2017 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
#
#++
# Name
# lse_compile_language
#
# Purpose
# Compile emacs language definition
#
# Revision Dates
# 14-Nov-2007 (MG) Creation (ported from bash script)
# 24-May-2011 (MG) Allow language filenames as parameter
# Extract `lse_base_dir` out of the filename of the
# script
# 6-Mar-2012 (MG) Use `subprocess` instead of `os.system` to call
# emacs binary
# 29-Aug-2017 (MG) Switch to python 3
# 29-Aug-2017 (MG) Remove dependency to external libraries
# ««revision-date»»···
#--
import glob
import os
import re
import subprocess
def compile_language (* languages, ** kw) :
pjoin = os.path.join
env = os.environ.get
lse_base_dir = os.path.abspath \
(os.path.join (os.path.dirname (__file__), ".."))
lse_dir = env ("EMACSLSESRC", pjoin (lse_base_dir, "lse"))
lsc_dir = env ("EMACSLSEDIR", pjoin (lse_base_dir, "lse"))
emacs_binary = kw.pop ("emacs_binary", "emacs")
emacs_cmd_file = os.path.abspath (kw.pop ("emacs_cmd_file", None))
if not lse_dir :
raise ValueError ("EMACS LSE Source dir not defined")
files = []
pattern = re.compile (".*lse-language-(.+)\.lse")
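    # matches file names like "lse-language-<name>.lse" and captures <name>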
for lang_pattern in languages :
if os.path.isfile (lang_pattern) :
match = pattern.match (lang_pattern)
if match :
new_files = [(lang_pattern, match.group (1))]
else :
new_files = []
for lse_language in glob.glob \
(pjoin (lse_dir, "lse-language-%s.lse" % (lang_pattern, ))) :
match = pattern.match (lse_language)
if match :
new_files.append \
((lse_language.replace ("\\", "/"), match.group (1)))
files.extend (new_files)
if not new_files :
print ("No laguages found for pattern `%s`" % (lang_pattern, ))
if files :
correct_path = lambda s : s.replace (os.path.sep, "/")
print ("Compile languages %s" % (", ".join (n for f, n in files), ))
emacs_cmd = \
[ '(setq load-path\n'
' (append (list "%s" "%s") load-path)\n'
')' % (correct_path (lse_base_dir), correct_path (lse_dir))
, '(load "ls-emacs")'
, '(setq trim-versions-without-asking t)'
, '(setq delete-old-versions t)'
]
emacs_cmd.extend ('(lse-language:compile "%s")' % n for _, n in files)
open (emacs_cmd_file, "w").write (" ".join (emacs_cmd))
for k, v in ( ("EMACSLSESRC", lse_dir), ("EMACSLSEDIR", lsc_dir)) :
os.environ [k] = v
try :
subprocess.check_call \
([emacs_binary, "-batch", "-l", emacs_cmd_file])
except :
print ("Error compiling language")
if os.path.isfile (emacs_cmd_file) :
os.unlink (emacs_cmd_file)
# end def compile_language
if __name__ == "__main__" :
import argparse
parser = argparse.ArgumentParser ()
parser.add_argument ("language", type = str, nargs = "+")
parser.add_argument ("-b", "--emacs_binary", type = str, default="emacs")
parser.add_argument \
( "-c", "--emacs_cmd_file", type = str
, default="lse_compile_language_cmdfile"
)
cmd = parser.parse_args ()
compile_language \
( emacs_binary = cmd.emacs_binary
, emacs_cmd_file = cmd.emacs_cmd_file
, * cmd.language
)
### __END__ lse_compile_language
| gpl-2.0 | -165,061,289,321,706,660 | 36.666667 | 78 | 0.539287 | false |
bartoldeman/easybuild-framework | test/framework/filetools.py | 1 | 75056 | # #
# Copyright 2012-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for filetools.py
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import datetime
import glob
import os
import re
import shutil
import stat
import sys
import tempfile
import urllib2
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
from unittest import TextTestRunner
from urllib2 import URLError
import easybuild.tools.filetools as ft
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.multidiff import multidiff
class FileToolsTest(EnhancedTestCase):
""" Testcase for filetools module """
class_names = [
('GCC', 'EB_GCC'),
('7zip', 'EB_7zip'),
('Charm++', 'EB_Charm_plus__plus_'),
('DL_POLY_Classic', 'EB_DL_underscore_POLY_underscore_Classic'),
('0_foo+0x0x#-$__', 'EB_0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'),
]
def test_extract_cmd(self):
"""Test various extract commands."""
tests = [
('test.zip', "unzip -qq test.zip"),
('/some/path/test.tar', "tar xf /some/path/test.tar"),
('test.tar.gz', "tar xzf test.tar.gz"),
('test.TAR.GZ', "tar xzf test.TAR.GZ"),
('test.tgz', "tar xzf test.tgz"),
('test.gtgz', "tar xzf test.gtgz"),
('test.bz2', "bunzip2 -c test.bz2 > test"),
('/some/path/test.bz2', "bunzip2 -c /some/path/test.bz2 > test"),
('test.tbz', "tar xjf test.tbz"),
('test.tbz2', "tar xjf test.tbz2"),
('test.tb2', "tar xjf test.tb2"),
('test.tar.bz2', "tar xjf test.tar.bz2"),
('test.gz', "gunzip -c test.gz > test"),
('untar.gz', "gunzip -c untar.gz > untar"),
("/some/path/test.gz", "gunzip -c /some/path/test.gz > test"),
('test.xz', "unxz test.xz"),
('test.tar.xz', "unxz test.tar.xz --stdout | tar x"),
('test.txz', "unxz test.txz --stdout | tar x"),
('test.iso', "7z x test.iso"),
('test.tar.Z', "tar xZf test.tar.Z"),
]
for (fn, expected_cmd) in tests:
cmd = ft.extract_cmd(fn)
self.assertEqual(expected_cmd, cmd)
self.assertEqual("unzip -qq -o test.zip", ft.extract_cmd('test.zip', True))
def test_find_extension(self):
"""Test find_extension function."""
tests = [
('test.zip', '.zip'),
('/some/path/test.tar', '.tar'),
('test.tar.gz', '.tar.gz'),
('test.TAR.GZ', '.TAR.GZ'),
('test.tgz', '.tgz'),
('test.gtgz', '.gtgz'),
('test.bz2', '.bz2'),
('/some/path/test.bz2', '.bz2'),
('test.tbz', '.tbz'),
('test.tbz2', '.tbz2'),
('test.tb2', '.tb2'),
('test.tar.bz2', '.tar.bz2'),
('test.gz', '.gz'),
('untar.gz', '.gz'),
("/some/path/test.gz", '.gz'),
('test.xz', '.xz'),
('test.tar.xz', '.tar.xz'),
('test.txz', '.txz'),
('test.iso', '.iso'),
('test.tar.Z', '.tar.Z'),
]
for (fn, expected_ext) in tests:
cmd = ft.find_extension(fn)
self.assertEqual(expected_ext, cmd)
def test_convert_name(self):
"""Test convert_name function."""
name = ft.convert_name("test+test-test.mpi")
self.assertEqual(name, "testplustestmintestmpi")
name = ft.convert_name("test+test-test.mpi", True)
self.assertEqual(name, "TESTPLUSTESTMINTESTMPI")
def test_find_base_dir(self):
"""test if we find the correct base dir"""
tmpdir = tempfile.mkdtemp()
foodir = os.path.join(tmpdir, 'foo')
os.mkdir(foodir)
os.mkdir(os.path.join(tmpdir, '.bar'))
os.mkdir(os.path.join(tmpdir, 'easybuild'))
os.chdir(tmpdir)
self.assertTrue(os.path.samefile(foodir, ft.find_base_dir()))
def test_encode_class_name(self):
"""Test encoding of class names."""
for (class_name, encoded_class_name) in self.class_names:
self.assertEqual(ft.encode_class_name(class_name), encoded_class_name)
self.assertEqual(ft.encode_class_name(ft.decode_class_name(encoded_class_name)), encoded_class_name)
def test_decode_class_name(self):
"""Test decoding of class names."""
for (class_name, encoded_class_name) in self.class_names:
self.assertEqual(ft.decode_class_name(encoded_class_name), class_name)
self.assertEqual(ft.decode_class_name(ft.encode_class_name(class_name)), class_name)
def test_patch_perl_script_autoflush(self):
"""Test patching Perl script for autoflush."""
fh, fp = tempfile.mkstemp()
os.close(fh)
perl_lines = [
"$!/usr/bin/perl",
"use strict;",
"print hello",
"",
"print hello again",
]
perltxt = '\n'.join(perl_lines)
ft.write_file(fp, perltxt)
ft.patch_perl_script_autoflush(fp)
txt = ft.read_file(fp)
self.assertTrue(len(txt.split('\n')) == len(perl_lines) + 4)
self.assertTrue(txt.startswith(perl_lines[0] + "\n\nuse IO::Handle qw();\nSTDOUT->autoflush(1);"))
for line in perl_lines[1:]:
self.assertTrue(line in txt)
os.remove(fp)
os.remove("%s.eb.orig" % fp)
def test_which(self):
"""Test which function for locating commands."""
python = ft.which('python')
self.assertTrue(python and os.path.exists(python) and os.path.isabs(python))
path = ft.which('i_really_do_not_expect_a_command_with_a_name_like_this_to_be_available')
self.assertTrue(path is None)
os.environ['PATH'] = '%s:%s' % (self.test_prefix, os.environ['PATH'])
# put a directory 'foo' in place (should be ignored by 'which')
foo = os.path.join(self.test_prefix, 'foo')
ft.mkdir(foo)
ft.adjust_permissions(foo, stat.S_IRUSR|stat.S_IXUSR)
# put executable file 'bar' in place
bar = os.path.join(self.test_prefix, 'bar')
ft.write_file(bar, '#!/bin/bash')
ft.adjust_permissions(bar, stat.S_IRUSR|stat.S_IXUSR)
self.assertEqual(ft.which('foo'), None)
self.assertTrue(os.path.samefile(ft.which('bar'), bar))
# add another location to 'bar', which should only return the first location by default
barbis = os.path.join(self.test_prefix, 'more', 'bar')
ft.write_file(barbis, '#!/bin/bash')
ft.adjust_permissions(barbis, stat.S_IRUSR|stat.S_IXUSR)
os.environ['PATH'] = '%s:%s' % (os.environ['PATH'], os.path.dirname(barbis))
self.assertTrue(os.path.samefile(ft.which('bar'), bar))
# test getting *all* locations to specified command
res = ft.which('bar', retain_all=True)
self.assertEqual(len(res), 2)
self.assertTrue(os.path.samefile(res[0], bar))
self.assertTrue(os.path.samefile(res[1], barbis))
def test_checksums(self):
"""Test checksum functionality."""
fh, fp = tempfile.mkstemp()
os.close(fh)
ft.write_file(fp, "easybuild\n")
known_checksums = {
'adler32': '0x379257805',
'crc32': '0x1457143216',
'md5': '7167b64b1ca062b9674ffef46f9325db',
'sha1': 'db05b79e09a4cc67e9dd30b313b5488813db3190',
'sha256': '1c49562c4b404f3120a3fa0926c8d09c99ef80e470f7de03ffdfa14047960ea5',
'sha512': '7610f6ce5e91e56e350d25c917490e4815f7986469fafa41056698aec256733eb7297da8b547d5e74b851d7c4e475900cec4744df0f887ae5c05bf1757c224b4',
}
# make sure checksums computation/verification is correct
for checksum_type, checksum in known_checksums.items():
self.assertEqual(ft.compute_checksum(fp, checksum_type=checksum_type), checksum)
self.assertTrue(ft.verify_checksum(fp, (checksum_type, checksum)))
# default checksum type is MD5
self.assertEqual(ft.compute_checksum(fp), known_checksums['md5'])
# both MD5 and SHA256 checksums can be verified without specifying type
self.assertTrue(ft.verify_checksum(fp, known_checksums['md5']))
self.assertTrue(ft.verify_checksum(fp, known_checksums['sha256']))
# checksum of length 32 is assumed to be MD5, length 64 to be SHA256, other lengths not allowed
# providing non-matching MD5 and SHA256 checksums results in failed verification
self.assertFalse(ft.verify_checksum(fp, '1c49562c4b404f3120a3fa0926c8d09c'))
self.assertFalse(ft.verify_checksum(fp, '7167b64b1ca062b9674ffef46f9325db7167b64b1ca062b9674ffef46f9325db'))
# checksum of length other than 32/64 yields an error
error_pattern = "Length of checksum '.*' \(\d+\) does not match with either MD5 \(32\) or SHA256 \(64\)"
for checksum in ['tooshort', 'inbetween32and64charactersisnotgoodeither', known_checksums['sha256'] + 'foo']:
self.assertErrorRegex(EasyBuildError, error_pattern, ft.verify_checksum, fp, checksum)
# make sure faulty checksums are reported
broken_checksums = dict([(typ, val[:-3] + 'foo') for (typ, val) in known_checksums.items()])
for checksum_type, checksum in broken_checksums.items():
self.assertFalse(ft.compute_checksum(fp, checksum_type=checksum_type) == checksum)
self.assertFalse(ft.verify_checksum(fp, (checksum_type, checksum)))
# md5 is default
self.assertFalse(ft.compute_checksum(fp) == broken_checksums['md5'])
self.assertFalse(ft.verify_checksum(fp, broken_checksums['md5']))
self.assertFalse(ft.verify_checksum(fp, broken_checksums['sha256']))
# check whether missing checksums are enforced
build_options = {
'enforce_checksums': True,
}
init_config(build_options=build_options)
self.assertErrorRegex(EasyBuildError, "Missing checksum for", ft.verify_checksum, fp, None)
self.assertTrue(ft.verify_checksum(fp, known_checksums['md5']))
self.assertTrue(ft.verify_checksum(fp, known_checksums['sha256']))
# cleanup
os.remove(fp)
def test_common_path_prefix(self):
"""Test get common path prefix for a list of paths."""
self.assertEqual(ft.det_common_path_prefix(['/foo/bar/foo', '/foo/bar/baz', '/foo/bar/bar']), '/foo/bar')
self.assertEqual(ft.det_common_path_prefix(['/foo/bar/', '/foo/bar/baz', '/foo/bar']), '/foo/bar')
self.assertEqual(ft.det_common_path_prefix(['/foo/bar', '/foo']), '/foo')
self.assertEqual(ft.det_common_path_prefix(['/foo/bar/']), '/foo/bar')
self.assertEqual(ft.det_common_path_prefix(['/foo/bar', '/bar', '/foo']), None)
self.assertEqual(ft.det_common_path_prefix(['foo', 'bar']), None)
self.assertEqual(ft.det_common_path_prefix(['foo']), None)
self.assertEqual(ft.det_common_path_prefix([]), None)
def test_download_file(self):
"""Test download_file function."""
fn = 'toy-0.0.tar.gz'
target_location = os.path.join(self.test_buildpath, 'some', 'subdir', fn)
# provide local file path as source URL
test_dir = os.path.abspath(os.path.dirname(__file__))
toy_source_dir = os.path.join(test_dir, 'sandbox', 'sources', 'toy')
source_url = 'file://%s/%s' % (toy_source_dir, fn)
res = ft.download_file(fn, source_url, target_location)
self.assertEqual(res, target_location, "'download' of local file works")
downloads = glob.glob(target_location + '*')
self.assertEqual(len(downloads), 1)
# non-existing files result in None return value
self.assertEqual(ft.download_file(fn, 'file://%s/nosuchfile' % test_dir, target_location), None)
# install broken proxy handler for opening local files
# this should make urllib2.urlopen use this broken proxy for downloading from a file:// URL
proxy_handler = urllib2.ProxyHandler({'file': 'file://%s/nosuchfile' % test_dir})
urllib2.install_opener(urllib2.build_opener(proxy_handler))
# downloading over a broken proxy results in None return value (failed download)
# this tests whether proxies are taken into account by download_file
self.assertEqual(ft.download_file(fn, source_url, target_location), None, "download over broken proxy fails")
# modify existing download so we can verify re-download
ft.write_file(target_location, '')
# restore a working file handler, and retest download of local file
urllib2.install_opener(urllib2.build_opener(urllib2.FileHandler()))
res = ft.download_file(fn, source_url, target_location)
self.assertEqual(res, target_location, "'download' of local file works after removing broken proxy")
# existing file was re-downloaded, so a backup should have been created of the existing file
downloads = glob.glob(target_location + '*')
self.assertEqual(len(downloads), 2)
backup = [d for d in downloads if os.path.basename(d) != fn][0]
self.assertEqual(ft.read_file(backup), '')
self.assertEqual(ft.compute_checksum(target_location), ft.compute_checksum(os.path.join(toy_source_dir, fn)))
# make sure specified timeout is parsed correctly (as a float, not a string)
opts = init_config(args=['--download-timeout=5.3'])
init_config(build_options={'download_timeout': opts.download_timeout})
target_location = os.path.join(self.test_prefix, 'jenkins_robots.txt')
url = 'https://jenkins1.ugent.be/robots.txt'
try:
urllib2.urlopen(url)
res = ft.download_file(fn, url, target_location)
self.assertEqual(res, target_location, "download with specified timeout works")
except urllib2.URLError:
print "Skipping timeout test in test_download_file (working offline)"
# also test behaviour of download_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
target_location = os.path.join(self.test_prefix, 'foo')
if os.path.exists(target_location):
shutil.rmtree(target_location)
self.mock_stdout(True)
path = ft.download_file(fn, source_url, target_location)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertEqual(path, target_location)
self.assertFalse(os.path.exists(target_location))
self.assertTrue(re.match("^file written: .*/foo$", txt))
ft.download_file(fn, source_url, target_location, forced=True)
self.assertTrue(os.path.exists(target_location))
self.assertTrue(os.path.samefile(path, target_location))
def test_mkdir(self):
"""Test mkdir function."""
def check_mkdir(path, error=None, **kwargs):
"""Create specified directory with mkdir, and check for correctness."""
if error is None:
ft.mkdir(path, **kwargs)
self.assertTrue(os.path.exists(path) and os.path.isdir(path), "Directory %s exists" % path)
else:
self.assertErrorRegex(EasyBuildError, error, ft.mkdir, path, **kwargs)
foodir = os.path.join(self.test_prefix, 'foo')
barfoodir = os.path.join(self.test_prefix, 'bar', 'foo')
check_mkdir(foodir)
# no error on existing paths
check_mkdir(foodir)
# no recursion by defaults, requires parents=True
check_mkdir(barfoodir, error="Failed.*No such file or directory")
check_mkdir(barfoodir, parents=True)
check_mkdir(os.path.join(barfoodir, 'bar', 'foo', 'trolololol'), parents=True)
# group ID and sticky bits are disabled by default
self.assertFalse(os.stat(foodir).st_mode & (stat.S_ISGID | stat.S_ISVTX), "no gid/sticky bit %s" % foodir)
self.assertFalse(os.stat(barfoodir).st_mode & (stat.S_ISGID | stat.S_ISVTX), "no gid/sticky bit %s" % barfoodir)
# setting group ID bit works
giddir = os.path.join(foodir, 'gid')
check_mkdir(giddir, set_gid=True)
self.assertTrue(os.stat(giddir).st_mode & stat.S_ISGID, "gid bit set %s" % giddir)
self.assertFalse(os.stat(giddir).st_mode & stat.S_ISVTX, "no sticky bit %s" % giddir)
        # setting sticky bit works
stickydir = os.path.join(barfoodir, 'sticky')
check_mkdir(stickydir, sticky=True)
self.assertFalse(os.stat(stickydir).st_mode & stat.S_ISGID, "no gid bit %s" % stickydir)
self.assertTrue(os.stat(stickydir).st_mode & stat.S_ISVTX, "sticky bit set %s" % stickydir)
# setting both works, bits are set for all new subdirectories
stickygiddirs = [os.path.join(foodir, 'new')]
stickygiddirs.append(os.path.join(stickygiddirs[-1], 'sticky'))
stickygiddirs.append(os.path.join(stickygiddirs[-1], 'and'))
stickygiddirs.append(os.path.join(stickygiddirs[-1], 'gid'))
check_mkdir(stickygiddirs[-1], parents=True, set_gid=True, sticky=True)
for subdir in stickygiddirs:
gid_or_sticky = stat.S_ISGID | stat.S_ISVTX
self.assertEqual(os.stat(subdir).st_mode & gid_or_sticky, gid_or_sticky, "gid bit set %s" % subdir)
# existing parent dirs are untouched, no sticky/group ID bits set
self.assertFalse(os.stat(foodir).st_mode & (stat.S_ISGID | stat.S_ISVTX), "no gid/sticky bit %s" % foodir)
self.assertFalse(os.stat(barfoodir).st_mode & (stat.S_ISGID | stat.S_ISVTX), "no gid/sticky bit %s" % barfoodir)
def test_path_matches(self):
"""Test path_matches function."""
# set up temporary directories
path1 = os.path.join(self.test_prefix, 'path1')
ft.mkdir(path1)
path2 = os.path.join(self.test_prefix, 'path2')
ft.mkdir(path1)
symlink = os.path.join(self.test_prefix, 'symlink')
os.symlink(path1, symlink)
missing = os.path.join(self.test_prefix, 'missing')
self.assertFalse(ft.path_matches(missing, [path1, path2]))
self.assertFalse(ft.path_matches(path1, [missing]))
self.assertFalse(ft.path_matches(path1, [missing, path2]))
self.assertFalse(ft.path_matches(path2, [missing, symlink]))
self.assertTrue(ft.path_matches(path1, [missing, symlink]))
def test_is_readable(self):
"""Test is_readable"""
test_file = os.path.join(self.test_prefix, 'test.txt')
self.assertFalse(ft.is_readable(test_file))
ft.write_file(test_file, 'test')
self.assertTrue(ft.is_readable(test_file))
os.chmod(test_file, 0)
self.assertFalse(ft.is_readable(test_file))
def test_symlink_resolve_path(self):
"""Test symlink and resolve_path function"""
        # write_file and read_file are tested elsewhere, so they are not tested again here
test_dir = os.path.join(os.path.realpath(self.test_prefix), 'test')
ft.mkdir(test_dir)
link_dir = os.path.join(self.test_prefix, 'linkdir')
ft.symlink(test_dir, link_dir)
self.assertTrue(os.path.islink(link_dir))
self.assertTrue(os.path.exists(link_dir))
test_file = os.path.join(link_dir, 'test.txt')
ft.write_file(test_file, "test123")
# creating the link file
link = os.path.join(self.test_prefix, 'test.link')
ft.symlink(test_file, link)
# checking if file is symlink
self.assertTrue(os.path.islink(link))
self.assertTrue(os.path.exists(link_dir))
self.assertTrue(os.path.samefile(os.path.join(self.test_prefix, 'test', 'test.txt'), link))
# test resolve_path
self.assertEqual(test_dir, ft.resolve_path(link_dir))
self.assertEqual(os.path.join(os.path.realpath(self.test_prefix), 'test', 'test.txt'), ft.resolve_path(link))
self.assertEqual(ft.read_file(link), "test123")
self.assertErrorRegex(EasyBuildError, "Resolving path .* failed", ft.resolve_path, None)
def test_remove_symlinks(self):
"""Test remove valid and invalid symlinks"""
# creating test file
fp = os.path.join(self.test_prefix, 'test.txt')
txt = "test_my_link_file"
ft.write_file(fp, txt)
# creating the symlink
link = os.path.join(self.test_prefix, 'test.link')
        ft.symlink(fp, link)  # checking whether the symlink is valid is done elsewhere
# Attempting to remove a valid symlink
ft.remove_file(link)
self.assertFalse(os.path.islink(link))
self.assertFalse(os.path.exists(link))
# Testing the removal of invalid symlinks
# Restoring the symlink and removing the file, this way the symlink is invalid
ft.symlink(fp, link)
ft.remove_file(fp)
# attempting to remove the invalid symlink
ft.remove_file(link)
self.assertFalse(os.path.islink(link))
self.assertFalse(os.path.exists(link))
def test_read_write_file(self):
"""Test reading/writing files."""
fp = os.path.join(self.test_prefix, 'test.txt')
txt = "test123"
ft.write_file(fp, txt)
self.assertEqual(ft.read_file(fp), txt)
txt2 = '\n'.join(['test', '123'])
ft.write_file(fp, txt2, append=True)
self.assertEqual(ft.read_file(fp), txt+txt2)
# test backing up of existing file
ft.write_file(fp, 'foo', backup=True)
self.assertEqual(ft.read_file(fp), 'foo')
test_files = glob.glob(fp + '*')
self.assertEqual(len(test_files), 2)
backup1 = [x for x in test_files if os.path.basename(x) != 'test.txt'][0]
self.assertEqual(ft.read_file(backup1), txt + txt2)
ft.write_file(fp, 'bar', append=True, backup=True)
self.assertEqual(ft.read_file(fp), 'foobar')
test_files = glob.glob(fp + '*')
self.assertEqual(len(test_files), 3)
backup2 = [x for x in test_files if x != backup1 and os.path.basename(x) != 'test.txt'][0]
self.assertEqual(ft.read_file(backup1), txt + txt2)
self.assertEqual(ft.read_file(backup2), 'foo')
# also test behaviour of write_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
foo = os.path.join(self.test_prefix, 'foo.txt')
self.mock_stdout(True)
ft.write_file(foo, 'bar')
txt = self.get_stdout()
self.mock_stdout(False)
self.assertFalse(os.path.exists(foo))
self.assertTrue(re.match("^file written: .*/foo.txt$", txt))
ft.write_file(foo, 'bar', forced=True)
self.assertTrue(os.path.exists(foo))
self.assertEqual(ft.read_file(foo), 'bar')
def test_det_patched_files(self):
"""Test det_patched_files function."""
toy_patch_fn = 'toy-0.0_fix-silly-typo-in-printf-statement.patch'
pf = os.path.join(os.path.dirname(__file__), 'sandbox', 'sources', 'toy', toy_patch_fn)
self.assertEqual(ft.det_patched_files(pf), ['b/toy-0.0/toy.source'])
self.assertEqual(ft.det_patched_files(pf, omit_ab_prefix=True), ['toy-0.0/toy.source'])
def test_guess_patch_level(self):
"Test guess_patch_level."""
# create dummy toy.source file so guess_patch_level can work
f = open(os.path.join(self.test_buildpath, 'toy.source'), 'w')
f.write("This is toy.source")
f.close()
for patched_file, correct_patch_level in [
('toy.source', 0),
('b/toy.source', 1), # b/ prefix is used in +++ line in git diff patches
('a/toy.source', 1), # a/ prefix is used in --- line in git diff patches
('c/toy.source', 1),
('toy-0.0/toy.source', 1),
('b/toy-0.0/toy.source', 2),
]:
self.assertEqual(ft.guess_patch_level([patched_file], self.test_buildpath), correct_patch_level)
def test_back_up_file(self):
"""Test back_up_file function."""
fp = os.path.join(self.test_prefix, 'sandbox', 'test.txt')
txt = 'foobar'
ft.write_file(fp, txt)
known_files = ['test.txt']
self.assertEqual(sorted(os.listdir(os.path.dirname(fp))), known_files)
# Test simple file backup
res = ft.back_up_file(fp)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 2)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(os.path.samefile(res, os.path.join(self.test_prefix, 'sandbox', new_file)))
self.assertTrue(new_file.startswith('test.txt.bak_'))
first_normal_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
# Test hidden simple file backup
ft.back_up_file(fp, hidden=True)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 3)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('.test.txt.bak_'))
first_hidden_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
# Test simple file backup with empty extension
ft.back_up_file(fp, backup_extension='')
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 4)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('test.txt_'))
first_normal_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
        # Test hidden simple file backup with empty extension
ft.back_up_file(fp, hidden=True, backup_extension=None)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 5)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('.test.txt_'))
first_hidden_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
# Test simple file backup with custom extension
ft.back_up_file(fp, backup_extension='foobar')
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 6)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('test.txt.foobar_'))
first_bck_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
# Test hidden simple file backup with custom extension
ft.back_up_file(fp, backup_extension='bck', hidden=True)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 7)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('.test.txt.bck_'))
first_hidden_bck_backup = os.path.join(os.path.dirname(fp), new_file)
known_files = os.listdir(os.path.dirname(fp))
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), txt)
self.assertEqual(ft.read_file(fp), txt)
new_txt = 'barfoo'
ft.write_file(fp, new_txt)
self.assertEqual(len(os.listdir(os.path.dirname(fp))), 7)
# Test file backup with existing backup
ft.back_up_file(fp)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 8)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('test.txt.bak_'))
known_files = os.listdir(os.path.dirname(fp))
self.assertTrue(ft.read_file(first_normal_backup), txt)
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), new_txt)
self.assertEqual(ft.read_file(fp), new_txt)
# Test hidden file backup with existing backup
ft.back_up_file(fp, hidden=True, backup_extension=None)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 9)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('.test.txt_'))
known_files = os.listdir(os.path.dirname(fp))
self.assertTrue(ft.read_file(first_hidden_backup), txt)
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), new_txt)
self.assertEqual(ft.read_file(fp), new_txt)
# Test file backup with extension and existing backup
ft.back_up_file(fp, backup_extension='bck')
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 10)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('test.txt.bck_'))
known_files = os.listdir(os.path.dirname(fp))
self.assertTrue(ft.read_file(first_bck_backup), txt)
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), new_txt)
self.assertEqual(ft.read_file(fp), new_txt)
# Test hidden file backup with extension and existing backup
ft.back_up_file(fp, backup_extension='foobar', hidden=True)
test_files = os.listdir(os.path.dirname(fp))
self.assertEqual(len(test_files), 11)
new_file = [x for x in test_files if x not in known_files][0]
self.assertTrue(new_file.startswith('.test.txt.foobar_'))
known_files = os.listdir(os.path.dirname(fp))
self.assertTrue(ft.read_file(first_hidden_bck_backup), txt)
self.assertEqual(ft.read_file(os.path.join(os.path.dirname(fp), new_file)), new_txt)
self.assertEqual(ft.read_file(fp), new_txt)
# check whether strip_fn works as expected
fp2 = fp + '.lua'
ft.copy_file(fp, fp2)
res = ft.back_up_file(fp2)
self.assertTrue(fp2.endswith('.lua'))
self.assertTrue('.lua' in os.path.basename(res))
res = ft.back_up_file(fp2, strip_fn='.lua')
self.assertFalse('.lua' in os.path.basename(res))
def test_move_logs(self):
"""Test move_logs function."""
fp = os.path.join(self.test_prefix, 'test.txt')
ft.write_file(fp, 'foobar')
ft.write_file(fp + '.1', 'moarfoobar')
ft.move_logs(fp, os.path.join(self.test_prefix, 'foo.log'))
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, 'foo.log')), 'foobar')
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, 'foo.log.1')), 'moarfoobar')
ft.write_file(os.path.join(self.test_prefix, 'bar.log'), 'bar')
ft.write_file(os.path.join(self.test_prefix, 'bar.log_1'), 'barbar')
fp = os.path.join(self.test_prefix, 'test2.txt')
ft.write_file(fp, 'moarbar')
ft.write_file(fp + '.1', 'evenmoarbar')
ft.move_logs(fp, os.path.join(self.test_prefix, 'bar.log'))
logs = sorted([f for f in os.listdir(self.test_prefix) if 'log' in f])
self.assertEqual(len(logs), 7)
self.assertEqual(len([x for x in logs if x.startswith('eb-test-')]), 1)
self.assertEqual(len([x for x in logs if x.startswith('foo')]), 2)
self.assertEqual(len([x for x in logs if x.startswith('bar')]), 4)
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, 'bar.log_1')), 'barbar')
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, 'bar.log')), 'moarbar')
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, 'bar.log.1')), 'evenmoarbar')
# one more 'bar' log, the rotated copy of bar.log
other_bar = [x for x in logs if x.startswith('bar') and x not in ['bar.log', 'bar.log.1', 'bar.log_1']][0]
self.assertEqual(ft.read_file(os.path.join(self.test_prefix, other_bar)), 'bar')
def test_multidiff(self):
"""Test multidiff function."""
test_easyconfigs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
other_toy_ecs = [
os.path.join(test_easyconfigs, 't', 'toy', 'toy-0.0-deps.eb'),
os.path.join(test_easyconfigs, 't', 'toy', 'toy-0.0-gompi-1.3.12-test.eb'),
]
# default (colored)
toy_ec = os.path.join(test_easyconfigs, 't', 'toy', 'toy-0.0.eb')
lines = multidiff(toy_ec, other_toy_ecs).split('\n')
expected = "Comparing \x1b[0;35mtoy-0.0.eb\x1b[0m with toy-0.0-deps.eb, toy-0.0-gompi-1.3.12-test.eb"
red = "\x1b[0;41m"
green = "\x1b[0;42m"
endcol = "\x1b[0m"
self.assertEqual(lines[0], expected)
self.assertEqual(lines[1], "=====")
# different versionsuffix
self.assertTrue(lines[2].startswith("3 %s- versionsuffix = '-test'%s (1/2) toy-0.0-" % (red, endcol)))
self.assertTrue(lines[3].startswith("3 %s- versionsuffix = '-deps'%s (1/2) toy-0.0-" % (red, endcol)))
        # different toolchain in toy-0.0-gompi-1.3.12-test: '-' line (removed chars in toolchain name/version, in red)
expected = "7 %(endcol)s-%(endcol)s toolchain = {"
expected += "'name': '%(endcol)s%(red)sgo%(endcol)sm\x1b[0m%(red)spi%(endcol)s', "
expected = expected % {'endcol': endcol, 'green': green, 'red': red}
self.assertTrue(lines[7].startswith(expected))
# different toolchain in toy-0.0-gompi-1.3.12-test: '+' line (added chars in toolchain name/version, in green)
expected = "7 %(endcol)s+%(endcol)s toolchain = {"
expected += "'name': '%(endcol)s%(green)sdu%(endcol)sm\x1b[0m%(green)smy%(endcol)s', "
expected = expected % {'endcol': endcol, 'green': green, 'red': red}
self.assertTrue(lines[8].startswith(expected))
# no postinstallcmds in toy-0.0-deps.eb
expected = "29 %s+ postinstallcmds = " % green
self.assertTrue(any([line.startswith(expected) for line in lines]))
expected = "30 %s+%s (1/2) toy-0.0" % (green, endcol)
self.assertTrue(any(l.startswith(expected) for l in lines), "Found '%s' in: %s" % (expected, lines))
self.assertEqual(lines[-1], "=====")
lines = multidiff(toy_ec, other_toy_ecs, colored=False).split('\n')
self.assertEqual(lines[0], "Comparing toy-0.0.eb with toy-0.0-deps.eb, toy-0.0-gompi-1.3.12-test.eb")
self.assertEqual(lines[1], "=====")
# different versionsuffix
self.assertTrue(lines[2].startswith("3 - versionsuffix = '-test' (1/2) toy-0.0-"))
self.assertTrue(lines[3].startswith("3 - versionsuffix = '-deps' (1/2) toy-0.0-"))
        # different toolchain in toy-0.0-gompi-1.3.12-test: '-' line with squiggly line underneath to mark removed chars
expected = "7 - toolchain = {'name': 'gompi', 'version': '1.3.12'} (1/2) toy"
self.assertTrue(lines[7].startswith(expected))
expected = " ? ^^ ^^ "
self.assertTrue(lines[8].startswith(expected))
        # different toolchain in toy-0.0-gompi-1.3.12-test: '+' line with squiggly line underneath to mark added chars
expected = "7 + toolchain = {'name': 'dummy', 'version': 'dummy'} (1/2) toy"
self.assertTrue(lines[9].startswith(expected))
expected = " ? ^^ ^^ "
self.assertTrue(lines[10].startswith(expected))
# no postinstallcmds in toy-0.0-deps.eb
expected = "29 + postinstallcmds = "
self.assertTrue(any(l.startswith(expected) for l in lines), "Found '%s' in: %s" % (expected, lines))
expected = "30 + (1/2) toy-0.0-"
self.assertTrue(any(l.startswith(expected) for l in lines), "Found '%s' in: %s" % (expected, lines))
self.assertEqual(lines[-1], "=====")
def test_weld_paths(self):
"""Test weld_paths."""
        # works like os.path.join if there's no overlap
self.assertEqual(ft.weld_paths('/foo/bar', 'foobar/baz'), '/foo/bar/foobar/baz/')
self.assertEqual(ft.weld_paths('foo', 'bar/'), 'foo/bar/')
self.assertEqual(ft.weld_paths('foo/', '/bar'), '/bar/')
self.assertEqual(ft.weld_paths('/foo/', '/bar'), '/bar/')
# overlap is taken into account
self.assertEqual(ft.weld_paths('foo/bar', 'bar/baz'), 'foo/bar/baz/')
self.assertEqual(ft.weld_paths('foo/bar/baz', 'bar/baz'), 'foo/bar/baz/')
self.assertEqual(ft.weld_paths('foo/bar', 'foo/bar/baz'), 'foo/bar/baz/')
self.assertEqual(ft.weld_paths('foo/bar', 'foo/bar'), 'foo/bar/')
self.assertEqual(ft.weld_paths('/foo/bar', 'foo/bar'), '/foo/bar/')
self.assertEqual(ft.weld_paths('/foo/bar', '/foo/bar'), '/foo/bar/')
self.assertEqual(ft.weld_paths('/foo', '/foo/bar/baz'), '/foo/bar/baz/')
def test_expand_glob_paths(self):
"""Test expand_glob_paths function."""
for dirname in ['empty_dir', 'test_dir']:
ft.mkdir(os.path.join(self.test_prefix, dirname), parents=True)
for filename in ['file1.txt', 'test_dir/file2.txt', 'test_dir/file3.txt', 'test_dir2/file4.dat']:
ft.write_file(os.path.join(self.test_prefix, filename), 'gibberish')
globs = [os.path.join(self.test_prefix, '*.txt'), os.path.join(self.test_prefix, '*', '*')]
expected = [
os.path.join(self.test_prefix, 'file1.txt'),
os.path.join(self.test_prefix, 'test_dir', 'file2.txt'),
os.path.join(self.test_prefix, 'test_dir', 'file3.txt'),
os.path.join(self.test_prefix, 'test_dir2', 'file4.dat'),
]
self.assertEqual(sorted(ft.expand_glob_paths(globs)), sorted(expected))
# passing non-glob patterns is fine too
file2 = os.path.join(self.test_prefix, 'test_dir', 'file2.txt')
self.assertEqual(ft.expand_glob_paths([file2]), [file2])
# test expanding of '~' into $HOME value
# hard overwrite $HOME in environment (used by os.path.expanduser) so we can reliably test this
new_home = os.path.join(self.test_prefix, 'home')
ft.mkdir(new_home, parents=True)
ft.write_file(os.path.join(new_home, 'test.txt'), 'test')
os.environ['HOME'] = new_home
self.assertEqual(ft.expand_glob_paths(['~/*.txt']), [os.path.join(new_home, 'test.txt')])
# check behaviour if glob that has no (file) matches is passed
glob_pat = os.path.join(self.test_prefix, 'test_*')
self.assertErrorRegex(EasyBuildError, "No files found using glob pattern", ft.expand_glob_paths, [glob_pat])
def test_adjust_permissions(self):
"""Test adjust_permissions"""
# set umask hard to run test reliably
orig_umask = os.umask(0022)
        # prep files/dirs/(broken) symlinks in test dir
# file: rw-r--r--
ft.write_file(os.path.join(self.test_prefix, 'foo'), 'foo')
foo_perms = os.stat(os.path.join(self.test_prefix, 'foo'))[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IRGRP, stat.S_IROTH]:
self.assertTrue(foo_perms & bit)
for bit in [stat.S_IXUSR, stat.S_IWGRP, stat.S_IXGRP, stat.S_IWOTH, stat.S_IXOTH]:
self.assertFalse(foo_perms & bit)
# dir: rwxr-xr-x
ft.mkdir(os.path.join(self.test_prefix, 'bar'))
bar_perms = os.stat(os.path.join(self.test_prefix, 'bar'))[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_IRGRP, stat.S_IXGRP, stat.S_IROTH, stat.S_IXOTH]:
self.assertTrue(bar_perms & bit)
for bit in [stat.S_IWGRP, stat.S_IWOTH]:
self.assertFalse(bar_perms & bit)
# file in dir: rw-r--r--
foobar_path = os.path.join(self.test_prefix, 'bar', 'foobar')
ft.write_file(foobar_path, 'foobar')
foobar_perms = os.stat(foobar_path)[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IRGRP, stat.S_IROTH]:
self.assertTrue(foobar_perms & bit)
for bit in [stat.S_IXUSR, stat.S_IWGRP, stat.S_IXGRP, stat.S_IWOTH, stat.S_IXOTH]:
self.assertFalse(foobar_perms & bit)
# include symlink
os.symlink(foobar_path, os.path.join(self.test_prefix, 'foobar_symlink'))
# include broken symlink (symlinks are skipped, so this shouldn't cause problems)
tmpfile = os.path.join(self.test_prefix, 'thiswontbetherelong')
ft.write_file(tmpfile, 'poof!')
os.symlink(tmpfile, os.path.join(self.test_prefix, 'broken_symlink'))
os.remove(tmpfile)
# test default behaviour:
# recursive, add permissions, relative to existing permissions, both files and dirs, skip symlinks
# add user execution, group write permissions
ft.adjust_permissions(self.test_prefix, stat.S_IXUSR|stat.S_IWGRP)
# foo file: rwxrw-r--
foo_perms = os.stat(os.path.join(self.test_prefix, 'foo'))[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_IRGRP, stat.S_IWGRP, stat.S_IROTH]:
self.assertTrue(foo_perms & bit)
for bit in [stat.S_IXGRP, stat.S_IWOTH, stat.S_IXOTH]:
self.assertFalse(foo_perms & bit)
# bar dir: rwxrwxr-x
bar_perms = os.stat(os.path.join(self.test_prefix, 'bar'))[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP,
stat.S_IROTH, stat.S_IXOTH]:
self.assertTrue(bar_perms & bit)
self.assertFalse(bar_perms & stat.S_IWOTH)
# foo/foobar file: rwxrw-r--
for path in [os.path.join(self.test_prefix, 'bar', 'foobar'), os.path.join(self.test_prefix, 'foobar_symlink')]:
perms = os.stat(path)[stat.ST_MODE]
for bit in [stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_IRGRP, stat.S_IWGRP, stat.S_IROTH]:
self.assertTrue(perms & bit)
for bit in [stat.S_IXGRP, stat.S_IWOTH, stat.S_IXOTH]:
self.assertFalse(perms & bit)
# broken symlinks are trouble if symlinks are not skipped
self.assertErrorRegex(EasyBuildError, "No such file or directory", ft.adjust_permissions, self.test_prefix,
stat.S_IXUSR, skip_symlinks=False)
# restore original umask
os.umask(orig_umask)
def test_adjust_permissions_max_fail_ratio(self):
"""Test ratio of allowed failures when adjusting permissions"""
# set up symlinks in test directory that can be broken to test allowed failure ratio of adjust_permissions
testdir = os.path.join(self.test_prefix, 'test123')
test_files = []
for idx in range(0, 3):
test_files.append(os.path.join(testdir, 'tmp%s' % idx))
ft.write_file(test_files[-1], '')
ft.symlink(test_files[-1], os.path.join(testdir, 'symlink%s' % idx))
# by default, 50% of failures are allowed (to be robust against broken symlinks)
perms = stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR
        # one file removed, 1 dir + 2 files + 3 symlinks (of which 1 broken) left => 1/6 (16%) fail ratio is OK
ft.remove_file(test_files[0])
ft.adjust_permissions(testdir, perms, recursive=True, skip_symlinks=False, ignore_errors=True)
# 2 files removed, 1 dir + 1 file + 3 symlinks (of which 2 broken) left => 2/5 (40%) fail ratio is OK
ft.remove_file(test_files[1])
ft.adjust_permissions(testdir, perms, recursive=True, skip_symlinks=False, ignore_errors=True)
# 3 files removed, 1 dir + 3 broken symlinks => 75% fail ratio is too high, so error is raised
ft.remove_file(test_files[2])
error_pattern = r"75.00% of permissions/owner operations failed \(more than 50.00%\), something must be wrong"
self.assertErrorRegex(EasyBuildError, error_pattern, ft.adjust_permissions, testdir, perms,
recursive=True, skip_symlinks=False, ignore_errors=True)
# reconfigure EasyBuild to allow even higher fail ratio (80%)
build_options = {
'max_fail_ratio_adjust_permissions': 0.8,
}
init_config(build_options=build_options)
# 75% < 80%, so OK
ft.adjust_permissions(testdir, perms, recursive=True, skip_symlinks=False, ignore_errors=True)
# reconfigure to allow less failures (10%)
build_options = {
'max_fail_ratio_adjust_permissions': 0.1,
}
init_config(build_options=build_options)
# way too many failures with 3 broken symlinks
error_pattern = r"75.00% of permissions/owner operations failed \(more than 10.00%\), something must be wrong"
self.assertErrorRegex(EasyBuildError, error_pattern, ft.adjust_permissions, testdir, perms,
recursive=True, skip_symlinks=False, ignore_errors=True)
# one broken symlink is still too much with max fail ratio of 10%
ft.write_file(test_files[0], '')
ft.write_file(test_files[1], '')
error_pattern = r"16.67% of permissions/owner operations failed \(more than 10.00%\), something must be wrong"
self.assertErrorRegex(EasyBuildError, error_pattern, ft.adjust_permissions, testdir, perms,
recursive=True, skip_symlinks=False, ignore_errors=True)
# all files restored, no more broken symlinks, so OK
ft.write_file(test_files[2], '')
ft.adjust_permissions(testdir, perms, recursive=True, skip_symlinks=False, ignore_errors=True)
def test_apply_regex_substitutions(self):
"""Test apply_regex_substitutions function."""
testfile = os.path.join(self.test_prefix, 'test.txt')
testtxt = '\n'.join([
"CC = gcc",
"CFLAGS = -O3 -g",
"FC = gfortran",
"FFLAGS = -O3 -g -ffixed-form",
])
ft.write_file(testfile, testtxt)
regex_subs = [
(r"^(CC)\s*=\s*.*$", r"\1 = ${CC}"),
(r"^(FC\s*=\s*).*$", r"\1${FC}"),
(r"^(.FLAGS)\s*=\s*-O3\s-g(.*)$", r"\1 = -O2\2"),
]
ft.apply_regex_substitutions(testfile, regex_subs)
expected_testtxt = '\n'.join([
"CC = ${CC}",
"CFLAGS = -O2",
"FC = ${FC}",
"FFLAGS = -O2 -ffixed-form",
])
new_testtxt = ft.read_file(testfile)
self.assertEqual(new_testtxt, expected_testtxt)
        # passing empty list of substitutions is a no-op
ft.write_file(testfile, testtxt)
ft.apply_regex_substitutions(testfile, [])
new_testtxt = ft.read_file(testfile)
self.assertEqual(new_testtxt, testtxt)
# clean error on non-existing file
error_pat = "Failed to patch .*/nosuchfile.txt: .*No such file or directory"
path = os.path.join(self.test_prefix, 'nosuchfile.txt')
self.assertErrorRegex(EasyBuildError, error_pat, ft.apply_regex_substitutions, path, regex_subs)
def test_find_flexlm_license(self):
"""Test find_flexlm_license function."""
lic_file1 = os.path.join(self.test_prefix, 'one.lic')
ft.write_file(lic_file1, "This is a license file (no, really!)")
lic_file2 = os.path.join(self.test_prefix, 'two.dat')
ft.write_file(lic_file2, "This is another license file (sure it is!)")
lic_server = '[email protected]'
# make test robust against environment in which $LM_LICENSE_FILE is defined
if 'LM_LICENSE_FILE' in os.environ:
del os.environ['LM_LICENSE_FILE']
# default return value
self.assertEqual(ft.find_flexlm_license(), ([], None))
# provided license spec
self.assertEqual(ft.find_flexlm_license(lic_specs=[lic_file1]), ([lic_file1], None))
self.assertEqual(ft.find_flexlm_license(lic_specs=[lic_server, lic_file2]), ([lic_server, lic_file2], None))
# non-existing license file
os.environ['LM_LICENSE_FILE'] = '/no/such/file/unless/you/aim/to/break/this/check'
self.assertEqual(ft.find_flexlm_license(), ([], None))
# existing license file
os.environ['LM_LICENSE_FILE'] = lic_file2
self.assertEqual(ft.find_flexlm_license(), ([lic_file2], 'LM_LICENSE_FILE'))
# directory with existing license files
os.environ['LM_LICENSE_FILE'] = self.test_prefix
self.assertEqual(ft.find_flexlm_license(), ([lic_file1, lic_file2], 'LM_LICENSE_FILE'))
# server spec
os.environ['LM_LICENSE_FILE'] = lic_server
self.assertEqual(ft.find_flexlm_license(), ([lic_server], 'LM_LICENSE_FILE'))
# duplicates are filtered out, order is maintained
os.environ['LM_LICENSE_FILE'] = ':'.join([lic_file1, lic_server, self.test_prefix, lic_file2, lic_file1])
self.assertEqual(ft.find_flexlm_license(), ([lic_file1, lic_server, lic_file2], 'LM_LICENSE_FILE'))
# invalid server spec (missing port)
os.environ['LM_LICENSE_FILE'] = 'test.license.server'
self.assertEqual(ft.find_flexlm_license(), ([], None))
        # env var wins over provided lic spec
os.environ['LM_LICENSE_FILE'] = lic_file2
self.assertEqual(ft.find_flexlm_license(lic_specs=[lic_server]), ([lic_file2], 'LM_LICENSE_FILE'))
# custom env var wins over $LM_LICENSE_FILE
os.environ['INTEL_LICENSE_FILE'] = lic_file1
expected = ([lic_file1], 'INTEL_LICENSE_FILE')
self.assertEqual(ft.find_flexlm_license(custom_env_vars='INTEL_LICENSE_FILE'), expected)
self.assertEqual(ft.find_flexlm_license(custom_env_vars=['INTEL_LICENSE_FILE']), expected)
self.assertEqual(ft.find_flexlm_license(custom_env_vars=['NOSUCHENVVAR', 'INTEL_LICENSE_FILE']), expected)
# $LM_LICENSE_FILE is always considered
os.environ['LM_LICENSE_FILE'] = lic_server
os.environ['INTEL_LICENSE_FILE'] = '/no/such/file/unless/you/aim/to/break/this/check'
expected = ([lic_server], 'LM_LICENSE_FILE')
self.assertEqual(ft.find_flexlm_license(custom_env_vars=['INTEL_LICENSE_FILE']), expected)
# license server *and* file spec; order is preserved
os.environ['LM_LICENSE_FILE'] = ':'.join([lic_file2, lic_server, lic_file1])
self.assertEqual(ft.find_flexlm_license(), ([lic_file2, lic_server, lic_file1], 'LM_LICENSE_FILE'))
# typical usage
os.environ['LM_LICENSE_FILE'] = lic_server
os.environ['INTEL_LICENSE_FILE'] = '/not/a/valid/license/path:%s:/another/bogus/license/file' % lic_file2
expected = ([lic_file2], 'INTEL_LICENSE_FILE')
self.assertEqual(ft.find_flexlm_license(custom_env_vars='INTEL_LICENSE_FILE'), expected)
os.environ['INTEL_LICENSE_FILE'] = '[email protected]:[email protected]:[email protected]'
expected = (['[email protected]', '[email protected]', '[email protected]'], 'INTEL_LICENSE_FILE')
self.assertEqual(ft.find_flexlm_license(custom_env_vars=['INTEL_LICENSE_FILE']), expected)
# make sure find_flexlm_license is robust against None input;
# this occurs if license_file is left unspecified
del os.environ['INTEL_LICENSE_FILE']
del os.environ['LM_LICENSE_FILE']
self.assertEqual(ft.find_flexlm_license(lic_specs=[None]), ([], None))
def test_is_patch_file(self):
"""Test for is_patch_file() function."""
testdir = os.path.dirname(os.path.abspath(__file__))
self.assertFalse(ft.is_patch_file(os.path.join(testdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')))
toy_patch_fn = 'toy-0.0_fix-silly-typo-in-printf-statement.patch'
self.assertTrue(ft.is_patch_file(os.path.join(testdir, 'sandbox', 'sources', 'toy', toy_patch_fn)))
def test_is_alt_pypi_url(self):
"""Test is_alt_pypi_url() function."""
url = 'https://pypi.python.org/packages/source/e/easybuild/easybuild-2.7.0.tar.gz'
self.assertFalse(ft.is_alt_pypi_url(url))
url = url.replace('source/e/easybuild', '5b/03/e135b19fadeb9b1ccb45eac9f60ca2dc3afe72d099f6bd84e03cb131f9bf')
self.assertTrue(ft.is_alt_pypi_url(url))
def test_pypi_source_urls(self):
"""Test pypi_source_urls() function."""
res = ft.pypi_source_urls('easybuild')
eb340_url = 'https://pypi.python.org/packages/'
eb340_url += '93/41/574d01f352671fbc8589a436167e15a7f3e27ac0aa635d208eb29ee8fd4e/'
eb340_url += 'easybuild-3.4.0.tar.gz#md5=267a056a77a8f77fccfbf56354364045'
        self.assertIn(eb340_url, res)
pattern = '^https://pypi.python.org/packages/[a-f0-9]{2}/[a-f0-9]{2}/[a-f0-9]{60}/'
pattern_md5 = pattern + 'easybuild-[0-9rc.]+.tar.gz#md5=[a-f0-9]{32}$'
pattern_sha256 = pattern + 'easybuild-[0-9rc.]+.tar.gz#sha256=[a-f0-9]{64}$'
regex_md5 = re.compile(pattern_md5)
regex_sha256 = re.compile(pattern_sha256)
for url in res:
error_msg = "Pattern '%s' or '%s' matches for '%s'" % (regex_md5.pattern, regex_sha256.pattern, url)
self.assertTrue(regex_md5.match(url) or regex_sha256.match(url), error_msg)
# more than 50 releases at time of writing test, which always stay there
self.assertTrue(len(res) > 50)
def test_derive_alt_pypi_url(self):
"""Test derive_alt_pypi_url() function."""
url = 'https://pypi.python.org/packages/source/e/easybuild/easybuild-2.7.0.tar.gz'
alturl = url.replace('source/e/easybuild', '5b/03/e135b19fadeb9b1ccb45eac9f60ca2dc3afe72d099f6bd84e03cb131f9bf')
self.assertEqual(ft.derive_alt_pypi_url(url), alturl)
# test case to ensure that '.' characters in filename are escaped using '\.'
# if not, the alternative URL for tornado-4.5b1.tar.gz is found...
url = 'https://pypi.python.org/packages/source/t/tornado/tornado-4.5.1.tar.gz'
alturl = url.replace('source/t/tornado', 'df/42/a180ee540e12e2ec1007ac82a42b09dd92e5461e09c98bf465e98646d187')
self.assertEqual(ft.derive_alt_pypi_url(url), alturl)
# no crash on non-existing version
url = 'https://pypi.python.org/packages/source/e/easybuild/easybuild-0.0.0.tar.gz'
self.assertEqual(ft.derive_alt_pypi_url(url), None)
# no crash on non-existing package
url = 'https://pypi.python.org/packages/source/n/nosuchpackageonpypiever/nosuchpackageonpypiever-0.0.0.tar.gz'
self.assertEqual(ft.derive_alt_pypi_url(url), None)
def test_apply_patch(self):
""" Test apply_patch """
testdir = os.path.dirname(os.path.abspath(__file__))
tmpdir = self.test_prefix
path = ft.extract_file(os.path.join(testdir, 'sandbox', 'sources', 'toy', 'toy-0.0.tar.gz'), tmpdir)
toy_patch_fn = 'toy-0.0_fix-silly-typo-in-printf-statement.patch'
toy_patch = os.path.join(testdir, 'sandbox', 'sources', 'toy', toy_patch_fn)
self.assertTrue(ft.apply_patch(toy_patch, path))
patched = ft.read_file(os.path.join(path, 'toy-0.0', 'toy.source'))
pattern = "I'm a toy, and very proud of it"
self.assertTrue(pattern in patched)
# This patch is dependent on the previous one
toy_patch_gz = os.path.join(testdir, 'sandbox', 'sources', 'toy', 'toy-0.0_gzip.patch.gz')
self.assertTrue(ft.apply_patch(toy_patch_gz, path))
patched_gz = ft.read_file(os.path.join(path, 'toy-0.0', 'toy.source'))
pattern = "I'm a toy, and very very proud of it"
self.assertTrue(pattern in patched_gz)
# trying the patch again should fail
self.assertErrorRegex(EasyBuildError, "Couldn't apply patch file", ft.apply_patch, toy_patch, path)
def test_copy_file(self):
""" Test copy_file """
testdir = os.path.dirname(os.path.abspath(__file__))
to_copy = os.path.join(testdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
target_path = os.path.join(self.test_prefix, 'toy.eb')
ft.copy_file(to_copy, target_path)
self.assertTrue(os.path.exists(target_path))
self.assertTrue(ft.read_file(to_copy) == ft.read_file(target_path))
# clean error when trying to copy a directory with copy_file
src, target = os.path.dirname(to_copy), os.path.join(self.test_prefix, 'toy')
self.assertErrorRegex(EasyBuildError, "Failed to copy file.*Is a directory", ft.copy_file, src, target)
# also test behaviour of copy_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
# remove target file, it shouldn't get copied under dry run
os.remove(target_path)
self.mock_stdout(True)
ft.copy_file(to_copy, target_path)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertFalse(os.path.exists(target_path))
self.assertTrue(re.search("^copied file .*/toy-0.0.eb to .*/toy.eb", txt))
# forced copy, even in dry run mode
self.mock_stdout(True)
ft.copy_file(to_copy, target_path, force_in_dry_run=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(os.path.exists(target_path))
self.assertTrue(ft.read_file(to_copy) == ft.read_file(target_path))
self.assertEqual(txt, '')
def test_copy_dir(self):
"""Test copy_file"""
testdir = os.path.dirname(os.path.abspath(__file__))
to_copy = os.path.join(testdir, 'easyconfigs', 'test_ecs', 'g', 'GCC')
target_dir = os.path.join(self.test_prefix, 'GCC')
self.assertFalse(os.path.exists(target_dir))
self.assertTrue(os.path.exists(os.path.join(to_copy, 'GCC-4.7.2.eb')))
ft.copy_dir(to_copy, target_dir, ignore=lambda src, names: [x for x in names if '4.7.2' in x])
self.assertTrue(os.path.exists(target_dir))
expected = ['GCC-4.6.3.eb', 'GCC-4.6.4.eb', 'GCC-4.8.2.eb', 'GCC-4.8.3.eb', 'GCC-4.9.2.eb', 'GCC-4.9.3-2.25.eb']
self.assertEqual(sorted(os.listdir(target_dir)), expected)
        # GCC-4.7.2.eb should not get copied, since it's specified as a file to ignore
self.assertFalse(os.path.exists(os.path.join(target_dir, 'GCC-4.7.2.eb')))
# clean error when trying to copy a file with copy_dir
src, target = os.path.join(to_copy, 'GCC-4.6.3.eb'), os.path.join(self.test_prefix, 'GCC-4.6.3.eb')
self.assertErrorRegex(EasyBuildError, "Failed to copy directory.*Not a directory", ft.copy_dir, src, target)
# if directory already exists, we expect a clean error
testdir = os.path.join(self.test_prefix, 'thisdirexists')
ft.mkdir(testdir)
self.assertErrorRegex(EasyBuildError, "Target location .* already exists", ft.copy_dir, to_copy, testdir)
        # also test behaviour of copy_dir under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
shutil.rmtree(target_dir)
self.assertFalse(os.path.exists(target_dir))
# no actual copying in dry run mode, unless forced
self.mock_stdout(True)
ft.copy_dir(to_copy, target_dir)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertFalse(os.path.exists(target_dir))
self.assertTrue(re.search("^copied directory .*/GCC to .*/GCC", txt))
# forced copy, even in dry run mode
self.mock_stdout(True)
ft.copy_dir(to_copy, target_dir, force_in_dry_run=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(os.path.exists(target_dir))
self.assertTrue(sorted(os.listdir(to_copy)) == sorted(os.listdir(target_dir)))
self.assertEqual(txt, '')
def test_copy(self):
"""Test copy function."""
testdir = os.path.dirname(os.path.abspath(__file__))
toy_file = os.path.join(testdir, 'easyconfigs', 'test_ecs', 't', 'toy', 'toy-0.0.eb')
toy_patch_fn = 'toy-0.0_fix-silly-typo-in-printf-statement.patch'
toy_patch = os.path.join(testdir, 'sandbox', 'sources', 'toy', toy_patch_fn)
gcc_dir = os.path.join(testdir, 'easyconfigs', 'test_ecs', 'g', 'GCC')
ft.copy([toy_file, gcc_dir, toy_patch], self.test_prefix)
self.assertTrue(os.path.isdir(os.path.join(self.test_prefix, 'GCC')))
for filepath in ['GCC/GCC-4.6.3.eb', 'GCC/GCC-4.9.2.eb', 'toy-0.0.eb', toy_patch_fn]:
self.assertTrue(os.path.isfile(os.path.join(self.test_prefix, filepath)))
# test copying of a single file, to a non-existing directory
ft.copy(toy_file, os.path.join(self.test_prefix, 'foo'))
self.assertTrue(os.path.isfile(os.path.join(self.test_prefix, 'foo', 'toy-0.0.eb')))
# also test behaviour of copy under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
# no actual copying in dry run mode, unless forced
self.mock_stdout(True)
to_copy = [os.path.dirname(toy_file), os.path.join(gcc_dir, 'GCC-4.6.3.eb')]
ft.copy(to_copy, self.test_prefix)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertFalse(os.path.exists(os.path.join(self.test_prefix, 'toy')))
self.assertFalse(os.path.exists(os.path.join(self.test_prefix, 'GCC-4.6.3.eb')))
self.assertTrue(re.search("^copied directory .*/toy to .*/toy", txt, re.M))
self.assertTrue(re.search("^copied file .*/GCC-4.6.3.eb to .*/GCC-4.6.3.eb", txt, re.M))
# forced copy, even in dry run mode
self.mock_stdout(True)
ft.copy(to_copy, self.test_prefix, force_in_dry_run=True)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(os.path.isdir(os.path.join(self.test_prefix, 'toy')))
self.assertTrue(os.path.isfile(os.path.join(self.test_prefix, 'toy', 'toy-0.0.eb')))
self.assertTrue(os.path.isfile(os.path.join(self.test_prefix, 'GCC-4.6.3.eb')))
self.assertEqual(txt, '')
def test_change_dir(self):
"""Test change_dir"""
prev_dir = ft.change_dir(self.test_prefix)
self.assertTrue(os.path.samefile(os.getcwd(), self.test_prefix))
self.assertNotEqual(prev_dir, None)
# prepare another directory to play around with
test_path = os.path.join(self.test_prefix, 'anotherdir')
ft.mkdir(test_path)
# check return value (previous location)
prev_dir = ft.change_dir(test_path)
self.assertTrue(os.path.samefile(os.getcwd(), test_path))
self.assertTrue(os.path.samefile(prev_dir, self.test_prefix))
# check behaviour when current working directory does not exist anymore
shutil.rmtree(test_path)
prev_dir = ft.change_dir(self.test_prefix)
self.assertTrue(os.path.samefile(os.getcwd(), self.test_prefix))
self.assertEqual(prev_dir, None)
foo = os.path.join(self.test_prefix, 'foo')
self.assertErrorRegex(EasyBuildError, "Failed to change from .* to %s" % foo, ft.change_dir, foo)
def test_extract_file(self):
"""Test extract_file"""
testdir = os.path.dirname(os.path.abspath(__file__))
toy_tarball = os.path.join(testdir, 'sandbox', 'sources', 'toy', 'toy-0.0.tar.gz')
self.assertFalse(os.path.exists(os.path.join(self.test_prefix, 'toy-0.0', 'toy.source')))
path = ft.extract_file(toy_tarball, self.test_prefix)
self.assertTrue(os.path.exists(os.path.join(self.test_prefix, 'toy-0.0', 'toy.source')))
self.assertTrue(os.path.samefile(path, self.test_prefix))
shutil.rmtree(os.path.join(path, 'toy-0.0'))
toy_tarball_renamed = os.path.join(self.test_prefix, 'toy_tarball')
shutil.copyfile(toy_tarball, toy_tarball_renamed)
path = ft.extract_file(toy_tarball_renamed, self.test_prefix, cmd="tar xfvz %s")
self.assertTrue(os.path.exists(os.path.join(self.test_prefix, 'toy-0.0', 'toy.source')))
self.assertTrue(os.path.samefile(path, self.test_prefix))
shutil.rmtree(os.path.join(path, 'toy-0.0'))
# also test behaviour of extract_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
self.mock_stdout(True)
path = ft.extract_file(toy_tarball, self.test_prefix)
txt = self.get_stdout()
self.mock_stdout(False)
self.assertTrue(os.path.samefile(path, self.test_prefix))
self.assertFalse(os.path.exists(os.path.join(self.test_prefix, 'toy-0.0')))
self.assertTrue(re.search('running command "tar xzf .*/toy-0.0.tar.gz"', txt))
path = ft.extract_file(toy_tarball, self.test_prefix, forced=True)
self.assertTrue(os.path.exists(os.path.join(self.test_prefix, 'toy-0.0', 'toy.source')))
self.assertTrue(os.path.samefile(path, self.test_prefix))
def test_remove_file(self):
"""Test remove_file"""
testfile = os.path.join(self.test_prefix, 'foo')
ft.write_file(testfile, 'bar')
self.assertTrue(os.path.exists(testfile))
ft.remove_file(testfile)
ft.write_file(testfile, 'bar')
ft.adjust_permissions(self.test_prefix, stat.S_IWUSR|stat.S_IWGRP|stat.S_IWOTH, add=False)
self.assertErrorRegex(EasyBuildError, "Failed to remove", ft.remove_file, testfile)
# also test behaviour of remove_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
self.mock_stdout(True)
ft.remove_file(testfile)
txt = self.get_stdout()
self.mock_stdout(False)
regex = re.compile("^file [^ ]* removed$")
self.assertTrue(regex.match(txt), "Pattern '%s' found in: %s" % (regex.pattern, txt))
def test_search_file(self):
"""Test search_file function."""
test_ecs = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'test_ecs')
# check for default semantics, test case-insensitivity
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', silent=True)
self.assertEqual(var_defs, [])
self.assertEqual(len(hits), 2)
self.assertTrue(all(os.path.exists(p) for p in hits))
self.assertTrue(hits[0].endswith('/hwloc-1.6.2-GCC-4.6.4.eb'))
self.assertTrue(hits[1].endswith('/hwloc-1.6.2-GCC-4.7.2.eb'))
# check filename-only mode
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', silent=True, filename_only=True)
self.assertEqual(var_defs, [])
self.assertEqual(hits, ['hwloc-1.6.2-GCC-4.6.4.eb', 'hwloc-1.6.2-GCC-4.7.2.eb'])
# check specifying of ignored dirs
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', silent=True, ignore_dirs=['hwloc'])
self.assertEqual(var_defs + hits, [])
# check short mode
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', silent=True, short=True)
self.assertEqual(var_defs, [('CFGS1', os.path.join(test_ecs, 'h', 'hwloc'))])
self.assertEqual(hits, ['$CFGS1/hwloc-1.6.2-GCC-4.6.4.eb', '$CFGS1/hwloc-1.6.2-GCC-4.7.2.eb'])
# check terse mode (implies 'silent', overrides 'short')
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', terse=True, short=True)
self.assertEqual(var_defs, [])
expected = [
os.path.join(test_ecs, 'h', 'hwloc', 'hwloc-1.6.2-GCC-4.6.4.eb'),
os.path.join(test_ecs, 'h', 'hwloc', 'hwloc-1.6.2-GCC-4.7.2.eb'),
]
self.assertEqual(hits, expected)
# check combo of terse and filename-only
var_defs, hits = ft.search_file([test_ecs], 'HWLOC', terse=True, filename_only=True)
self.assertEqual(var_defs, [])
self.assertEqual(hits, ['hwloc-1.6.2-GCC-4.6.4.eb', 'hwloc-1.6.2-GCC-4.7.2.eb'])
def test_find_eb_script(self):
"""Test find_eb_script function."""
self.assertTrue(os.path.exists(ft.find_eb_script('rpath_args.py')))
self.assertTrue(os.path.exists(ft.find_eb_script('rpath_wrapper_template.sh.in')))
self.assertErrorRegex(EasyBuildError, "Script 'no_such_script' not found", ft.find_eb_script, 'no_such_script')
# put test script in place relative to location of 'eb'
ft.write_file(os.path.join(self.test_prefix, 'bin', 'eb'), '#!/bin/bash\necho "fake eb"')
ft.adjust_permissions(os.path.join(self.test_prefix, 'bin', 'eb'), stat.S_IXUSR)
os.environ['PATH'] = '%s:%s' % (os.path.join(self.test_prefix, 'bin'), os.getenv('PATH', ''))
justatest = os.path.join(self.test_prefix, 'easybuild', 'scripts', 'justatest.sh')
ft.write_file(justatest, '#!/bin/bash')
self.assertTrue(os.path.samefile(ft.find_eb_script('justatest.sh'), justatest))
def test_move_file(self):
"""Test move_file function"""
test_file = os.path.join(self.test_prefix, 'test.txt')
ft.write_file(test_file, 'test123')
new_test_file = os.path.join(self.test_prefix, 'subdir', 'new_test.txt')
ft.move_file(test_file, new_test_file)
self.assertFalse(os.path.exists(test_file))
self.assertTrue(os.path.exists(new_test_file))
self.assertEqual(ft.read_file(new_test_file), 'test123')
# test moving to an existing file
ft.write_file(test_file, 'gibberish')
ft.move_file(new_test_file, test_file)
self.assertTrue(os.path.exists(test_file))
self.assertEqual(ft.read_file(test_file), 'test123')
self.assertFalse(os.path.exists(new_test_file))
# also test behaviour of move_file under --dry-run
build_options = {
'extended_dry_run': True,
'silent': False,
}
init_config(build_options=build_options)
self.mock_stdout(True)
self.mock_stderr(True)
ft.move_file(test_file, new_test_file)
stdout = self.get_stdout()
stderr = self.get_stderr()
self.mock_stdout(False)
self.mock_stderr(False)
# informative message printed, but file was not actually moved
regex = re.compile("^moved file .*/test\.txt to .*/new_test\.txt$")
self.assertTrue(regex.search(stdout), "Pattern '%s' found in: %s" % (regex.pattern, stdout))
self.assertEqual(stderr, '')
self.assertTrue(os.path.exists(test_file))
self.assertEqual(ft.read_file(test_file), 'test123')
self.assertFalse(os.path.exists(new_test_file))
def test_find_backup_name_candidate(self):
"""Test find_backup_name_candidate"""
test_file = os.path.join(self.test_prefix, 'test.txt')
ft.write_file(test_file, 'foo')
# timestamp should be exactly 14 digits (year, month, day, hours, minutes, seconds)
regex = re.compile('^test\.txt_[0-9]{14}$')
res = ft.find_backup_name_candidate(test_file)
self.assertTrue(os.path.samefile(os.path.dirname(res), self.test_prefix))
fn = os.path.basename(res)
self.assertTrue(regex.match(fn), "'%s' matches pattern '%s'" % (fn, regex.pattern))
# create expected next backup location to (try and) see if it's handled well
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
ft.write_file(os.path.join(self.test_prefix, 'test.txt_%s' % timestamp), '')
res = ft.find_backup_name_candidate(test_file)
self.assertTrue(os.path.samefile(os.path.dirname(res), self.test_prefix))
fn = os.path.basename(res)
self.assertTrue(regex.match(fn), "'%s' matches pattern '%s'" % (fn, regex.pattern))
def test_diff_files(self):
"""Test for diff_files function"""
foo = os.path.join(self.test_prefix, 'foo')
ft.write_file(foo, '\n'.join([
'one',
'two',
'three',
'four',
'five',
]))
bar = os.path.join(self.test_prefix, 'bar')
ft.write_file(bar, '\n'.join([
'zero',
'1',
'two',
'tree',
'four',
'five',
]))
expected = '\n'.join([
"@@ -1,5 +1,6 @@",
"-one",
"+zero",
"+1",
" two",
"-three",
"+tree",
" four",
" five",
'',
])
res = ft.diff_files(foo, bar)
self.assertTrue(res.endswith(expected), "%s ends with %s" % (res, expected))
regex = re.compile('^--- .*/foo\s*\n\+\+\+ .*/bar\s*$', re.M)
self.assertTrue(regex.search(res), "Pattern '%s' found in: %s" % (regex.pattern, res))
def suite():
""" returns all the testcases in this module """
return TestLoaderFiltered().loadTestsFromTestCase(FileToolsTest, sys.argv[1:])
if __name__ == '__main__':
TextTestRunner(verbosity=1).run(suite())
| gpl-2.0 | 6,844,606,036,654,685,000 | 46.806369 | 153 | 0.617366 | false |
Cadair/solarbextrapolation | solarbextrapolation/analyticalmodels/base.py | 1 | 8605 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:30:22 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
#import pickle
import time
from datetime import datetime
#from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
from mayavi import mlab
# Internal imports
#from solarbextrapolation.utilities import si_this_map
from solarbextrapolation.map3dclasses import Map3D
class AnalyticalModel(object):
"""
    Common class for the development of analytical models of magnetic fields.
Use the models to evaluate the accuracy of an extrapolation routine with
the figures of merit.
"""
def __init__(self, **kwargs):
# Default grid shape and physical ranges for the volume the model covers.
self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel)) # (x,y,z)
self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
        self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))
# Metadata
        self.meta = {'ZNAXIS': 3, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value, 'ZNAXIS3': self.shape[2].value}
self.meta['analytical_model_notes'] = kwargs.get('notes', '')
self.meta['BUNIT'] = kwargs.get('bunit', u.T)
        # CRVALn, CDELTn and NAXIS (already in meta) used for storing range in 2D fits files.
self.filepath = kwargs.get('filepath', None)
self.routine = kwargs.get('analytical_model_routine', type(self))
# Default 3D magnetic field
#X,Y,Z = np.zeros(self.shape.value), np.zeros(self.shape.value), np.zeros(self.shape.value)
npField = np.zeros([3]+list(np.array(self.shape.value, dtype=np.int)))
self.field = Map3D(npField, self.meta)
# Default magnetic field on boundary
magnetogram = np.zeros(np.array(self.shape[0:2].value, dtype=np.int))
magnetogram_header = {'ZNAXIS': 2, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value}
self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))
def _generate_field(self, **kwargs):
"""
The method for running a model to generate the field.
This is the primary method to be edited in subclasses for specific
model implementations.
"""
# Model code goes here.
        arr_4d = np.zeros([int(self.magnetogram.data.shape[0]), int(self.magnetogram.data.shape[1]), 1, 3])
# Turn the 4D array into a Map3D object.
map_output = Map3D( arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange, xobsrange=self.xrange, yobsrange=self.yrange )
return map_output
def generate(self, **kwargs):
"""
Method to be called to calculate the vector field and return as a Map3D object.
Times and saves the extrapolation where applicable.
"""
# Record the time and duration of the extrapolation.
dt_start = datetime.now()
tim_start = time.time()
arr_output = self._generate_field(**kwargs)
tim_duration = time.time() - tim_start
# Add the duration and time to the meta/header data.
arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
arr_output.meta['extrapolator_duration'] = tim_duration
arr_output.meta['extrapolator_duration_unit'] = u.s
        # Save the Map3D if a filepath has been set. (to avoid losing work)
if self.filepath:
arr_output.save(self.filepath)
# Add the output map to the object and return.
self.map = arr_output
return arr_output
def to_los_magnetogram(self, **kwargs):
"""
Calculate the LoS vector field as a SunPy map and return.
Generally this will require that you have run generate(self, ``**kwargs``)
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field.
.. I'm not sure if this is a good default.
"""
return self.magnetogram
def to_vec_magnetogram(self, **kwargs):
"""
Calculate the vector field as a SunPy map and return.
Generally this will require that you have run ``generate(self, **kwargs)``
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
        the full field.

        .. I'm not sure if this is a good default.
"""
return self.magnetogram
if __name__ == '__main__':
# User-specified parameters
tup_shape = ( 20, 20, 20 )
x_range = ( -80.0, 80 ) * u.Mm
y_range = ( -80.0, 80 ) * u.Mm
z_range = ( 0.0, 120 ) * u.Mm
# Derived parameters (make SI where applicable)
x_0 = x_range[0].to(u.m).value
Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
x_size = Dx * tup_shape[0]
y_0 = y_range[0].to(u.m).value
Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
y_size = Dy * tup_shape[1]
z_0 = z_range[0].to(u.m).value
Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
    z_size = Dz * tup_shape[2]
# Define the extrapolator as a child of the Extrapolators class
class AnaOnes(AnalyticalModel):
def __init__(self, **kwargs):
super(AnaOnes, self).__init__(**kwargs)
def _generate_field(self, **kwargs):
# Adding in custom parameters to the metadata
self.meta['analytical_model_routine'] = 'Ones Model'
# Generate a trivial field and return (X,Y,Z,Vec)
outshape = list(np.array(self.shape.value, dtype=np.int)) + [3]
arr_4d = np.ones(outshape)
return Map3D(arr_4d, self.meta)
# Setup an anylitical model
xrange = u.Quantity([ 50, 300] * u.arcsec)
yrange = u.Quantity([-350, -100] * u.arcsec)
zrange = u.Quantity([ 0, 250] * u.arcsec)
aAnaMod = AnaOnes()
aMap3D = aAnaMod.generate()
# Visualise the 3D vector field
from solarbextrapolation.visualisation_functions import visualise
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
show_volume_axes=True,
debug=False)
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
show_volume_axes=False,
debug=False)
mlab.show()
"""
# For B_I field only, to save re-creating this interpolator for every cell.
A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000)
field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
for i in range(0, tup_shape[0]):
for j in range(0, tup_shape[1]):
for k in range(0, tup_shape[2]):
# Position of this point in space
x_pos = x_0 + ( i + 0.5 ) * Dx
y_pos = y_0 + ( j + 0.5 ) * Dy
z_pos = z_0 + ( k + 0.5 ) * Dz
#field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
#field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
#field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
np_boundary_data = field[:,:,0,2].T
dummyDataToMap(np_boundary_data, x_range, y_range)
#dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
"""
| mit | -4,708,356,561,791,386,000 | 39.21028 | 264 | 0.604648 | false |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/rnacentral/r2dt/parser.py | 1 | 3852 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import logging
import typing as ty
from pathlib import Path
from rnacentral_pipeline import psql
from rnacentral_pipeline.rnacentral.r2dt import data, ribovore
LOGGER = logging.getLogger(__name__)
def load_model_info(handle: ty.TextIO) -> ty.Dict[str, data.ModelDatabaseInfo]:
mapping = {}
for entry in psql.json_handler(handle):
info = data.ModelDatabaseInfo.build(entry)
mapping[entry["model_name"]] = info
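        # GtRNAdb model names mix '-' and '_' separators, so register both
        # spellings; the generic 'tRNA' model is also exposed under its Rfam
        # accession, RF00005.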
if info.source is data.Source.gtrnadb:
mapping[entry["model_name"].replace("_", "-")] = info
mapping[entry["model_name"].replace("-", "_")] = info
if entry["model_name"] == "tRNA":
mapping["RF00005"] = info
return mapping
def load_hit_info(base: Path, allow_missing: bool):
source_directories = [
(base / "crw", data.Source.crw),
(base / "gtrnadb", data.Source.gtrnadb),
(base / "ribovision-lsu", data.Source.ribovision),
(base / "ribovision-ssu", data.Source.ribovision),
(base / "rfam", data.Source.rfam),
(base / "RF00005", data.Source.rfam),
(base / "rnasep", data.Source.rnase_p),
]
has_ribovision = {data.Source.crw, data.Source.ribovision, data.Source.rfam}
hit_info = {}
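    # Ribovore statistics are only produced for the CRW, RiboVision and Rfam
    # runs; the dedicated RF00005 (tRNA) directory has no ribovore output and
    # is skipped in the loop below.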
for (path, source) in source_directories:
if not path.exists():
continue
if source in has_ribovision and path.name != "RF00005":
update = ribovore.as_dict(path, allow_missing=allow_missing)
if update:
hit_info.update(update)
return hit_info
def parse(
info_path: ty.TextIO, base: Path, allow_missing=False
) -> ty.Iterator[data.R2DTResult]:
if not base.exists():
raise ValueError("Cannot parse missing directory: %s" % base)
hit_info = load_hit_info(base, allow_missing)
model_info = load_model_info(info_path)
result_base = base / "results"
metadata_path = result_base / "tsv" / "metadata.tsv"
seen = set()
seen_urs = set()
with metadata_path.open("r") as raw:
reader = csv.reader(raw, delimiter="\t")
for row in reader:
urs = row[0]
model_name = row[1]
source = data.Source.build(row[2])
if model_name not in model_info:
raise ValueError("No info for model %s", model_name)
minfo = model_info[model_name]
info = data.R2DTResultInfo(urs, minfo, source, result_base)
if info in seen:
LOGGER.warn("Dupcliate line in metadata for, %s", info)
continue
seen.add(info)
if info.urs in seen_urs:
raise ValueError(f"Impossible state of >1 hit per URS for {info}")
seen_urs.add(info.urs)
try:
info.validate()
except Exception as e:
if allow_missing:
LOGGER.warn("Did not find all required files for %s", urs)
LOGGER.exception(e)
continue
else:
raise e
hit = None
if info.has_hit_info():
hit = hit_info[urs]
yield data.R2DTResult.from_info(info, hit_info=hit)
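

# Minimal usage sketch (illustrative only; the file locations below are
# assumptions, not part of the pipeline):
#
#     with open("model_info.json") as info_handle:
#         for result in parse(info_handle, Path("/path/to/r2dt/output"),
#                             allow_missing=True):
#             print(result)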
| apache-2.0 | 3,470,230,791,381,858,300 | 34.666667 | 82 | 0.60514 | false |
keras-team/autokeras | examples/automodel_with_cnn.py | 1 | 1358 | # Library import
import numpy as np
import tensorflow as tf
import autokeras as ak
# Prepare example Data - Shape 1D
num_instances = 100
num_features = 5
x_train = np.random.rand(num_instances, num_features).astype(np.float32)
y_train = np.zeros(num_instances).astype(np.float32)
y_train[0 : int(num_instances / 2)] = 1
x_test = np.random.rand(num_instances, num_features).astype(np.float32)
y_test = np.zeros(num_instances).astype(np.float32)
y_test[0 : int(num_instances / 2)] = 1
# This step is very important: the ConvBlock only accepts input with an extra
# channel dimension, so reshape both the training and test features.
x_train = np.expand_dims(x_train, axis=2)
x_test = np.expand_dims(x_test, axis=2)
print(x_train.shape)
print(y_train.shape)
# Prepare Automodel for search
input_node = ak.Input()
output_node = ak.ConvBlock()(input_node)
# output_node = ak.DenseBlock()(output_node) #optional
# output_node = ak.SpatialReduction()(output_node) #optional
output_node = ak.ClassificationHead(num_classes=2, multi_label=True)(output_node)
auto_model = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
# Search
auto_model.fit(x_train, y_train, epochs=1)
print(auto_model.evaluate(x_test, y_test))
# Export as a Keras Model
model = auto_model.export_model()
print(type(model.summary()))
# print model as image
tf.keras.utils.plot_model(
model, show_shapes=True, expand_nested=True, to_file="name.png"
)
| apache-2.0 | 1,371,042,242,022,571,300 | 27.291667 | 81 | 0.72975 | false |
jeremy-miller/life-python | life/display.py | 1 | 1176 | """This module displays the Life 'grid'."""
import numpy
class DisplayClass(object): # pylint: disable=R0903
"""This class displays the Life 'grid'.
No OpenGL or Matplotlib UI is used since this program is being executed
in a Docker container. The 'curses' Python package is also not used
since it also has problems detecting the terminal when executed in a
Docker container.
"""
@staticmethod
def display(grid):
"""This function displays the Life 'grid' to the console.
Each iteration of the game will display a new grid in the console.
This function loops through each index in the grid, checking if
each cell is 'living' or 'dead', and adding the appropriate symbol
to the grid output.
Args:
grid (array): A Numpy two-dimensional array which is the 'grid' to be
displayed in the console.
"""
output = ''
for index, value in numpy.ndenumerate(grid): # example 'index' = (0,0), example 'value' = 1
if value:
output += ' O'
else:
output += ' .'
if index[1] == grid.shape[1] - 1: # check to see if we are at the end of a row
output += '\n'
print(output)
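

# Minimal usage sketch (illustrative addition, not part of the original module).
# It assumes a small NumPy grid where 1 marks a living cell and 0 a dead cell.
if __name__ == '__main__':
    demo_grid = numpy.array([[0, 1, 0],
                             [1, 1, 1],
                             [0, 1, 0]])
    DisplayClass.display(demo_grid)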
| mit | 8,947,968,542,597,361,000 | 31.666667 | 96 | 0.654762 | false |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/test/test_pipes.py | 1 | 7288 | import pipes
import os
import string
import unittest
from test.support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
def tearDown(self):
for f in (TESTFN, TESTFN2):
unlink(f)
def testSimplePipe1(self):
t = pipes.Template()
t.append(s_command, pipes.STDIN_STDOUT)
f = t.open(TESTFN, 'w')
f.write('hello world #1')
f.close()
with open(TESTFN) as f:
self.assertEqual(f.read(), 'HELLO WORLD #1')
def testSimplePipe2(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), 'HELLO WORLD #2')
def testSimplePipe3(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), 'HELLO WORLD #2')
finally:
f.close()
def testEmptyPipeline1(self):
# copy through empty pipe
d = 'empty pipeline test COPY'
with open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN2, 'w') as f:
f.write('')
t=pipes.Template()
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), d)
def testEmptyPipeline2(self):
# read through empty pipe
d = 'empty pipeline test READ'
with open(TESTFN, 'w') as f:
f.write(d)
t=pipes.Template()
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), d)
finally:
f.close()
def testEmptyPipeline3(self):
# write through empty pipe
d = 'empty pipeline test WRITE'
t = pipes.Template()
with t.open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN) as f:
self.assertEqual(f.read(), d)
def testQuoting(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unsafe = '"`$\\!'
self.assertEqual(pipes.quote(''), "''")
self.assertEqual(pipes.quote(safeunquoted), safeunquoted)
self.assertEqual(pipes.quote('test file name'), "'test file name'")
for u in unsafe:
self.assertEqual(pipes.quote('test%sname' % u),
"'test%sname'" % u)
for u in unsafe:
self.assertEqual(pipes.quote("test%s'name'" % u),
"'test%s'\"'\"'name'\"'\"''" % u)
def testRepr(self):
t = pipes.Template()
self.assertEqual(repr(t), "<Template instance, steps=[]>")
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
self.assertEqual(repr(t),
"<Template instance, steps=[('tr a-z A-Z', '--')]>")
def testSetDebug(self):
t = pipes.Template()
t.debug(False)
self.assertEqual(t.debugging, False)
t.debug(True)
self.assertEqual(t.debugging, True)
def testReadOpenSink(self):
# check calling open('r') on a pipe ending with
# a sink raises ValueError
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
def testWriteOpenSource(self):
# check calling open('w') on a pipe ending with
# a source raises ValueError
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
def testBadAppendOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
# shouldn't be able to append a source
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
# check appending two sinks
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadPrependOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
# shouldn't be able to prepend a sink
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
# check prepending two sources
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadOpenMode(self):
t = pipes.Template()
self.assertRaises(ValueError, t.open, 'bogusfile', 'x')
def testClone(self):
t = pipes.Template()
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
u = t.clone()
self.assertNotEqual(id(t), id(u))
self.assertEqual(t.steps, u.steps)
self.assertNotEqual(id(t.steps), id(u.steps))
self.assertEqual(t.debugging, u.debugging)
def test_main():
run_unittest(SimplePipeTests)
reap_children()
if __name__ == "__main__":
test_main()
| gpl-2.0 | 2,743,070,504,171,000,300 | 32.704762 | 75 | 0.552415 | false |
jeffsilverm/presentation | SeaGL-2018/network_stats_result_2_csv.py | 1 | 3115 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import csv
import datetime
import sys
def str_to_time_delta(string) -> datetime.timedelta:
"""
:param string: Input in format 0:01:37.083557
:return: datetime.timedelta
"""
flds = string.split(":")
    hours = int(flds[0])
    minutes = int(flds[1])
    seconds = float(flds[2])
td = datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds)
return td
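

# Example (illustrative): str_to_time_delta("0:01:37.083557") returns
# datetime.timedelta(minutes=1, seconds=37.083557), i.e. about 97.08 seconds.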
# Adapted from the Python csv module documentation:
# with open('eggs.csv', 'w', newline='') as csv_file:
# spamwriter = csv.writer(csv_file, delimiter=' ',
# quotechar='|', quoting=csv.QUOTE_MINIMAL)
#
with open(file=sys.argv[2], mode="w", newline="") as csv_file:
spamwriter = csv.writer(csv_file)
# git tag MONDAY
spamwriter.writerow(
['retries', 'elapsed', 'delay', 'loss', 'size', 'rate', 'proto', 'GTRs'])
with open(file=sys.argv[1], mode="r") as f:
for line in f:
# format of a line is:
# Retries: 0 Elapsed time: 0:01:16.489403 Delay: 10.3 loss percent: 20 size: 1000000 bytes data rate:
# 13073.706432249184 bytes/sec protocol: IPv6
# I'm not going to do any sanity checking. I might regret that later
# 0 "Retries:
# 1 retries as an string of an integer
# 2 "Elapsed"
# 3 "time:"
# 4 elapsed_time as a string of a datetime.timedelta
# 5 "Delay:"
# 6 delay_ms as a string of a float
# 7 "loss"
# 8 "percent:"
# 9 loss_percent as a float
# 10 "size:"
# 11 size a string as a integer
# 12 "bytes"
# 13 "data"
# 14 "rate:"
# 15 data_rate a string as a float
# 16 "bytes/sec"
# 17 "protocol:"
# 18 a string either IPv4 or IPv6
# After the November 5th, added Global TCP Retries (GTRs)
# 19: "Global"
# 20: "TCP"
# 21: "retries:"
# 22 GTRs a string as an int
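            # Worked example (using the sample line above, which predates the
            # trailing "Global TCP retries" fields): fields[1] -> "0",
            # fields[4] -> "0:01:16.489403", fields[6] -> "10.3",
            # fields[9] -> "20", fields[11] -> "1000000",
            # fields[15] -> "13073.706432249184", fields[18] -> "IPv6";
            # fields[22] only exists on lines that include the GTR fields.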
fields = line.split()
# I'm converting the strings to data types and then
# back to strs again because I am doing some sanity checking
retries = int(fields[1])
# Pandas can handle an elapsed time, no need to convert
elapsed_time = fields[4]
delay_ms = float(fields[6])
loss_percent = float(fields[9])
size = int(fields[11])
data_rate = float(fields[15])
if fields[18] == "IPv4":
protocol = "IPv4"
elif fields[18] == "IPv6":
protocol = "IPv6"
else:
raise ValueError("fields[18] should be 'IPv4' or 'IPv6' but is "
f"{fields[18]}")
gtrs = int(fields[22])
row_str = [str(retries), str(elapsed_time), str(delay_ms),
str(loss_percent), str(size), str(data_rate), protocol, gtrs]
spamwriter.writerow(row_str)
| gpl-2.0 | 7,128,494,810,145,858,000 | 36.53012 | 113 | 0.50626 | false |
floydhub/dockerfiles | dl/pytorch/tests/0.3.1/mnist.py | 1 | 5122 | # MNIST from official PyTorch Docs
from __future__ import print_function
import argparse
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import psutil
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
# Log Info
print("-" * 64)
print("TEST INFO - PYTORCH MNIST")
print("-" * 64)
print("PyTorch version:\t {}".format(torch.__version__))
print("Vision version:\t {}".format(torchvision.__version__))
print("Dataset:\t MNIST")
print("Model:\t DNN")
print("CUDA support:\t {}".format(torch.cuda.is_available()))
if torch.cuda.is_available():
print("Number of GPUs:\t {}".format(torch.cuda.device_count()))
print("GPU:\t {}".format(torch.cuda.get_device_name(0)))
else:
print("CPU cores:\t {}".format(psutil.cpu_count()))
print("=" * 64)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': psutil.cpu_count(logical=False), 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
# One hidden Layer NN
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc = nn.Linear(784, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
x = F.relu(self.fc(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Net()
if args.cuda:
model.cuda()
# Create a loss function
criterion = F.nll_loss
# SGD
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
# Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
data = data.view(-1, 28*28)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
# Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
data = data.view(-1, 28*28)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).data[0]() # Sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # Get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
| apache-2.0 | 7,196,416,950,406,013,000 | 36.386861 | 98 | 0.613627 | false |
bcgsc/ProbeGenerator | probe_generator/reference.py | 1 | 2779 | """Parse and extract base pair sequences from an Ensembl reference genome.
"""
from probe_generator import sequence
from probe_generator.exceptions import NonFatalError
def bases(sequence_range, genome):
"""Return the bases from a SequenceRange object.
"""
raw_bases = _raw_bases(
sequence_range.chromosome,
sequence_range.start,
sequence_range.end,
genome)
if sequence_range.reverse_complement:
return sequence.reverse_complement(raw_bases)
else:
return raw_bases
def reference_genome(genome):
"""Map chromosomes to base pair sequences.
`genome` is a handle to a reference genome in Ensembl FASTA format.
Returns a dictionary.
"""
genome_map = {}
chromosome = None
for line in genome:
if line.startswith('>'):
chromosome = line[1:].split()[0]
# In an Ensembl reference genome, the chromosome is the first
# string of characters after the '>' but before whitespace.
# E.g.:
# >chr Homo spaiens some chromosome etc etc
# NNN...
genome_map[chromosome] = []
elif chromosome is None:
raise InvalidGenomeFile(
"could not parse input: {!r}".format(
line))
else:
genome_map[chromosome].append(line.strip())
if not genome_map:
raise InvalidGenomeFile("genome file empty!")
return {chromosome: ''.join(bases)
for (chromosome, bases)
in genome_map.items()}
def _raw_bases(chromosome, start, end, genome):
"""Return a string of the base pairs of chromosome from start to end.
The start and end attributes follow the Python convention for slices
(indexed from zero, start inclusive, end exclusive).
The genome is a dictionary relating chromosome names to base pair sequences
(which are strings).
"""
try:
base_pairs = genome[chromosome][start:end]
except KeyError:
raise MissingChromosome(
"no such chromosome: {!r}".format(
chromosome))
if end - start != len(base_pairs):
raise NonContainedRange(
"range [{0}:{1}] outside the "
"range of chromosome {2!r}".format(
start, end, chromosome))
return base_pairs
class NonContainedRange(Exception):
"""Raised when the range of base pairs which is to be sliced from a
chromosome includes base pairs outside the chromosome.
"""
class InvalidGenomeFile(Exception):
"""Raised when a a genome_file cannot be parsed.
"""
class MissingChromosome(NonFatalError):
"""Raised when a chromosome is not present in the reference genome.
"""
| gpl-3.0 | 8,711,707,107,422,563,000 | 28.252632 | 79 | 0.618928 | false |
bakhtout/odoo-educ | addons/openeducat_erp/op_course/__init__.py | 1 | 1082 | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.tech-receptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
import op_course
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 997,262,159,016,272,500 | 44.083333 | 80 | 0.617375 | false |
crackinglandia/pype32 | setup.py | 1 | 4235 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
__all__ = ['metadata', 'setup']
from distutils.core import setup
from distutils import version
from warnings import warn
import re
import os
import sys
import glob
# Distutils hack: in order to be able to build MSI installers with loose
# version numbers, we subclass StrictVersion to accept loose version numbers
# and convert them to the strict format. This works because Distutils will
# happily reinstall a package even if the version number matches exactly the
# one already installed on the system - so we can simply strip all extraneous
# characters and beta/postrelease version numbers will be treated just like
# the base version number.
if __name__ == '__main__':
StrictVersion = version.StrictVersion
class NotSoStrictVersion (StrictVersion):
def parse (self, vstring):
components = []
for token in vstring.split('.'):
token = token.strip()
match = re.search('^[0-9]+', token)
if match:
number = token[ match.start() : match.end() ]
components.append(number)
vstring = '.'.join(components)
return StrictVersion.parse(self, vstring)
version.StrictVersion = NotSoStrictVersion
# Get the base directory
here = os.path.dirname(__file__)
if not here:
here = os.path.curdir
# Text describing the module (reStructured text)
try:
readme = os.path.join(here, 'README')
long_description = open(readme, 'r').read()
except Exception:
warn("README file not found or unreadable!")
long_description = """pype32 is python library to read and write PE/PE+ binary files."""
# Get the list of scripts in the "tools" folder
scripts = glob.glob(os.path.join(here, 'tools', '*.py'))
# Set the parameters for the setup script
metadata = {
# Setup instructions
'provides' : ['pype32'],
'packages' : ['pype32'],
'scripts' : scripts,
# Metadata
'name' : 'pype32',
'version' : '0.1-alpha5',
'description' : 'Yet another Python library to read and write PE/PE+ files.',
'long_description' : long_description,
'author' : 'Nahuel Riva',
'author_email' : 'crackinglandia'+chr(64)+'gmail'+chr(0x2e)+'com',
'url' : 'https://github.com/crackinglandia/pype32',
'keywords' : ['pecoff', 'x86', 'x64', '.net', 'parser'],
'download_url' : 'https://github.com/crackinglandia/pype32/tarball/v0.1-alpha5',
}
# Execute the setup script
if __name__ == '__main__':
setup(**metadata)
| bsd-3-clause | 1,081,775,200,471,822,000 | 39.721154 | 92 | 0.677922 | false |
myvoice-nigeria/myvoice | myvoice/clinics/migrations/0013_auto__chg_field_visit_service.py | 1 | 13197 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Visit.service'
db.alter_column(u'clinics_visit', 'service_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clinics.Service'], null=True))
def backwards(self, orm):
# Changing field 'Visit.service'
db.alter_column(u'clinics_visit', 'service_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['clinics.Service']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstatistic': {
'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.DateField', [], {}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}),
'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_count': ('django.db.models.fields.PositiveIntegerField', [], {}),
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['clinics'] | bsd-2-clause | -2,682,326,157,635,498,500 | 74.850575 | 195 | 0.548079 | false |
m00nlight/hackerrank | algorithm/contests/Counter-Code-2015/C.py | 1 | 1148 | from __future__ import division
from sys import stdin
def solve(n, m):
if n % 2 == 0:
if m % n == 0:
return n // 2 + 1
elif m % n % 2 == 1:
return (m % n + 1) // 2
else:
return n + 1 - m % n // 2
else:
idx = m % (2 * n)
if idx == 0:
return (n + 1) // 2
else:
if idx <= (n + 1):
if idx == n:
return (n + 1) // 2
elif idx == n + 1:
return n
else:
if idx % 2 == 1:
return (idx + 1) // 2
else:
return (n + 1 - idx // 2)
else:
idx = idx - (n + 1)
if idx % 2 == 1:
return (idx + 1) // 2
else:
return (n - idx // 2)
if __name__ == '__main__':
t = int(stdin.readline())
for _ in range(t):
n, m = map(int, stdin.readline().strip().split())
ans = solve(n, m)
print(str(ans) + ' ' + str(m // n - (1 if m % n == 0 else 0))) | gpl-2.0 | -6,132,813,347,740,916,000 | 27.02439 | 70 | 0.313589 | false |
Bezoar/surrender-rides | bp_includes/handlers.py | 1 | 64168 | # -*- coding: utf-8 -*-
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
import json
# related third party imports
import webapp2
from webapp2_extras import security
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from webapp2_extras.appengine.auth.models import Unique
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api.datastore_errors import BadValueError
from google.appengine.runtime import apiproxy_errors
from github import github
from linkedin import linkedin
# local application/library specific imports
import models
import forms as forms
from lib import utils, captcha, twitter
from lib.basehandler import BaseHandler
from lib.decorators import user_required
from lib.decorators import taskqueue_method
from lib import facebook
class LoginRequiredHandler(BaseHandler):
def get(self):
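        # request.get(..., allow_multiple=True) returns a list; the trailing
        # comma on the next line unpacks its single element into continue_url.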
continue_url, = self.request.get('continue', allow_multiple=True)
self.redirect(users.create_login_url(dest_url=continue_url))
class RegisterBaseHandler(BaseHandler):
"""
Base class for handlers with registration and login forms.
"""
@webapp2.cached_property
def form(self):
return forms.RegisterForm(self)
class SendEmailHandler(BaseHandler):
"""
Core Handler for sending Emails
Use with TaskQueue
"""
@taskqueue_method
def post(self):
from google.appengine.api import mail, app_identity
to = self.request.get("to")
subject = self.request.get("subject")
body = self.request.get("body")
sender = self.request.get("sender")
        # Fall back to the configured contact sender (or the app's appspotmail
        # address) when no valid sender was supplied with the task.
        if sender == '' or not utils.is_email_valid(sender):
if utils.is_email_valid(self.app.config.get('contact_sender')):
sender = self.app.config.get('contact_sender')
else:
app_id = app_identity.get_application_id()
sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender=sender,
to=to,
subject=subject,
body=body,
when=utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
try:
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
except Exception, e:
logging.error("Error sending email: %s" % e)
class LoginHandler(BaseHandler):
"""
Handler for authentication
"""
def get(self):
""" Returns a simple HTML form for login """
if self.user:
self.redirect_to('home')
params = {}
return self.render_template('login.html', **params)
def post(self):
"""
username: Get the username from POST dict
password: Get the password from POST dict
"""
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
continue_url = self.request.get('continue_url').encode('ascii', 'ignore')
try:
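            # Users may sign in with either an email address or a username;
            # resolve whichever was supplied to its "own:<username>" auth_id.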
if utils.is_email_valid(username):
user = self.user_model.get_by_email(username)
if user:
auth_id = user.auth_ids[0]
else:
raise InvalidAuthIdError
else:
auth_id = "own:%s" % username
user = self.user_model.get_by_auth_id(auth_id)
password = self.form.password.data.strip()
remember_me = True if str(self.request.POST.get('remember_me')) == 'on' else False
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
# Try to login user with password
# Raises InvalidAuthIdError if user is not found
# Raises InvalidPasswordError if provided password
# doesn't match with specified user
self.auth.get_user_by_password(
auth_id, password, remember=remember_me)
# if user account is not activated, logout and redirect to home
            if not user.activated:
# logout
self.auth.unset_session()
# redirect to home with error message
resend_email_uri = self.uri_for('resend-account-activation', user_id=user.get_id(),
token=self.user_model.create_resend_token(user.get_id()))
message = _('Your account has not yet been activated. Please check your email to activate it or') + \
' <a href="' + resend_email_uri + '">' + _('click here') + '</a> ' + _('to resend the email.')
self.add_message(message, 'error')
return self.redirect_to('home')
# check twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='twitter',
uid=str(twitter_association_data['id']),
extra_data=twitter_association_data
)
social_user.put()
# check facebook association
fb_data = None
try:
fb_data = json.loads(self.session['facebook'])
except:
pass
if fb_data is not None:
if models.SocialUser.check_unique(user.key, 'facebook', str(fb_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='facebook',
uid=str(fb_data['id']),
extra_data=fb_data
)
social_user.put()
# check linkedin association
li_data = None
try:
li_data = json.loads(self.session['linkedin'])
except:
pass
if li_data is not None:
if models.SocialUser.check_unique(user.key, 'linkedin', str(li_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='linkedin',
uid=str(li_data['id']),
extra_data=li_data
)
social_user.put()
# end linkedin
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Your username or password is incorrect. "
"Please try again (make sure your caps lock is off)")
self.add_message(message, 'error')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.LoginForm(self)
class SocialLoginHandler(BaseHandler):
"""
Handler for Social authentication
"""
def get(self, provider_name):
provider = self.provider_info[provider_name]
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
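        # Each provider redirects back to this URL, which is handled by
        # CallbackSocialLoginHandler below.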
if provider_name == "twitter":
twitter_helper = twitter.TwitterAuth(self, redirect_uri=callback_url)
self.redirect(twitter_helper.auth_url())
elif provider_name == "facebook":
self.session['linkedin'] = None
perms = ['email', 'publish_stream']
self.redirect(facebook.auth_url(self.app.config.get('fb_api_key'), callback_url, perms))
elif provider_name == 'linkedin':
self.session['facebook'] = None
authentication = linkedin.LinkedInAuthentication(
self.app.config.get('linkedin_api'),
self.app.config.get('linkedin_secret'),
callback_url,
[linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
self.redirect(authentication.authorization_url)
elif provider_name == "github":
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'),
self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'),
self.app.config.get('github_redirect_uri'), scope)
self.redirect(github_helper.get_authorize_url())
elif provider_name in models.SocialUser.open_id_providers():
continue_url = self.request.get('continue_url')
if continue_url:
dest_url = self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
else:
dest_url = self.uri_for('social-login-complete', provider_name=provider_name)
try:
login_url = users.create_login_url(federated_identity=provider['uri'], dest_url=dest_url)
self.redirect(login_url)
except users.NotAllowedError:
                self.add_message('You must enable Federated Login for this application first.<br> '
'<a href="http://appengine.google.com" target="_blank">Google App Engine Control Panel</a> -> '
'Administration -> Application Settings -> Authentication Options', 'error')
self.redirect_to('login')
else:
message = _('%s authentication is not yet implemented.' % provider.get('label'))
self.add_message(message, 'warning')
self.redirect_to('login')
class CallbackSocialLoginHandler(BaseHandler):
"""
Callback (Save Information) for Social Authentication
"""
def get(self, provider_name):
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
continue_url = self.request.get('continue_url')
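        # For every provider the flow is the same: fetch the provider's user data,
        # then either associate it with the logged-in user, sign in an existing
        # social user, or fall through to create_account_from_social_provider().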
if provider_name == "twitter":
oauth_token = self.request.get('oauth_token')
oauth_verifier = self.request.get('oauth_verifier')
twitter_helper = twitter.TwitterAuth(self)
user_data = twitter_helper.auth_complete(oauth_token,
oauth_verifier)
logging.info('twitter user_data: ' + str(user_data))
if self.user:
# new association with twitter
user_info = self.user_model.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'twitter', str(user_data['user_id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='twitter',
uid=str(user_data['user_id']),
extra_data=user_data
)
social_user.put()
message = _('Twitter association added.')
self.add_message(message, 'success')
else:
message = _('This Twitter account is already in use.')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with twitter
social_user = models.SocialUser.get_by_provider_and_uid('twitter',
str(user_data['user_id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['user_id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# github association
elif provider_name == "github":
# get our request code back from the social login handler above
code = self.request.get('code')
# create our github auth object
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'),
self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'),
self.app.config.get('github_redirect_uri'), scope)
# retrieve the access token using the code and auth object
access_token = github_helper.get_access_token(code)
user_data = github_helper.get_user_info(access_token)
logging.info('github user_data: ' + str(user_data))
if self.user:
# user is already logged in so we set a new association with twitter
user_info = self.user_model.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'github', str(user_data['login'])):
social_user = models.SocialUser(
user=user_info.key,
provider='github',
uid=str(user_data['login']),
extra_data=user_data
)
social_user.put()
message = _('Github association added.')
self.add_message(message, 'success')
else:
message = _('This Github account is already in use.')
self.add_message(message, 'error')
self.redirect_to('edit-profile')
else:
# user is not logged in, but is trying to log in via github
social_user = models.SocialUser.get_by_provider_and_uid('github', str(user_data['login']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end github
# facebook association
elif provider_name == "facebook":
code = self.request.get('code')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
token = facebook.get_access_token_from_code(code, callback_url, self.app.config.get('fb_api_key'),
self.app.config.get('fb_secret'))
access_token = token['access_token']
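            # Use the access token to fetch the authenticated user's profile ("me").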
fb = facebook.GraphAPI(access_token)
user_data = fb.get_object('me')
logging.info('facebook user_data: ' + str(user_data))
if self.user:
# new association with facebook
user_info = self.user_model.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'facebook', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='facebook',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Facebook association added!')
self.add_message(message, 'success')
else:
message = _('This Facebook account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Facebook
social_user = models.SocialUser.get_by_provider_and_uid('facebook',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# end facebook
# association with linkedin
elif provider_name == "linkedin":
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
authentication = linkedin.LinkedInAuthentication(
self.app.config.get('linkedin_api'),
self.app.config.get('linkedin_secret'),
callback_url,
[linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
authentication.authorization_code = self.request.get('code')
access_token = authentication.get_access_token()
link = linkedin.LinkedInApplication(authentication)
u_data = link.get_profile(selectors=['id', 'first-name', 'last-name', 'email-address'])
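            # Normalize LinkedIn's camelCase fields to the snake_case keys used below.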
user_data = {
'first_name': u_data.get('firstName'),
'last_name': u_data.get('lastName'),
'id': u_data.get('id'),
'email': u_data.get('emailAddress')}
self.session['linkedin'] = json.dumps(user_data)
logging.info('linkedin user_data: ' + str(user_data))
if self.user:
# new association with linkedin
user_info = self.user_model.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'linkedin', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='linkedin',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Linkedin association added!')
self.add_message(message, 'success')
else:
message = _('This Linkedin account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Linkedin
social_user = models.SocialUser.get_by_provider_and_uid('linkedin',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end linkedin
# google, myopenid, yahoo OpenID Providers
elif provider_name in models.SocialUser.open_id_providers():
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
# get info passed from OpenID Provider
from google.appengine.api import users
current_user = users.get_current_user()
if current_user:
if current_user.federated_identity():
uid = current_user.federated_identity()
else:
uid = current_user.user_id()
email = current_user.email()
else:
message = _('No user authentication information received from %s. '
'Please ensure you are logging in from an authorized OpenID Provider (OP).'
% provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to(
'login')
if self.user:
# add social account to user
user_info = self.user_model.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, provider_name, uid):
social_user = models.SocialUser(
user=user_info.key,
provider=provider_name,
uid=uid
)
social_user.put()
message = _('%s association successfully added.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with OpenID Provider
social_user = models.SocialUser.get_by_provider_and_uid(provider_name, uid)
if social_user:
# Social user found. Authenticate the user
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
self.create_account_from_social_provider(provider_name, uid, email, continue_url)
else:
message = _('This authentication method is not yet implemented.')
self.add_message(message, 'warning')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
def create_account_from_social_provider(self, provider_name, uid, email=None, continue_url=None, user_data=None):
"""Social user does not exist yet so create it with the federated identity provided (uid)
and create prerequisite user and log the user account in
"""
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
if models.SocialUser.check_unique_uid(provider_name, uid):
# create user
# Returns a tuple, where first value is BOOL.
# If True ok, If False no new user is created
# Assume provider has already verified email address
# if email is provided so set activated to True
auth_id = "%s:%s" % (provider_name, uid)
if email:
unique_properties = ['email']
user_info = self.auth.store.user_model.create_user(
auth_id, unique_properties, email=email,
activated=True
)
else:
user_info = self.auth.store.user_model.create_user(
auth_id, activated=True
)
            if not user_info[0]:  # create_user returns a (success, user_or_error_info) tuple
message = _('The account %s is already in use.' % provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('register')
user = user_info[1]
# create social user and associate with user
social_user = models.SocialUser(
user=user.key,
provider=provider_name,
uid=uid,
)
if user_data:
social_user.extra_data = user_data
self.session[provider_name] = json.dumps(user_data) # TODO is this needed?
social_user.put()
# authenticate user
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
message = _('Welcome! You have been registered as a new user '
'and logged in through {}.').format(provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
class DeleteSocialProviderHandler(BaseHandler):
"""
Delete Social association with an account
"""
@user_required
def post(self, provider_name):
if self.user:
user_info = self.user_model.get_by_id(long(self.user_id))
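            # Only allow disassociation when the user keeps another way to sign in:
            # more than one linked provider and a local password.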
if len(user_info.get_social_providers_info()['used']) > 1 and user_info.password is not None:
social_user = models.SocialUser.get_by_user_and_provider(user_info.key, provider_name)
if social_user:
social_user.key.delete()
message = _('%s successfully disassociated.' % provider_name)
self.add_message(message, 'success')
else:
message = _('Social account on %s not found for this user.' % provider_name)
self.add_message(message, 'error')
else:
                message = _('Social account on %s cannot be deleted for user.'
                            ' Please create a username and password to delete social account.' % provider_name)
self.add_message(message, 'error')
self.redirect_to('edit-profile')
class LogoutHandler(BaseHandler):
"""
Destroy user session and redirect to login
"""
def get(self):
if self.user:
message = _("You've signed out successfully. Warning: Please clear all cookies and logout "
"of OpenID providers too if you logged in on a public computer.")
self.add_message(message, 'info')
self.auth.unset_session()
# User is logged out, let's try redirecting to login page
try:
self.redirect(self.auth_config['login_url'])
except (AttributeError, KeyError), e:
logging.error("Error logging out: %s" % e)
message = _("User is logged out, but there was an error on the redirection.")
self.add_message(message, 'error')
return self.redirect_to('home')
class RegisterHandler(BaseHandler):
"""
Handler for Sign Up Users
"""
def get(self):
""" Returns a simple HTML form for create a new user """
if self.user:
self.redirect_to('home')
params = {}
return self.render_template('register.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
email = self.form.email.data.lower()
password = self.form.password.data.strip()
country = self.form.country.data
tz = self.form.tz.data
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
# Passing password_raw=password so password will be hashed
# Returns a tuple, where first value is BOOL.
# If True ok, If False no new user is created
unique_properties = ['username', 'email']
auth_id = "own:%s" % username
user = self.auth.store.user_model.create_user(
auth_id, unique_properties, password_raw=password,
username=username, name=name, last_name=last_name, email=email,
ip=self.request.remote_addr, country=country, tz=tz
)
        if not user[0]:  # create_user returns a (success, user_or_error_info) tuple
if "username" in str(user[1]):
message = _(
'Sorry, The username <strong>{}</strong> is already registered.').format(username)
elif "email" in str(user[1]):
message = _('Sorry, The email <strong>{}</strong> is already registered.').format(email)
else:
message = _('Sorry, The user is already registered.')
self.add_message(message, 'error')
return self.redirect_to('register')
else:
# User registered successfully
            # If account activation is enabled, the new user must confirm via email before logging in.
try:
if not user[1].activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id=user[1].get_id(),
token=self.user_model.create_auth_token(user[1].get_id()),
_full=True)
logging.info("*** confirmation_url is %s" % confirmation_url)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': str(email),
'subject': subject,
'body': body,
})
message = _('You were successfully registered. '
'Please check your email to activate your account.')
self.add_message(message, 'success')
return self.redirect_to('home')
                # The account is already activated, so authenticate the new user right away.
db_user = self.auth.get_user_by_password(user[1].auth_ids[0], password)
# Check Twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user[1].key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user=user[1].key,
provider='twitter',
uid=str(twitter_association_data['id']),
extra_data=twitter_association_data
)
social_user.put()
                # Check Facebook association in session (mirrors LoginHandler); note the
                # newly created user is user[1] since create_user returns a tuple.
                fb_data = None
                try:
                    fb_data = json.loads(self.session['facebook'])
                except:
                    pass
                if fb_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'facebook', str(fb_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='facebook',
                            uid=str(fb_data['id']),
                            extra_data=fb_data
                        )
                        social_user.put()

                # Check LinkedIn association in session
                li_data = None
                try:
                    li_data = json.loads(self.session['linkedin'])
                except:
                    pass
                if li_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'linkedin', str(li_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='linkedin',
                            uid=str(li_data['id']),
                            extra_data=li_data
                        )
                        social_user.put()
message = _('Welcome <strong>{}</strong>, you are now logged in.').format(username)
self.add_message(message, 'success')
return self.redirect_to('home')
except (AttributeError, KeyError), e:
                logging.error('Unexpected error creating the user %s: %s' % (username, e))
message = _('Unexpected error creating the user %s' % username)
self.add_message(message, 'error')
return self.redirect_to('home')
@webapp2.cached_property
def form(self):
f = forms.RegisterForm(self)
f.country.choices = self.countries_tuple
f.tz.choices = self.tz
return f
class AccountActivationHandler(BaseHandler):
"""
Handler for account activation
"""
def get(self, user_id, token):
try:
if not self.user_model.validate_auth_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = self.user_model.get_by_id(long(user_id))
# activate the user's account
user.activated = True
user.put()
# Login User
self.auth.get_user_by_token(int(user_id), token)
# Delete token
self.user_model.delete_auth_token(user_id, token)
message = _('Congratulations, Your account <strong>{}</strong> has been successfully activated.').format(
user.username)
self.add_message(message, 'success')
self.redirect_to('home')
except (AttributeError, KeyError, InvalidAuthIdError, NameError), e:
logging.error("Error activating an account: %s" % e)
            message = _('Sorry, some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class ResendActivationEmailHandler(BaseHandler):
"""
Handler to resend activation email
"""
def get(self, user_id, token):
try:
if not self.user_model.validate_resend_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = self.user_model.get_by_id(long(user_id))
email = user.email
            if not user.activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id=user.get_id(),
token=self.user_model.create_auth_token(user.get_id()),
_full=True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": user.username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': str(email),
'subject': subject,
'body': body,
})
self.user_model.delete_resend_token(user_id, token)
message = _('The verification email has been resent to %s. '
'Please check your email to activate your account.' % email)
self.add_message(message, 'success')
return self.redirect_to('home')
else:
message = _('Your account has been activated. Please <a href="/login/">sign in</a> to your account.')
self.add_message(message, 'warning')
return self.redirect_to('home')
except (KeyError, AttributeError), e:
logging.error("Error resending activation email: %s" % e)
            message = _('Sorry, some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class EditProfileHandler(BaseHandler):
"""
Handler for Edit User Profile
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit profile """
params = {}
if self.user:
user_info = self.user_model.get_by_id(long(self.user_id))
self.form.username.data = user_info.username
self.form.name.data = user_info.name
self.form.last_name.data = user_info.last_name
self.form.country.data = user_info.country
self.form.tz.data = user_info.tz
providers_info = user_info.get_social_providers_info()
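            # The template receives the used/unused provider lists plus a flag
            # indicating whether the account has a local password.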
if not user_info.password:
params['local_account'] = False
else:
params['local_account'] = True
params['used_providers'] = providers_info['used']
params['unused_providers'] = providers_info['unused']
params['country'] = user_info.country
params['tz'] = user_info.tz
return self.render_template('edit_profile.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
country = self.form.country.data
tz = self.form.tz.data
try:
user_info = self.user_model.get_by_id(long(self.user_id))
try:
message = ''
# update username if it has changed and it isn't already taken
if username != user_info.username:
user_info.unique_properties = ['username', 'email']
uniques = [
'User.username:%s' % username,
'User.auth_id:own:%s' % username,
]
# Create the unique username and auth_id.
success, existing = Unique.create_multi(uniques)
if success:
# free old uniques
Unique.delete_multi(
['User.username:%s' % user_info.username, 'User.auth_id:own:%s' % user_info.username])
# The unique values were created, so we can save the user.
user_info.username = username
user_info.auth_ids[0] = 'own:%s' % username
message += _('Your new username is <strong>{}</strong>').format(username)
else:
message += _(
'The username <strong>{}</strong> is already taken. Please choose another.').format(
username)
# At least one of the values is not unique.
self.add_message(message, 'error')
return self.get()
user_info.name = name
user_info.last_name = last_name
user_info.country = country
user_info.tz = tz
user_info.put()
message += " " + _('Thanks, your settings have been saved.')
self.add_message(message, 'success')
return self.get()
except (AttributeError, KeyError, ValueError), e:
                logging.error('Error updating profile: %s' % e)
message = _('Unable to update profile. Please try again later.')
self.add_message(message, 'error')
return self.get()
except (AttributeError, TypeError), e:
login_error_message = _('Your session has expired.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
f = forms.EditProfileForm(self)
f.country.choices = self.countries_tuple
f.tz.choices = self.tz
return f
class EditPasswordHandler(BaseHandler):
"""
Handler for Edit User Password
"""
@user_required
def get(self):
""" Returns a simple HTML form for editing password """
params = {}
return self.render_template('edit_password.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
current_password = self.form.current_password.data.strip()
password = self.form.password.data.strip()
try:
user_info = self.user_model.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
current_password = utils.hashing(current_password, self.app.config.get('salt'))
try:
user = self.user_model.get_by_auth_password(auth_id, current_password)
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# send email
subject = self.app.config.get('app_name') + " Account Password Changed"
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"email": user.email,
"reset_password_url": self.uri_for("password-reset", _full=True)
}
email_body_path = "emails/password_changed.txt"
email_body = self.jinja2.render_template(email_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': email_body,
'sender': self.app.config.get('contact_sender'),
})
#Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('edit-profile')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-password')
except (AttributeError, TypeError), e:
login_error_message = _('Your session has expired.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditPasswordForm(self)
class EditEmailHandler(BaseHandler):
"""
Handler for Edit User's Email
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit email """
params = {}
if self.user:
user_info = self.user_model.get_by_id(long(self.user_id))
params['current_email'] = user_info.email
return self.render_template('edit_email.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
new_email = self.form.new_email.data.strip()
password = self.form.password.data.strip()
try:
user_info = self.user_model.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
try:
# authenticate user by its password
user = self.user_model.get_by_auth_password(auth_id, password)
                # if the user changed his/her email address
if new_email != user.email:
# check whether the new email has been used by another user
aUser = self.user_model.get_by_email(new_email)
if aUser is not None:
message = _("The email %s is already registered." % new_email)
self.add_message(message, 'error')
return self.redirect_to("edit-email")
# send email
subject = _("%s Email Changed Notification" % self.app.config.get('app_name'))
user_token = self.user_model.create_auth_token(self.user_id)
confirmation_url = self.uri_for("email-changed-check",
user_id=user_info.get_id(),
encoded_email=utils.encode(new_email),
token=user_token,
_full=True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"new_email": new_email,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
old_body_path = "emails/email_changed_notification_old.txt"
old_body = self.jinja2.render_template(old_body_path, **template_val)
new_body_path = "emails/email_changed_notification_new.txt"
new_body = self.jinja2.render_template(new_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': old_body,
})
taskqueue.add(url=email_url, params={
'to': new_email,
'subject': subject,
'body': new_body,
})
# display successful message
msg = _(
"Please check your new email for confirmation. Your email will be updated after confirmation.")
self.add_message(msg, 'success')
return self.redirect_to('edit-profile')
else:
self.add_message(_("You didn't change your email."), "warning")
return self.redirect_to("edit-email")
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-email')
except (AttributeError, TypeError), e:
login_error_message = _('Your session has expired.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditEmailForm(self)
class PasswordResetHandler(BaseHandler):
"""
Password Reset Handler with Captcha
"""
def get(self):
chtml = captcha.displayhtml(
public_key=self.app.config.get('captcha_public_key'),
use_ssl=(self.request.scheme == 'https'),
error=None)
if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \
self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE":
chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
'<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
'for API keys</a> in order to use reCAPTCHA.</div>' \
'<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
'<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
params = {
'captchahtml': chtml,
}
return self.render_template('password_reset.html', **params)
def post(self):
# check captcha
challenge = self.request.POST.get('recaptcha_challenge_field')
response = self.request.POST.get('recaptcha_response_field')
remote_ip = self.request.remote_addr
cResponse = captcha.submit(
challenge,
response,
self.app.config.get('captcha_private_key'),
remote_ip)
if cResponse.is_valid:
# captcha was valid... carry on..nothing to see here
pass
else:
_message = _('Wrong image verification code. Please try again.')
self.add_message(_message, 'error')
return self.redirect_to('password-reset')
#check if we got an email or username
email_or_username = str(self.request.POST.get('email_or_username')).lower().strip()
if utils.is_email_valid(email_or_username):
user = self.user_model.get_by_email(email_or_username)
_message = _("If the email address you entered") + " (<strong>%s</strong>) " % email_or_username
else:
auth_id = "own:%s" % email_or_username
user = self.user_model.get_by_auth_id(auth_id)
_message = _("If the username you entered") + " (<strong>%s</strong>) " % email_or_username
_message = _message + _("is associated with an account in our records, you will receive "
"an email from us with instructions for resetting your password. "
"<br>If you don't receive instructions within a minute or two, "
"check your email's spam and junk filters, or ") + \
'<a href="' + self.uri_for('contact') + '">' + _('contact us') + '</a> ' + _(
"for further assistance.")
if user is not None:
user_id = user.get_id()
token = self.user_model.create_auth_token(user_id)
email_url = self.uri_for('taskqueue-send-email')
reset_url = self.uri_for('password-reset-check', user_id=user_id, token=token, _full=True)
subject = _("%s Password Assistance" % self.app.config.get('app_name'))
# load email's template
template_val = {
"username": user.username,
"email": user.email,
"reset_password_url": reset_url,
"support_url": self.uri_for("contact", _full=True),
"app_name": self.app.config.get('app_name'),
}
body_path = "emails/reset_password.txt"
body = self.jinja2.render_template(body_path, **template_val)
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': body,
'sender': self.app.config.get('contact_sender'),
})
self.add_message(_message, 'warning')
return self.redirect_to('login')
class PasswordResetCompleteHandler(BaseHandler):
"""
    Handler to process the password reset link that the user received
"""
def get(self, user_id, token):
verify = self.user_model.get_by_auth_token(int(user_id), token)
params = {}
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid. '
'Enter your details again below to get a new one.')
self.add_message(message, 'warning')
return self.redirect_to('password-reset')
else:
return self.render_template('password_reset_complete.html', **params)
def post(self, user_id, token):
verify = self.user_model.get_by_auth_token(int(user_id), token)
user = verify[0]
password = self.form.password.data.strip()
if user and self.form.validate():
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# Delete token
self.user_model.delete_auth_token(int(user_id), token)
# Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('home')
else:
self.add_message(_('The two passwords must match.'), 'error')
return self.redirect_to('password-reset-check', user_id=user_id, token=token)
@webapp2.cached_property
def form(self):
return forms.PasswordResetCompleteForm(self)
class EmailChangedCompleteHandler(BaseHandler):
"""
    Handler for a completed email change
    Called when the user clicks the confirmation link from the email
"""
def get(self, user_id, encoded_email, token):
verify = self.user_model.get_by_auth_token(int(user_id), token)
email = utils.decode(encoded_email)
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid.')
self.add_message(message, 'warning')
self.redirect_to('home')
else:
# save new email
user = verify[0]
user.email = email
user.put()
# delete token
self.user_model.delete_auth_token(int(user_id), token)
# add successful message and redirect
message = _('Your email has been successfully updated.')
self.add_message(message, 'success')
self.redirect_to('edit-profile')
class HomeRequestHandler(RegisterBaseHandler):
"""
Handler to show the home page
"""
def get(self):
""" Returns a simple HTML form for home """
params = {}
return self.render_template('home.html', **params)
class RobotsHandler(BaseHandler):
def get(self):
params = {
'scheme': self.request.scheme,
'host': self.request.host,
}
self.response.headers['Content-Type'] = 'text/plain'
def set_variables(text, key):
return text.replace("{{ %s }}" % key, params[key])
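        # Fold set_variables over the params keys: reduce() starts from the raw
        # template text and replaces each "{{ key }}" placeholder with its value.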
self.response.write(reduce(set_variables, params, open("bp_content/themes/%s/templates/seo/robots.txt" % self.get_theme).read()))
class HumansHandler(BaseHandler):
def get(self):
params = {
'scheme': self.request.scheme,
'host': self.request.host,
}
self.response.headers['Content-Type'] = 'text/plain'
def set_variables(text, key):
return text.replace("{{ %s }}" % key, params[key])
self.response.write(reduce(set_variables, params, open("bp_content/themes/%s/templates/seo/humans.txt" % self.get_theme).read()))
class SitemapHandler(BaseHandler):
def get(self):
params = {
'scheme': self.request.scheme,
'host': self.request.host,
}
self.response.headers['Content-Type'] = 'application/xml'
def set_variables(text, key):
return text.replace("{{ %s }}" % key, params[key])
self.response.write(reduce(set_variables, params, open("bp_content/themes/%s/templates/seo/sitemap.xml" % self.get_theme).read()))
class CrossDomainHandler(BaseHandler):
def get(self):
params = {
'scheme': self.request.scheme,
'host': self.request.host,
}
self.response.headers['Content-Type'] = 'application/xml'
def set_variables(text, key):
return text.replace("{{ %s }}" % key, params[key])
self.response.write(reduce(set_variables, params, open("bp_content/themes/%s/templates/seo/crossdomain.xml" % self.get_theme).read()))
| mit | -1,630,761,658,250,618,400 | 42.562797 | 142 | 0.522597 | false |
linsalrob/PhageHosts | code/codon_distance.py | 1 | 2853 | '''
Calculate the distance between two codon usages.
We have two files: the first with just the phages and the second with
their hosts. For each phage we then determine which of the hosts is
closest.
'''
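# Assumed input format (inferred from the parsing code below): both files are
# tab-delimited, with a header row starting with 'Locus' that names the codon
# columns, and one row per sequence: an identifier followed by codon frequencies.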
import os
import sys
sys.path.append('/home3/redwards/bioinformatics/Modules')
import numpy as np
import scipy
remove_ambiguous = True  # if True, drop codons that contain ambiguous bases
codons = set([
'AAA', 'AAC', 'AAG', 'AAT', 'ACA', 'ACC', 'ACG', 'ACT',
'AGA', 'AGC', 'AGG', 'AGT', 'ATA', 'ATC', 'ATG', 'ATT',
'CAA', 'CAC', 'CAG', 'CAT', 'CCA', 'CCC', 'CCG', 'CCT',
'CGA', 'CGC', 'CGG', 'CGT', 'CTA', 'CTC', 'CTG', 'CTT',
'GAA', 'GAC', 'GAG', 'GAT', 'GCA', 'GCC', 'GCG', 'GCT',
'GGA', 'GGC', 'GGG', 'GGT', 'GTA', 'GTC', 'GTG', 'GTT',
'TAA', 'TAC', 'TAG', 'TAT', 'TCA', 'TCC', 'TCG', 'TCT',
'TGA', 'TGC', 'TGG', 'TGT', 'TTA', 'TTC', 'TTG', 'TTT'
])
def distance(x, y):
'''
    Calculate the Euclidean distance between two codon usages. An alternate
    solution would be to use either np.linalg.norm or scipy.spatial,
    but neither of these is working on my system.'''
return np.sqrt(np.sum((x-y)**2))
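# Illustrative example (hypothetical values):
#   distance(np.array([0.2, 0.8]), np.array([0.4, 0.6]))  # -> sqrt(0.08) ~= 0.283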
def remove_ambiguous_bases(header, cds):
'''
Remove any codons that contain ambiguous bases.
'''
temp=[cds[0]]
for i in range(1,len(header)):
if header[i] in codons:
temp.append(cds[i])
return temp
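# Illustrative example (hypothetical values): with header ['Locus', 'AAA', 'NNN', 'AAC']
# and cds ['phage1', '0.1', '0.02', '0.3'], the 'NNN' column is dropped because it is
# not an unambiguous codon, giving ['phage1', '0.1', '0.3'].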
try:
phageF = sys.argv[1]
bactF = sys.argv[2]
except:
sys.exit(sys.argv[0] + " <phage file> <hosts file>\n")
cds = {}
header = None
with open(bactF, 'r') as bf:
for line in bf:
if line.startswith('Locus'):
header = line.strip().split("\t")
for i in range(len(header)):
header[i] = header[i].strip()
continue
line = line.rstrip()
p = line.split("\t")
if remove_ambiguous:
p = remove_ambiguous_bases(header, p)
cds[p[0]] = np.array([float(x) for x in p[1:len(p)]])
header = None
with open(phageF, 'r') as ph:
for line in ph:
if line.startswith('Locus'):
header = line.strip().split("\t")
for i in range(len(header)):
header[i] = header[i].strip()
continue
line = line.rstrip()
p = line.split("\t")
lowestScore = 1000
bestHits = []
if remove_ambiguous:
p = remove_ambiguous_bases(header, p)
a1 = np.array([float(x) for x in p[1:len(p)]])
for c in cds:
#dist = scipy.spatial.distance.cdist(a1, cds[c])
#dist = np.linalg.norm(a1-cds[c])
dist = distance(a1, cds[c])
if dist < lowestScore:
lowestScore = dist
bestHits = [c]
elif dist == lowestScore:
bestHits.append(c)
print p[0]+ "\t" + "\t".join(bestHits)
| mit | -4,651,376,398,173,520,000 | 29.351064 | 72 | 0.539783 | false |
MTgeophysics/mtpy | tests/analysis/test_pt.py | 1 | 7128 | # -*- coding: utf-8 -*-
"""
TEST mtpy.core.mt.MT
@author: YG
"""
from unittest import TestCase
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 13:19:35 2017
@author: u64125
[email protected]
"""
import os
import numpy as np
from mtpy.core.mt import MT
from tests import TEST_MTPY_ROOT
import mtpy.analysis.geometry as mtg
class Test_PT(TestCase):
def test_pt(self):
self.mtobj = MT(os.path.normpath(os.path.join(TEST_MTPY_ROOT, "examples/data/edi_files/pb42c.edi")))
self.pt_expected = np.array([[[ 1.30644963e+00, -2.67740187e-02],
[ -1.33702443e-02, 1.28968939e+00]],
[[ 1.21678059e+00, -1.07765729e-02],
[ -8.20007589e-03, 1.23374034e+00]],
[[ 1.17164177e+00, 1.09018782e-03],
[ -6.68510048e-03, 1.18271654e+00]],
[[ 1.22540541e+00, 4.38999476e-03],
[ -4.20009647e-03, 1.24116127e+00]],
[[ 1.22262143e+00, -1.27947436e-02],
[ -4.73195876e-03, 1.25493677e+00]],
[[ 1.21501297e+00, -8.79427102e-03],
[ 1.03830156e-02, 1.22427493e+00]],
[[ 1.22785045e+00, 1.39792917e-02],
[ -7.08673035e-03, 1.23846962e+00]],
[[ 1.26661703e+00, -1.11292454e-02],
[ 1.82801360e-03, 1.26240177e+00]],
[[ 1.18539706e+00, 6.39442474e-03],
[ -1.01453767e-02, 1.25514910e+00]],
[[ 1.28549981e+00, -1.00606766e-01],
[ 3.97760695e-02, 1.32053655e+00]],
[[ 1.22555721e+00, -6.29531701e-02],
[ 3.36638894e-02, 1.24514491e+00]],
[[ 1.15217304e+00, 2.47597860e-02],
[ -4.69132792e-02, 1.28928907e+00]],
[[ 1.07175797e+00, -3.58092355e-03],
[ -3.12450311e-02, 1.19733081e+00]],
[[ 1.00918431e+00, -1.48723334e-02],
[ -1.04135860e-03, 1.06274597e+00]],
[[ 9.15517149e-01, -7.13677311e-03],
[ 4.49100302e-03, 9.67281170e-01]],
[[ 7.82696110e-01, 1.70157289e-02],
[ 1.87039067e-02, 8.29411722e-01]],
[[ 7.05442477e-01, 3.78377052e-02],
[ 2.11076586e-02, 7.39844699e-01]],
[[ 6.35185233e-01, 4.73463102e-02],
[ 3.31681155e-02, 6.45232848e-01]],
[[ 5.55546920e-01, 6.54610202e-02],
[ 6.89078895e-02, 5.23858436e-01]],
[[ 5.33096567e-01, 7.08103577e-02],
[ 6.49382268e-02, 4.46884668e-01]],
[[ 5.27354094e-01, 8.09968253e-02],
[ 1.96849609e-02, 3.71188472e-01]],
[[ 5.11384716e-01, 8.77380469e-02],
[ 1.36652476e-02, 2.64391007e-01]],
[[ 5.07676485e-01, 8.88590722e-02],
[ -2.89224644e-03, 2.26830209e-01]],
[[ 5.32226186e-01, 7.99515723e-02],
[ -8.08381040e-03, 1.72606458e-01]],
[[ 5.88599443e-01, 7.82062018e-02],
[ -8.45485953e-03, 1.64746123e-01]],
[[ 6.08649155e-01, 8.25165235e-02],
[ -2.18321304e-02, 1.89799568e-01]],
[[ 6.72877101e-01, 7.17000488e-02],
[ -8.23242896e-02, 2.38847621e-01]],
[[ 7.83704974e-01, 9.35718439e-02],
[ -1.08804893e-01, 2.69048188e-01]],
[[ 8.10341816e-01, 9.92141045e-02],
[ -1.26495824e-01, 2.81539705e-01]],
[[ 9.44396211e-01, 9.79869018e-02],
[ -1.86664281e-01, 3.53878350e-01]],
[[ 1.20372744e+00, 1.43106117e-01],
[ -1.82486049e-01, 4.45265471e-01]],
[[ 1.16782854e+00, 1.13799885e-01],
[ -1.75825646e-01, 4.46497807e-01]],
[[ 1.34754960e+00, 7.86821351e-02],
[ -1.52050649e-01, 5.27637774e-01]],
[[ 1.54766037e+00, 1.07732214e-01],
[ -1.24203091e-01, 6.35758473e-01]],
[[ 1.57964820e+00, 7.39413746e-02],
[ -1.02148722e-01, 6.66546887e-01]],
[[ 1.62101014e+00, 9.00546725e-02],
[ -5.05253680e-02, 7.14423033e-01]],
[[ 1.68957924e+00, 3.97165705e-02],
[ 4.57251401e-02, 7.76737215e-01]],
[[ 1.66003469e+00, 3.22243697e-02],
[ 9.00225059e-02, 8.14143062e-01]],
[[ 1.62779118e+00, 3.26316490e-03],
[ 1.68213765e-01, 7.85939990e-01]],
[[ 1.51783857e+00, -1.45050231e-02],
[ 2.23460898e-01, 7.96441583e-01]],
[[ 1.41377974e+00, -3.64217144e-02],
[ 2.56732302e-01, 8.12803360e-01]],
[[ 1.32448223e+00, -9.04193565e-02],
[ 2.46858147e-01, 8.54516882e-01]],
[[ 1.22981959e+00, -1.86648528e-01],
[ 3.20105326e-01, 8.15014902e-01]]])
assert(np.all(np.abs((self.pt_expected - self.mtobj.pt.pt)/self.pt_expected) < 1e-6))
alpha_expected = np.array([-33.66972565, -65.89384737, -76.59867325, 89.65473659,
-75.76307747, 85.13326608, 73.50684783, -32.810132 ,
-88.46092736, -59.97035554, -61.88664666, -85.4110878 ,
-82.24967714, -81.72640079, -88.53701804, 71.29889577,
60.1345369 , 48.55666153, 38.3651419 , 28.79048968,
16.40517236, 11.16030354, 8.50965433, 5.65066256,
4.67255493, 4.12192474, -0.70110747, -0.84768598,
-1.47667976, -4.27011302, -1.48608617, -2.45732916,
-2.55670157, -0.51738522, -0.88470366, 1.24832387,
2.67364329, 4.11167901, 5.75654718, 8.07694833,
10.06615916, 9.20560479, 8.91737594])
beta_expected = np.array([-0.14790673, -0.03012061, 0.09460956, 0.09976904, -0.09322928,
-0.22522043, 0.24468941, -0.14677427, 0.19414636, -1.54172397,
-1.11970814, 0.84076362, 0.3492499 , -0.19123344, -0.17692124,
-0.02999968, 0.33160131, 0.31720792, -0.09148111, 0.17165854,
1.95175741, 2.72709705, 3.56012648, 3.55975888, 3.28108606,
3.72287137, 4.79442926, 5.44077452, 5.8397381 , 6.18330647,
5.58466467, 5.08560032, 3.50735531, 3.03177428, 2.24126272,
1.7223648 , -0.06979335, -0.66910857, -1.95471268, -2.93540374,
-3.75023764, -4.39936596, -6.95935213])
azimuth_expected = alpha_expected-beta_expected
assert(np.all(np.abs((alpha_expected - self.mtobj.pt.alpha)/alpha_expected) < 1e-6))
assert(np.all(np.abs((beta_expected - self.mtobj.pt.beta)/beta_expected) < 1e-6))
assert(np.all(np.abs((azimuth_expected - self.mtobj.pt.azimuth)/azimuth_expected) < 1e-6))
# pi1 = 0.5*((self.pt_expected[:,0,0] - self.pt_expected[:,1,1])**2 +\
# (self.pt_expected[:,0,1] + self.pt_expected[:,1,0])**2)**0.5
# pi2 = 0.5*((self.pt_expected[:,0,0] + self.pt_expected[:,1,1])**2 +\
# (self.pt_expected[:,0,1] - self.pt_expected[:,1,0])**2)**0.5
# phimin_expected = np.degrees(pi2 - pi1)
# phimax_expected = np.degrees(pi2 + pi1)
# assert(np.all(np.abs(phimin_expected - self.mtobj.pt.phimin)/phimin_expected) < 1e-6)
# assert(np.all(np.abs(phimax_expected - self.mtobj.pt.phimax)/phimax_expected) < 1e-6) | gpl-3.0 | 4,011,371,443,513,376,300 | 34.824121 | 108 | 0.542789 | false |