repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
moullos/UnlimitID | UnlimitID/User/views.py | 1 | 3180 | import os
import requests
from flask import redirect, url_for, render_template, request, flash
from forms import RegisterForm, CredentialForm
from petlib.pack import encode, decode
from .cred_user import CredentialUser
def setUpViews(app, crypto_dir, credential_url=None, info_url=None, params=None, ipub=None):
cs = CredentialUser(os.path.join(
app.instance_path, 'User', crypto_dir), info_url, params, ipub)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/get_credential', methods=['GET', 'POST'])
def get_credential():
""" The page for the user to obtain credentials.
The encrypted private attribute is given to the server
along with the public attributes
"""
all_keys = ['name', 'given_name', 'family_name', 'email',
'gender', 'zoneinfo', 'birthdate']
form = CredentialForm(request.form)
form.keys.choices = zip(all_keys, all_keys)
if request.method == 'POST' and form.validate():
email = form.email.data
password = form.password.data
keys = form.keys.data
if keys == []:
form.keys.errors.append(
'A credential needs to contain at least 1 key')
return render_template('credential.html', form=form)
try:
user_token = cs.get_user_token()
r = requests.post(
credential_url,
data=encode((email, password, keys, user_token))
)
cred_token = decode(r.content)
except Exception:
flash('Could not get credential')
return redirect(url_for('home'))
cs.issue_verify(cred_token, user_token)
flash('Got a credential for you')
return redirect(url_for('home'))
return render_template('credential.html', form=form)
@app.route('/show', methods=['GET', 'POST'])
def show():
creds = cs.list_credential_tokens()
if creds == []:
flash('Could not load credential. Do you have one?')
return render_template('home.html')
form = RegisterForm(request.form)
form.credential.choices = creds
form.credential.default = creds[0][0]
if request.method == 'POST' and form.validate():
service_name = form.service_name.data
credential_id = form.credential.data
show_proof = cs.show(service_name, credential_id)
file_dir = os.path.join(
app.instance_path, 'User', 'Show')
filename = 'show_{}'.format(service_name)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
with open(os.path.join(file_dir, filename), 'wb+') as f:
f.write(encode(show_proof))
flash("Created show for {} at {}".format(service_name, filename))
return redirect(url_for('home'))
return render_template('show.html', form=form)
return app, cs
| bsd-2-clause | -6,402,558,580,113,435,000 | 39.253165 | 92 | 0.571698 | false |
painless-software/painless-continuous-delivery | tests/acceptance/environment.py | 1 | 2100 | """
Painless environment setup for acceptance tests. Powered by behave.
Visit the docs at
https://behave.readthedocs.io/en/latest/tutorial.html#environmental-controls
"""
from contextlib import contextmanager
from os import chdir, getcwd, system
from os.path import dirname, join
from shutil import rmtree
from tempfile import mkdtemp
def before_all(context):
"""
Before the first test starts, find out and create directory paths we want
to use.
"""
@contextmanager
def safe_chdir(path):
"""Restore the original directory when leaving the with-clause"""
old_path = getcwd()
chdir(path)
try:
yield
finally:
chdir(old_path)
def set_logfilename(name):
"""Set the logfile context value using for logging system calls"""
context.logfile = join(context.temp_dir, name + '.log')
def log_run(command):
"""Run system commands, log their output, return the exit status"""
context.exit_code = system('{command} > {logfile} 2>&1'.format(
command=command,
logfile=context.logfile,
))
with open(context.logfile) as logfile:
context.log = logfile.read()
return context.exit_code
def explain_log(message):
"""Helper function for assertions"""
return '{message}\n' \
'----------------- (log follows)\n' \
'{log}'.format(message=message, log=context.log)
context.safe_chdir = safe_chdir
context.set_logfilename = set_logfilename
context.log_run = log_run
context.explain_log = explain_log
context.project_dir = dirname(dirname(dirname(__file__)))
context.temp_dir = mkdtemp(prefix='painless-acceptance-tests-')
def before_scenario(context, scenario):
pass
def after_scenario(context, scenario):
"""
Clean up cookiecutter data after each scenario.
"""
if context.generated_dir:
rmtree(context.generated_dir)
def after_all(context):
"""
After all tests, do cleanup work.
"""
rmtree(context.temp_dir)
| apache-2.0 | 1,341,203,438,349,643,800 | 27.767123 | 77 | 0.637143 | false |
iotile/coretools | transport_plugins/jlink/iotile_transport_jlink/structures.py | 1 | 4490 | """Helper routines for processing data received from an IOTile device."""
import struct
from iotile.core.exceptions import HardwareError
class ControlStructure:
"""A shared memory control structure allowing bidirectional communication with an IOTile device."""
# The control structure in RAM is 16 bytes long
# 4 bytes of 0xAA
# The ASCII characters IOTileCN
# 4 bytes of 0xBB
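# As parsed below via struct.unpack_from("<LLLLBBH", ...), those 16 magic bytes are
# followed by a 1-byte version, a 1-byte flags field and a 2-byte total length,
# giving the 20-byte header that _parse_control_structure() skips before reading the UUID.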
CONTROL_MAGIC_1 = 0xAAAAAAAA
CONTROL_MAGIC_2 = 0x69544f49
CONTROL_MAGIC_3 = 0x4e43656c
CONTROL_MAGIC_4 = 0xBBBBBBBB
KNOWN_VERSIONS = frozenset([1, 2])
# The offset from the start of our debug info to where the tb_fabric_tls_t structure is located
RPC_TLS_OFFSET = 28
QUEUE_OFFSET = 68
FRAME_SIZE = 21
def __init__(self, address, raw_data):
self.base_address = address
magic1, magic2, magic3, magic4, version, flags, length = struct.unpack_from("<LLLLBBH", raw_data)
if magic1 != self.CONTROL_MAGIC_1 or magic2 != self.CONTROL_MAGIC_2 or magic3 != self.CONTROL_MAGIC_3 or magic4 != self.CONTROL_MAGIC_4:
raise HardwareError("Invalid control structure with an incorrect magic number", base_address=address)
self.version = version
self.flags = flags
if len(raw_data) < length:
raise HardwareError("Control structure raw data is too short for encoded length", encoded_length=length, received_length=len(raw_data))
elif len(raw_data) > length:
raw_data = raw_data[:length]
if version not in self.KNOWN_VERSIONS:
raise HardwareError("Unknown version embedded in control structure", version=version, known_versions=self.KNOWN_VERSIONS)
self._parse_control_structure(raw_data)
def _parse_control_structure(self, data):
# Skip the header information
data = data[20:]
self.uuid, = struct.unpack_from("<L", data)
def poll_info(self):
"""Return the address and mask to determine if an RPC is finished."""
return self.base_address + self.RPC_TLS_OFFSET + 11, (1 << 2) | (1 << 3)
def state_info(self):
"""Return the address of state flags."""
return self.base_address + 24
def counter_info(self):
"""Return the address of watch counter that should be incremented by jlink adapter"""
return self.state_info() + 3
def queue_info(self):
"""Return adress of read_index, write_index and size of the circular tracing/streaming queue"""
return self.base_address + self.QUEUE_OFFSET, \
self.base_address + self.QUEUE_OFFSET + 1, \
self.base_address + self.QUEUE_OFFSET + 2
def queue_element_info(self, element_index):
"""Return start address of queue element header and data"""
queue_info_offset = self.base_address + self.QUEUE_OFFSET + 4
header_address = queue_info_offset + element_index * self.FRAME_SIZE
return header_address, header_address + 1
def response_info(self):
"""Return the address and read size of the RPC resonse storage area."""
return self.base_address + self.RPC_TLS_OFFSET + 8, 32
def format_rpc(self, address, rpc_id, payload):
"""Create a formated word list that encodes this rpc."""
addr_word = (rpc_id | (address << 16) | ((1 << 1) << 24))
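# addr_word packs the call into one 32-bit word: the 16-bit rpc_id in the low half,
# the destination address in bits 16-23, and bit 25 (0x02 << 24) set in the top byte,
# presumably a command/flag marker defined by the device firmware.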
send_length = len(payload)
if len(payload) < 20:
payload = payload + b'\0'*(20 - len(payload))
payload_words = struct.unpack("<5L", payload)
return self.base_address + self.RPC_TLS_OFFSET + 8, ([addr_word, send_length, 0] + [x for x in payload_words])
def format_response(self, response_data):
"""Format an RPC response."""
_addr, length = self.response_info()
if len(response_data) != length:
raise HardwareError("Invalid response read length, should be the same as what response_info() returns", expected=length, actual=len(response_data))
resp, flags, received_length, payload = struct.unpack("<HxBL4x20s", response_data)
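# Response layout ("<HxBL4x20s", 32 bytes in total): a 2-byte status, 1 pad byte,
# a 1-byte flags field, a 4-byte received payload length, 4 pad bytes and a fixed
# 20-byte payload buffer, matching the 32-byte read size returned by response_info().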
resp = resp & 0xFF
if flags & (1 << 3):
raise HardwareError("Could not grab external gate", external_gate_error=1)
if received_length > 20:
raise HardwareError("Invalid received payload length > 20 bytes", received_length=received_length)
payload = payload[:received_length]
return {
'status': resp,
'payload': payload
}
| gpl-3.0 | 2,727,295,139,619,318,300 | 36.107438 | 159 | 0.641425 | false |
azam-a/batcher | batch_apps/tests/test_integrations.py | 1 | 9337 | from django.test import TestCase
from django_mailbox.models import Message
from batch_apps.models import App, Day, Execution
from batch_apps.generator import get_current_date_in_gmt8
from batch_apps.integration import (
execute_end_to_end_tasks,
get_unexecuted_due_executions,
get_unprocessed_unmatched_emails,
)
from batch_apps.matcher import match_subject
import datetime
import pytz
class EmailExecutionAppPatternMatcherTest(TestCase):
fixtures = ['test_apps.json', 'test_messages.json']
def test_email_matches_single_active_daily_app_with_single_active_pattern_using_low_level_steps(self):
app = App.objects.get(name="SGDailyAppTask SendExpiringNotice")
self.assertTrue(app.is_active)
self.assertEqual(app.frequency, 'daily')
pattern_list = app.pattern_set.filter(is_active=True)
self.assertEqual(len(pattern_list), 1)
pattern = pattern_list[0]
self.assertEqual(pattern.name_pattern, "SendExpiringNotice Success")
self.assertTrue(pattern.is_active)
day = Execution.objects._get_or_create_day_object(datetime.date(2014, 10, 20))
execution = Execution.objects._get_or_create_execution_object(day, app)
self.assertTrue(execution.is_due_today)
self.assertFalse(execution.is_executed)
email = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>")
self.assertEqual(email.subject, "Batch App - SGDailyAppTask SendExpiringNotice Success")
self.assertEqual(str(email.sent_time), "2014-10-20 02:31:25+00:00")
self.assertFalse(email.processed_batch_apps)
self.assertFalse(email.matched_batch_apps)
matched = match_subject(str(pattern), email.subject)
self.assertTrue(matched)
email.matched_batch_apps = True
email.processed_batch_apps = True
email.save()
execution.is_executed = True
execution.email = email
execution.save()
email_recheck = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>")
self.assertEqual(email_recheck, email)
self.assertTrue(email_recheck.processed_batch_apps)
self.assertTrue(email_recheck.matched_batch_apps)
execution_recheck = Execution.objects._get_or_create_execution_object(day, app)
self.assertTrue(execution_recheck.is_executed)
self.assertEqual(execution_recheck.email, email)
def test_execute_end_to_end_module_using_fixture_should_pass(self):
app = App.objects.get(name="SGDailyAppTask SendExpiringNotice")
self.assertTrue(app.is_active)
self.assertEqual(app.frequency, 'daily')
pattern_list = app.pattern_set.filter(is_active=True)
self.assertEqual(len(pattern_list), 1)
pattern = pattern_list[0]
self.assertEqual(pattern.name_pattern, "SendExpiringNotice Success")
self.assertTrue(pattern.is_active)
email = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>")
self.assertEqual(email.subject, "Batch App - SGDailyAppTask SendExpiringNotice Success")
self.assertEqual(str(email.sent_time), "2014-10-20 02:31:25+00:00")
self.assertFalse(email.processed_batch_apps)
self.assertFalse(email.matched_batch_apps)
execute_end_to_end_tasks(datetime.date(2014, 10, 20))
email_recheck = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>")
self.assertEqual(email_recheck, email)
self.assertTrue(email_recheck.processed_batch_apps)
self.assertTrue(email_recheck.matched_batch_apps)
execution_recheck = Execution.objects.get(app=app, day__date=datetime.date(2014, 10, 20))
self.assertTrue(execution_recheck.is_executed)
self.assertEqual(execution_recheck.email, email)
def test_execute_end_to_end_module_should_default_to_today_if_date_is_not_given(self):
execute_end_to_end_tasks()
day = Day.objects.get(pk=1)
self.assertEqual(len(Day.objects.all()), 1)
self.assertEqual(get_current_date_in_gmt8(), day.date)
class EmailFilteringTest(TestCase):
fixtures = ['test_messages.json']
def setUp(self):
self.email1 = Message.objects.get(message_id="<CAFKhJv2VAg2jx7o+Y+Kz_Ze72m7PAPq0Q8QjhC7_J+OVVnUvvg@mail.gmail.com>")
self.email2 = Message.objects.get(message_id="<CAFKhJv1ugtTL=ji5_JxZ9KwVxfqi_haYpGb+wJrekW7RUx0pRw@mail.gmail.com>")
self.email3 = Message.objects.get(message_id="<CAFKhJv21JtjnT74zzsrRuOwyEU1=1bnz2mzKV8e0_DAw0U46KA@mail.gmail.com>")
self.email4 = Message.objects.get(message_id="<CAFKhJv18p+O28UB2nQT1cTKL437GFM7SJpK=30x5j7+dNRtD7A@mail.gmail.com>")
self.email5 = Message.objects.get(message_id="<CAFKhJv0yhRMvdqF9JGabbHDH2Esw86Q9OZ40B52-y=MPLCyYBg@mail.gmail.com>")
self.email6 = Message.objects.get(message_id="<CAG6WN+9O6P7arbVA=M1Mz=_9cSJ-nOL47eB2DaVYN_iJvc-9Lg@mail.gmail.com>")
def test_get_unprocessed_unmatched_emails_should_return_unprocessed_emails(self):
self.email2.processed_batch_apps = True
self.email2.save()
self.assertFalse(self.email1.processed_batch_apps)
self.assertTrue(self.email2.processed_batch_apps)
results = get_unprocessed_unmatched_emails(datetime.date(2014, 10, 20))
self.assertIn(self.email1, results)
self.assertNotIn(self.email2, results)
def test_get_unprocessed_unmatched_emails_should_return_unmatched_emails(self):
self.email4.matched_batch_apps = True
self.email4.save()
self.assertFalse(self.email3.matched_batch_apps)
self.assertTrue(self.email4.matched_batch_apps)
results = get_unprocessed_unmatched_emails(datetime.date(2014, 10, 20))
self.assertIn(self.email3, results)
self.assertNotIn(self.email4, results)
def test_get_unprocessed_unmatched_emails_should_return_emails_with_correct_date(self):
self.email6.sent_time = datetime.datetime.now(pytz.timezone('Asia/Kuala_Lumpur'))
self.email6.save()
date_ = datetime.date(2014, 10, 20)
results = get_unprocessed_unmatched_emails(date_)
self.assertIn(self.email5, results)
self.assertNotIn(self.email6, results)
def test_get_unprocessed_unmatched_email_should_select_email_according_to_gmt8_timezone(self):
self.email1.sent_time = datetime.datetime(2014, 11, 27, hour=15, minute=59, second=59, tzinfo=pytz.utc)
self.email2.sent_time = datetime.datetime(2014, 11, 27, hour=16, minute=00, second=00, tzinfo=pytz.utc)
self.email3.sent_time = datetime.datetime(2014, 11, 28, hour=15, minute=59, second=59, tzinfo=pytz.utc)
self.email4.sent_time = datetime.datetime(2014, 11, 28, hour=16, minute=00, second=00, tzinfo=pytz.utc)
self.email1.save()
self.email2.save()
self.email3.save()
self.email4.save()
date_ = datetime.date(2014, 11, 28)
results = get_unprocessed_unmatched_emails(date_)
self.assertNotIn(self.email1, results)
self.assertIn(self.email2, results)
self.assertIn(self.email3, results)
self.assertNotIn(self.email4, results)
class ExecutionFilteringTest(TestCase):
def test_get_due_executions_should_return_executions_with_correct_date(self):
app1 = App.objects.create(name="My App 001")
day1 = Day.objects.create(date=datetime.date(2014, 10, 20))
day2 = Day.objects.create(date=datetime.date(2014, 10, 21))
execution1 = Execution.objects.create(day=day1, app=app1, is_executed=False, is_due_today=True)
execution2 = Execution.objects.create(day=day2, app=app1, is_executed=False, is_due_today=True)
date_ = datetime.date(2014, 10, 20)
results = get_unexecuted_due_executions(date_)
self.assertIn(execution1, results)
self.assertNotIn(execution2, results)
def test_get_due_executions_should_return_executions_due_on_the_date(self):
app1 = App.objects.create(name="My App 001")
app2 = App.objects.create(name="My App 002")
day = Day.objects.create(date=datetime.date(2014, 10, 20))
execution1 = Execution.objects.create(day=day, app=app1, is_executed=False, is_due_today=True)
execution2 = Execution.objects.create(day=day, app=app2, is_executed=False, is_due_today=False)
date_ = datetime.date(2014, 10, 20)
results = get_unexecuted_due_executions(date_)
self.assertIn(execution1, results)
self.assertNotIn(execution2, results)
def test_get_due_executions_should_return_unexecuted_executions(self):
app1 = App.objects.create(name="My App 001")
app2 = App.objects.create(name="My App 002")
day = Day.objects.create(date=datetime.date(2014, 10, 20))
execution1 = Execution.objects.create(day=day, app=app1, is_executed=False, is_due_today=True)
execution2 = Execution.objects.create(day=day, app=app2, is_executed=True, is_due_today=True)
date_ = datetime.date(2014, 10, 20)
results = get_unexecuted_due_executions(date_)
self.assertIn(execution1, results)
self.assertNotIn(execution2, results)
| mit | 4,092,921,209,710,291,000 | 44.325243 | 126 | 0.700118 | false |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | 1 | 7326 | # Copyright (C) 2015 Twitter, Inc.
"""Container for all plugable resource object logic used by the Ads API SDK."""
import dateutil.parser
import json
from datetime import datetime
from twitter_ads.utils import format_time
from twitter_ads.enum import ENTITY, TRANSFORM
from twitter_ads.http import Request
from twitter_ads.cursor import Cursor
from twitter_ads.utils import extract_response_headers, FlattenParams
def resource_property(klass, name, **kwargs):
"""Builds a resource object property."""
klass.PROPERTIES[name] = kwargs
def getter(self):
return getattr(self, '_%s' % name, kwargs.get('default', None))
if kwargs.get('readonly', False):
setattr(klass, name, property(getter))
else:
def setter(self, value):
setattr(self, '_%s' % name, value)
setattr(klass, name, property(getter, setter))
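# Illustrative (hypothetical) usage: resource_property(Campaign, 'name') adds a
# read/write 'name' property backed by '_name', while
# resource_property(Campaign, 'id', readonly=True) exposes '_id' as a read-only property.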
class Resource(object):
"""Base class for all API resource objects."""
def __init__(self, account):
self._account = account
@property
def account(self):
return self._account
def from_response(self, response, headers=None):
"""
Populates a given object's attributes from a parsed JSON API response.
This helper handles all necessary type coercions as it assigns
attribute values.
"""
if headers is not None:
limits = extract_response_headers(headers)
for k in limits:
setattr(self, k, limits[k])
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
transform = self.PROPERTIES[name].get('transform', None)
value = response.get(name, None)
if transform and transform == TRANSFORM.TIME and value:
setattr(self, attr, dateutil.parser.parse(value))
if isinstance(value, int) and value == 0:
continue # skip attribute
else:
setattr(self, attr, value)
return self
def to_params(self):
"""
Generates a dict of property values for the current object. This helper
handles all necessary type coercions as it generates its output.
"""
params = {}
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
value = getattr(self, attr, None) or getattr(self, name, None)
# skip attribute
if value is None:
continue
if isinstance(value, datetime):
params[name] = format_time(value)
elif isinstance(value, bool):
params[name] = str(value).lower()
else:
params[name] = value
return params
@classmethod
def all(klass, account, **kwargs):
"""Returns a Cursor instance for a given resource."""
resource = klass.RESOURCE_COLLECTION.format(account_id=account.id)
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(klass, request, init_with=[account])
@classmethod
def load(klass, account, id, **kwargs):
"""Returns an object instance for a given resource."""
resource = klass.RESOURCE.format(account_id=account.id, id=id)
response = Request(account.client, 'get', resource, params=kwargs).perform()
return klass(account).from_response(response.body['data'])
def reload(self, **kwargs):
"""
Reloads all attributes for the current object instance from the API.
"""
if not self.id:
return self
resource = self.RESOURCE.format(account_id=self.account.id, id=self.id)
response = Request(self.account.client, 'get', resource, params=kwargs).perform()
return self.from_response(response.body['data'])
def __repr__(self):
return '<{name} resource at {mem} id={id}>'.format(
name=self.__class__.__name__,
mem=hex(id(self)),
id=getattr(self, 'id', None)
)
def _validate_loaded(self):
if not self.id:
raise ValueError("""
Error! {klass} object not yet initialized,
call {klass}.load first.
""").format(klass=self.__class__)
def _load_resource(self, klass, id, **kwargs):
self._validate_loaded()
if id is None:
return klass.all(self, **kwargs)
else:
return klass.load(self, id, **kwargs)
class Batch(object):
_ENTITY_MAP = {
'LineItem': ENTITY.LINE_ITEM,
'Campaign': ENTITY.CAMPAIGN,
'TargetingCriteria': ENTITY.TARGETING_CRITERION
}
@classmethod
def batch_save(klass, account, objs):
"""
Makes batch request(s) for a passed in list of objects
"""
resource = klass.BATCH_RESOURCE_COLLECTION.format(account_id=account.id)
json_body = []
for obj in objs:
entity_type = klass._ENTITY_MAP[klass.__name__].lower()
obj_json = {'params': obj.to_params()}
if obj.id is None:
obj_json['operation_type'] = 'Create'
elif obj.to_delete is True:
obj_json['operation_type'] = 'Delete'
obj_json['params'][entity_type + '_id'] = obj.id
else:
obj_json['operation_type'] = 'Update'
obj_json['params'][entity_type + '_id'] = obj.id
json_body.append(obj_json)
resource = klass.BATCH_RESOURCE_COLLECTION.format(account_id=account.id)
response = Request(account.client,
'post', resource,
body=json.dumps(json_body),
headers={'Content-Type': 'application/json'}).perform()
# persist each entity
for obj, res_obj in zip(objs, response.body['data']):
obj = obj.from_response(res_obj)
class Persistence(object):
"""
Container for all persistence related logic used by API resource objects.
"""
@classmethod
@FlattenParams
def create(self, account, **kwargs):
"""
Create a new item.
"""
resource = self.RESOURCE_COLLECTION.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=kwargs).perform()
return self(account).from_response(response.body['data'])
def save(self):
"""
Saves or updates the current object instance depending on the
presence of `object.id`.
"""
if self.id:
method = 'put'
resource = self.RESOURCE.format(account_id=self.account.id, id=self.id)
else:
method = 'post'
resource = self.RESOURCE_COLLECTION.format(account_id=self.account.id)
response = Request(
self.account.client, method,
resource, params=self.to_params()).perform()
return self.from_response(response.body['data'])
def delete(self):
"""
Deletes the current object instance depending on the
presence of `object.id`.
"""
resource = self.RESOURCE.format(account_id=self.account.id, id=self.id)
response = Request(self.account.client, 'delete', resource).perform()
self.from_response(response.body['data'])
| mit | 6,827,451,712,188,625,000 | 32.149321 | 89 | 0.585995 | false |
Ziemin/telepathy-gabble | tests/twisted/file-transfer/file_transfer_helper.py | 2 | 23301 | import dbus
import socket
import hashlib
import time
import datetime
import os
from servicetest import EventPattern, assertEquals, assertSameSets, call_async
from gabbletest import exec_test, sync_stream, make_result_iq
import ns
from bytestream import create_from_si_offer, announce_socks5_proxy
import bytestream
from caps_helper import extract_data_forms, add_data_forms
from twisted.words.xish import domish, xpath
import constants as cs
class File(object):
DEFAULT_DATA = "What a nice file"
DEFAULT_NAME = "The foo.txt"
DEFAULT_CONTENT_TYPE = 'text/plain'
DEFAULT_DESCRIPTION = "A nice file to test"
def __init__(self, data=DEFAULT_DATA, name=DEFAULT_NAME,
content_type=DEFAULT_CONTENT_TYPE, description=DEFAULT_DESCRIPTION,
hash_type=cs.FILE_HASH_TYPE_MD5):
self.data = data
self.size = len(self.data)
self.name = name
self.content_type = content_type
self.description = description
self.date = int(time.time())
self.compute_hash(hash_type)
self.offset = 0
self.uri = 'file:///tmp/%s' % self.name
def compute_hash(self, hash_type):
assert hash_type == cs.FILE_HASH_TYPE_MD5
self.hash_type = hash_type
self.hash = hashlib.md5(self.data).hexdigest()
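# Note: compute_hash() only supports MD5; the hex digest stored above is what is later
# advertised in the SI offer's <file hash='...'/> attribute and checked against the
# FT_CONTENT_HASH channel property.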
class FileTransferTest(object):
CONTACT_NAME = 'test-ft@localhost'
CONTACT_FULL_JID = 'test-ft@localhost/Telepathy'
service_name = 'a.wacky.service.name'
metadata = {'loads': ['of', 'blahblah', 'stuff'],
'mental': ['data', 'sidf']}
def __init__(self, bytestream_cls, file, address_type, access_control, access_control_param):
self.file = file
self.bytestream_cls = bytestream_cls
self.address_type = address_type
self.access_control = access_control
self.access_control_param = access_control_param
def check_platform_socket_types(self, sock_types):
assertEquals(sock_types.get(cs.SOCKET_ADDRESS_TYPE_IPV4),
[cs.SOCKET_ACCESS_CONTROL_LOCALHOST])
assertEquals(sock_types.get(cs.SOCKET_ADDRESS_TYPE_IPV6),
[cs.SOCKET_ACCESS_CONTROL_LOCALHOST])
if os.name == 'posix':
# true on at least Linux
assertEquals(sock_types.get(cs.SOCKET_ADDRESS_TYPE_UNIX),
[cs.SOCKET_ACCESS_CONTROL_LOCALHOST])
def connect(self):
vcard_event, roster_event, disco_event = self.q.expect_many(
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'),
EventPattern('stream-iq', query_ns=ns.ROSTER),
EventPattern('stream-iq', to='localhost', query_ns=ns.DISCO_ITEMS))
roster = make_result_iq(self.stream, roster_event.stanza)
query = roster.firstChildElement()
item = query.addElement('item')
item['jid'] = self.CONTACT_FULL_JID
item['subscription'] = 'both'
self.stream.send(roster)
announce_socks5_proxy(self.q, self.stream, disco_event.stanza)
self.self_handle = self.conn.Properties.Get(cs.CONN, "SelfHandle")
self.self_handle_name = self.conn.inspect_contact_sync(self.self_handle)
def announce_contact(self, name=CONTACT_NAME, metadata=True):
self.contact_name = name
self.contact_full_jid = '%s/Telepathy' % name
self.handle = self.conn.get_contact_handle_sync(name)
presence = domish.Element(('jabber:client', 'presence'))
presence['from'] = self.contact_full_jid
presence['to'] = 'test@localhost/Resource'
c = presence.addElement('c')
c['xmlns'] = 'http://jabber.org/protocol/caps'
c['node'] = 'http://example.com/ISupportFT'
c['ver'] = '1.0'
self.stream.send(presence)
disco_event, presence_event = self.q.expect_many(
EventPattern('stream-iq', iq_type='get',
query_ns='http://jabber.org/protocol/disco#info', to=self.contact_full_jid),
EventPattern('dbus-signal', signal='PresencesChanged', args=[
{self.handle: (cs.PRESENCE_AVAILABLE, u'available', u'')}]))
assert disco_event.query['node'] == \
'http://example.com/ISupportFT#1.0'
result = make_result_iq(self.stream, disco_event.stanza)
query = result.firstChildElement()
feature = query.addElement('feature')
feature['var'] = ns.FILE_TRANSFER
if metadata:
feature = query.addElement('feature')
feature['var'] = ns.TP_FT_METADATA
self.stream.send(result)
sync_stream(self.q, self.stream)
def create_ft_channel(self):
ft_chan = self.bus.get_object(self.conn.object.bus_name, self.ft_path)
self.channel = dbus.Interface(ft_chan, cs.CHANNEL)
self.ft_channel = dbus.Interface(ft_chan, cs.CHANNEL_TYPE_FILE_TRANSFER)
self.ft_props = dbus.Interface(ft_chan, cs.PROPERTIES_IFACE)
def close_channel(self):
self.channel.Close()
self.q.expect('dbus-signal', signal='Closed')
def done(self):
pass
def test(self, q, bus, conn, stream):
self.q = q
self.bus = bus
self.conn = conn
self.stream = stream
for fct in self._actions:
# stop if a function returns True
if fct():
break
def create_socket(self):
if self.address_type == cs.SOCKET_ADDRESS_TYPE_UNIX:
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
elif self.address_type == cs.SOCKET_ADDRESS_TYPE_IPV4:
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif self.address_type == cs.SOCKET_ADDRESS_TYPE_IPV6:
return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
assert False
class ReceiveFileTest(FileTransferTest):
def __init__(self, bytestream_cls, file, address_type, access_control, access_control_param):
FileTransferTest.__init__(self, bytestream_cls, file, address_type, access_control, access_control_param)
self._actions = [self.connect, self.announce_contact,
self.send_ft_offer_iq, self.check_new_channel, self.create_ft_channel,
self.set_uri, self.accept_file,
self.receive_file, self.close_channel, self.done]
def send_ft_offer_iq(self):
self.bytestream = self.bytestream_cls(self.stream, self.q, 'alpha',
self.contact_full_jid, 'test@localhost/Resource', True)
iq, si = self.bytestream.create_si_offer(ns.FILE_TRANSFER)
file_node = si.addElement((ns.FILE_TRANSFER,'file'))
file_node['name'] = self.file.name
file_node['size'] = str(self.file.size)
file_node['mime-type'] = self.file.content_type
file_node['hash'] = self.file.hash
date = datetime.datetime.utcfromtimestamp(self.file.date).strftime('%FT%H:%M:%SZ')
file_node['date'] = date
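# i.e. an ISO-8601 style UTC timestamp such as 2014-10-20T02:31:25Z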
file_node.addElement('desc', content=self.file.description)
# we support range transfer
file_node.addElement('range')
# Metadata
if self.service_name:
service_form = {ns.TP_FT_METADATA_SERVICE: {'ServiceName': [self.service_name]}}
add_data_forms(file_node, service_form)
if self.metadata:
metadata_form = {ns.TP_FT_METADATA: self.metadata}
add_data_forms(file_node, metadata_form)
# so... lunch?
iq.send()
def check_new_channel(self):
def is_ft_channel_event(event):
channels, = event.args
if len(channels) > 1:
return False
path, props = channels[0]
return props[cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_FILE_TRANSFER
e = self.q.expect('dbus-signal', signal='NewChannels',
path=self.conn.object.object_path,
predicate=is_ft_channel_event)
channels = e.args[0]
assert len(channels) == 1
path, props = channels[0]
# check channel properties
# Channel D-Bus properties
assert props[cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_FILE_TRANSFER
assertSameSets(
[ cs.CHANNEL_IFACE_FILE_TRANSFER_METADATA,
], props[cs.INTERFACES])
assert props[cs.TARGET_HANDLE] == self.handle
assert props[cs.TARGET_ID] == self.contact_name
assert props[cs.TARGET_HANDLE_TYPE] == cs.HT_CONTACT
assert props[cs.REQUESTED] == False
assert props[cs.INITIATOR_HANDLE] == self.handle
assert props[cs.INITIATOR_ID] == self.contact_name
# Channel.Type.FileTransfer D-Bus properties
assert props[cs.FT_STATE] == cs.FT_STATE_PENDING
assert props[cs.FT_CONTENT_TYPE] == self.file.content_type
assert props[cs.FT_FILENAME] == self.file.name
assert props[cs.FT_SIZE] == self.file.size
# FT's protocol doesn't allow us to send the hash info
assert props[cs.FT_CONTENT_HASH_TYPE] == cs.FILE_HASH_TYPE_MD5
assert props[cs.FT_CONTENT_HASH] == self.file.hash
assert props[cs.FT_DESCRIPTION] == self.file.description
assert props[cs.FT_DATE] == self.file.date
assert props[cs.FT_TRANSFERRED_BYTES] == 0
assert props[cs.FT_INITIAL_OFFSET] == 0
self.check_platform_socket_types(props[cs.FT_AVAILABLE_SOCKET_TYPES])
assertEquals(self.service_name, props[cs.FT_SERVICE_NAME])
assertEquals(self.metadata, props[cs.FT_METADATA])
self.ft_path = path
def set_uri(self):
ft_props = dbus.Interface(self.ft_channel, cs.PROPERTIES_IFACE)
# URI is not set yet
uri = ft_props.Get(cs.CHANNEL_TYPE_FILE_TRANSFER, 'URI')
assertEquals('', uri)
# Setting URI
call_async(self.q, ft_props, 'Set',
cs.CHANNEL_TYPE_FILE_TRANSFER, 'URI', self.file.uri)
self.q.expect('dbus-signal', signal='URIDefined', args=[self.file.uri])
self.q.expect('dbus-return', method='Set')
# Check it has the right value now
uri = ft_props.Get(cs.CHANNEL_TYPE_FILE_TRANSFER, 'URI')
assertEquals(self.file.uri, uri)
# We can't change it once it has been set
call_async(self.q, ft_props, 'Set',
cs.CHANNEL_TYPE_FILE_TRANSFER, 'URI', 'badger://snake')
self.q.expect('dbus-error', method='Set', name=cs.INVALID_ARGUMENT)
def accept_file(self):
try:
self.address = self.ft_channel.AcceptFile(self.address_type,
self.access_control, self.access_control_param,
self.file.offset,
byte_arrays=True)
except dbus.DBusException, e:
if self.address_type == cs.SOCKET_ADDRESS_TYPE_IPV6 and \
e.get_dbus_name() == cs.NOT_AVAILABLE and \
e.get_dbus_message() == "Could not set up local socket":
print "Ignoring error for ipv6 address space"
return True
else:
raise e
state_event, iq_event = self.q.expect_many(
EventPattern('dbus-signal', signal='FileTransferStateChanged'),
EventPattern('stream-iq', iq_type='result'))
state, reason = state_event.args
assert state == cs.FT_STATE_ACCEPTED
assert reason == cs.FT_STATE_CHANGE_REASON_REQUESTED
# Got SI reply
self.bytestream.check_si_reply(iq_event.stanza)
if self.file.offset != 0:
range = xpath.queryForNodes('/iq/si/file/range', iq_event.stanza)[0]
assert range['offset'] == str(self.file.offset)
_, events = self.bytestream.open_bytestream([], [
EventPattern('dbus-signal', signal='InitialOffsetDefined'),
EventPattern('dbus-signal', signal='FileTransferStateChanged')])
offset_event, state_event = events
offset = offset_event.args[0]
assert offset == self.file.offset
state, reason = state_event.args
assert state == cs.FT_STATE_OPEN
assert reason == cs.FT_STATE_CHANGE_REASON_NONE
# send the beginning of the file (client didn't connect to socket yet)
self.bytestream.send_data(self.file.data[self.file.offset:self.file.offset + 2])
def receive_file(self):
# Connect to Gabble's socket
s = self.create_socket()
s.connect(self.address)
# send the rest of the file
i = self.file.offset + 2
self.bytestream.send_data(self.file.data[i:])
self._read_file_from_socket(s)
def _read_file_from_socket(self, s):
# Read the file from Gabble's socket
data = ''
to_receive = self.file.size - self.file.offset
e = self.q.expect('dbus-signal', signal='TransferredBytesChanged')
count = e.args[0]
while True:
received = s.recv(1024)
if len(received) == 0:
break
data += received
assert data == self.file.data[self.file.offset:]
while count < to_receive:
# Catch TransferredBytesChanged until we have transferred all the data
e = self.q.expect('dbus-signal', signal='TransferredBytesChanged')
count = e.args[0]
e = self.q.expect('dbus-signal', signal='FileTransferStateChanged')
state, reason = e.args
assert state == cs.FT_STATE_COMPLETED
assert reason == cs.FT_STATE_CHANGE_REASON_NONE
class SendFileTest(FileTransferTest):
def __init__(self, bytestream_cls, file, address_type, access_control, acces_control_param):
FileTransferTest.__init__(self, bytestream_cls, file, address_type, access_control, acces_control_param)
self._actions = [self.connect, self.announce_contact,
self.check_ft_available, self.request_ft_channel, self.create_ft_channel,
self.got_send_iq, self.provide_file, self.client_accept_file, self.send_file,
self.close_channel, self.done]
def check_ft_available(self):
properties = self.conn.GetAll(cs.CONN_IFACE_REQUESTS,
dbus_interface=cs.PROPERTIES_IFACE)
# general FT class
assert ({cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT},
[cs.FT_CONTENT_HASH_TYPE, cs.TARGET_HANDLE, cs.TARGET_ID, cs.FT_CONTENT_TYPE,
cs.FT_FILENAME, cs.FT_SIZE, cs.FT_CONTENT_HASH, cs.FT_DESCRIPTION, cs.FT_DATE,
cs.FT_URI, cs.FT_SERVICE_NAME, cs.FT_METADATA]
) in properties.get('RequestableChannelClasses'),\
properties['RequestableChannelClasses']
# FT class with MD5 as HashType
assert ({cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.FT_CONTENT_HASH_TYPE: cs.FILE_HASH_TYPE_MD5},
[cs.TARGET_HANDLE, cs.TARGET_ID, cs.FT_CONTENT_TYPE, cs.FT_FILENAME,
cs.FT_SIZE, cs.FT_CONTENT_HASH, cs.FT_DESCRIPTION, cs.FT_DATE,
cs.FT_URI, cs.FT_SERVICE_NAME, cs.FT_METADATA]
) in properties.get('RequestableChannelClasses'),\
properties['RequestableChannelClasses']
def request_ft_channel(self, uri=True):
request = { cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_FILE_TRANSFER,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_HANDLE: self.handle,
cs.FT_CONTENT_TYPE: self.file.content_type,
cs.FT_FILENAME: self.file.name,
cs.FT_SIZE: self.file.size,
cs.FT_CONTENT_HASH_TYPE: self.file.hash_type,
cs.FT_CONTENT_HASH: self.file.hash,
cs.FT_DESCRIPTION: self.file.description,
cs.FT_DATE: self.file.date,
cs.FT_INITIAL_OFFSET: 0,
cs.FT_SERVICE_NAME: self.service_name,
cs.FT_METADATA: dbus.Dictionary(self.metadata, signature='sas')}
if uri:
request[cs.FT_URI] = self.file.uri
self.ft_path, props = self.conn.Requests.CreateChannel(request)
# Channel D-Bus properties
assertEquals(cs.CHANNEL_TYPE_FILE_TRANSFER, props[cs.CHANNEL_TYPE])
assertSameSets(
[ cs.CHANNEL_IFACE_FILE_TRANSFER_METADATA,
], props[cs.INTERFACES])
assertEquals(self.handle, props[cs.TARGET_HANDLE])
assertEquals(self.contact_name, props[cs.TARGET_ID])
assertEquals(cs.HT_CONTACT, props[cs.TARGET_HANDLE_TYPE])
assert props[cs.REQUESTED]
assertEquals(self.self_handle, props[cs.INITIATOR_HANDLE])
assertEquals(self.self_handle_name, props[cs.INITIATOR_ID])
# Channel.Type.FileTransfer D-Bus properties
assertEquals(cs.FT_STATE_PENDING, props[cs.FT_STATE])
assertEquals(self.file.content_type, props[cs.FT_CONTENT_TYPE])
assertEquals(self.file.name, props[cs.FT_FILENAME])
assertEquals(self.file.size, props[cs.FT_SIZE])
assertEquals(self.file.hash_type, props[cs.FT_CONTENT_HASH_TYPE])
assertEquals(self.file.hash, props[cs.FT_CONTENT_HASH])
assertEquals(self.file.description, props[cs.FT_DESCRIPTION])
assertEquals(self.file.date, props[cs.FT_DATE])
assertEquals(0, props[cs.FT_TRANSFERRED_BYTES])
assertEquals(0, props[cs.FT_INITIAL_OFFSET])
assertEquals(self.service_name, props[cs.FT_SERVICE_NAME])
assertEquals(self.metadata, props[cs.FT_METADATA])
if uri:
assertEquals(self.file.uri, props[cs.FT_URI])
else:
assertEquals('', props[cs.FT_URI])
self.check_platform_socket_types(props[cs.FT_AVAILABLE_SOCKET_TYPES])
def got_send_iq(self):
iq_event = self.q.expect('stream-iq', to=self.contact_full_jid)
self._check_file_transfer_offer_iq(iq_event)
def _check_file_transfer_offer_iq(self, iq_event):
self.iq = iq_event.stanza
self.bytestream, profile = create_from_si_offer(self.stream, self.q,
self.bytestream_cls, iq_event.stanza, 'test@localhost/Resource')
assert self.iq['to'] == self.contact_full_jid
assert profile == ns.FILE_TRANSFER
file_node = xpath.queryForNodes('/iq/si/file', self.iq)[0]
assert file_node['name'] == self.file.name
assert file_node['size'] == str(self.file.size)
assert file_node['mime-type'] == self.file.content_type
assert file_node['hash'] == self.file.hash
date = datetime.datetime.utcfromtimestamp(self.file.date).strftime('%FT%H:%M:%SZ')
assert file_node['date'] == date, file_node['date']
desc_node = xpath.queryForNodes("/iq/si/file/desc", self.iq)[0]
self.desc = desc_node.children[0]
assert self.desc == self.file.description
# Gabble supports resume
range = xpath.queryForNodes('/iq/si/file/range', self.iq)[0]
assert range is not None
# Metadata forms
forms = extract_data_forms(xpath.queryForNodes('/iq/si/file/x', self.iq))
if self.service_name:
assertEquals({'ServiceName': [self.service_name]},
forms[ns.TP_FT_METADATA_SERVICE])
else:
assert ns.TP_FT_METADATA_SERVICE not in forms
if self.metadata:
assertEquals(self.metadata, forms[ns.TP_FT_METADATA])
else:
assert ns.TP_FT_METADATA not in forms
def provide_file(self):
try:
self.address = self.ft_channel.ProvideFile(self.address_type,
self.access_control, self.access_control_param,
byte_arrays=True)
except dbus.DBusException, e:
if self.address_type == cs.SOCKET_ADDRESS_TYPE_IPV6 and \
e.get_dbus_name() == cs.NOT_AVAILABLE and \
e.get_dbus_message() == "Could not set up local socket":
print "Ignoring error for ipv6 address space"
return True
else:
raise e
def client_accept_file(self):
# accept SI offer
result, si = self.bytestream.create_si_reply(self.iq)
file_node = si.addElement((ns.FILE_TRANSFER, 'file'))
range = file_node.addElement('range')
range['offset'] = str(self.file.offset)
self.stream.send(result)
self.bytestream.wait_bytestream_open()
def send_file(self):
s = self.create_socket()
s.connect(self.address)
s.send(self.file.data[self.file.offset:])
to_receive = self.file.size - self.file.offset
self.count = 0
def bytes_changed_cb(bytes):
self.count = bytes
self.ft_channel.connect_to_signal('TransferredBytesChanged', bytes_changed_cb)
# FileTransferStateChanged can be fired while we are receiving data
# (in the SOCKS5 case for example)
self.completed = False
def ft_state_changed_cb(state, reason):
if state == cs.FT_STATE_COMPLETED:
self.completed = True
self.ft_channel.connect_to_signal('FileTransferStateChanged', ft_state_changed_cb)
# get data from bytestream
data = ''
while len(data) < to_receive:
data += self.bytestream.get_data()
assert data == self.file.data[self.file.offset:]
if self.completed:
# FileTransferStateChanged has already been received
waiting = []
else:
waiting = [EventPattern('dbus-signal', signal='FileTransferStateChanged')]
events = self.bytestream.wait_bytestream_closed(waiting)
# If not all the bytes transferred have been announced using
# TransferredBytesChanged, wait for them
while self.count < to_receive:
self.q.expect('dbus-signal', signal='TransferredBytesChanged')
assert self.count == to_receive
if len(waiting) > 1:
state, reason = events[0].args
assert state == cs.FT_STATE_COMPLETED
assert reason == cs.FT_STATE_CHANGE_REASON_NONE
def platform_impls():
impls = [
(cs.SOCKET_ADDRESS_TYPE_IPV4, cs.SOCKET_ACCESS_CONTROL_LOCALHOST, ""),
(cs.SOCKET_ADDRESS_TYPE_IPV6, cs.SOCKET_ACCESS_CONTROL_LOCALHOST, ""),
]
if os.name == 'posix':
impls.append((cs.SOCKET_ADDRESS_TYPE_UNIX,
cs.SOCKET_ACCESS_CONTROL_LOCALHOST, ""))
return impls
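# Each tuple is (address_type, access_control, access_control_param) as consumed by
# FileTransferTest.__init__; the UNIX-socket variant is only exercised on POSIX systems.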
def exec_file_transfer_test(test_cls, one_run=False):
for bytestream_cls in [
bytestream.BytestreamIBBMsg,
bytestream.BytestreamS5B,
bytestream.BytestreamS5BPidgin,
bytestream.BytestreamSIFallbackS5CannotConnect,
bytestream.BytestreamSIFallbackS5WrongHash,
bytestream.BytestreamS5BRelay,
bytestream.BytestreamS5BRelayBugged]:
for addr_type, access_control, access_control_param in platform_impls():
file = File()
test = test_cls(bytestream_cls, file, addr_type, access_control, access_control_param)
exec_test(test.test)
# test resume
file.offset = 5
test = test_cls(bytestream_cls, file, addr_type, access_control, access_control_param)
exec_test(test.test)
if one_run:
return
| lgpl-2.1 | 3,437,921,144,333,873,700 | 38.49322 | 113 | 0.616197 | false |
benosment/reddit-comic | pycomic/settings.py | 1 | 1358 | import os
import logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] [%(funcName)s:%(lineno)d] %(message)s',
'datefmt': '%m/%d/%Y %I:%M:%S %p',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'log.txt',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
}
}
}
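# This dict follows the stdlib dictConfig schema; a consumer would typically apply it
# with logging.config.dictConfig(LOGGING) (a sketch, nothing in this module does so).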
query_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'query.txt')
firefox_profile_path = '/home/pi/.mozilla/firefox/t32tl2ku.marvel'
python_venv_path = '/home/pi/pycomic/venv/bin/python'
high_priority = ['kameiko',]
blacklist = ['uncanny_digital',]
try:
account_sid = os.environ.get('ACCOUNT_SID')
auth_token = os.environ.get('AUTH_TOKEN')
twilio_number = os.environ.get('TWILIO_NUMBER')
my_phone = os.environ.get('MY_PHONE')
except:
logging.exception('unable to import twilio credentials')
raise
| gpl-2.0 | -47,024,582,287,319,130 | 24.148148 | 90 | 0.5081 | false |
cgeoffroy/son-analyze | scripts/all.py | 1 | 2996 | #! /usr/bin/env python3
# Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""all.py launch all the tests and checks for this project"""
# pylint: disable=unsubscriptable-object
import sys
import subprocess
import typing # noqa pylint: disable=unused-import
from typing import List, Tuple
from colorama import init, Fore, Style # type: ignore
def launch_command(summaries: List[Tuple[str, int]], name: str,
command: List[str]) -> None: # noqa pylint: disable=invalid-sequence-index
"""Start a command and adds its return code in summaries"""
return_code = subprocess.call(command)
summaries.append((name, return_code))
print()
def print_summaries(summaries: List[Tuple[str, int]]) -> None:
"""Print on the console the summaries of the executed commands"""
def text_summary(name: str, return_code: int) -> str:
"""Returns a colorized string corresponding to the return code"""
if return_code == 0:
return '{} {}: commands succeeded{}'.format(Fore.GREEN,
name, Style.RESET_ALL)
return '{}ERROR, {}: commands failed{}'.format(Fore.RED, name,
Style.RESET_ALL)
print('\n{0} summary {0}'.format('_'*35))
for summary in summaries:
print(text_summary(*summary))
def main() -> None:
"""Main entrypoint"""
init()
args = sys.argv[1:]
summaries = [] # type: List[Tuple[str, int]]
commands = [('flake8', 'scripts/flake8.sh'),
('pylint', 'scripts/pylint.sh'),
('mypy', 'scripts/mypy.sh'),
('py.test', 'scripts/py.test.sh'), ]
for (name, command) in commands:
launch_command(summaries, name, [command] + args)
print_summaries(summaries)
if __name__ == '__main__':
main()
| apache-2.0 | -6,771,787,523,066,724,000 | 38.421053 | 94 | 0.661549 | false |
DragonRoman/rhevm-utils | monitoring/rhev-nagios-host-mem-used.py | 1 | 2977 | #!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez ([email protected])
#
# Description: Script for monitoring host Memory status and VM's rhevm-sdk
# api and produce NAGIOS valid output
#
# Requires rhevm-sdk to work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import optparse
from ovirtsdk.xml import params
from rhev_functions import *
description = """
RHEV-nagios-host-mem-used is a script for querying RHEVM via the API to get host status.
Its goal is to output a table of host/VM status for simple monitoring via external utilities.
"""
# Option parsing
p = optparse.OptionParser("rhev-nagios-host-mem-used.py [arguments]", description=description)
p.add_option("-u", "--user", dest="username", help="Username to connect to RHEVM API", metavar="admin@internal",
default="admin@internal")
p.add_option("-w", "--password", dest="password", help="Password to use with username", metavar="admin",
default="admin")
p.add_option("-W", action="store_true", dest="askpassword", help="Ask for password", metavar="admin", default=False)
p.add_option("-k", action="store_true", dest="keyring", help="use python keyring for user/password", metavar="keyring",
default=False)
p.add_option("-s", "--server", dest="server", help="RHEV-M server address/hostname to contact", metavar="127.0.0.1",
default="127.0.0.1")
p.add_option("-p", "--port", dest="port", help="API port to contact", metavar="443", default="443")
p.add_option('-v', "--verbosity", dest="verbosity", help="Show messages while running", metavar='[0-n]', default=0,
type='int')
p.add_option("--host", dest="host", help="Show messages while running", metavar='host')
(options, args) = p.parse_args()
options.username, options.password = getuserpass(options)
baseurl = "https://%s:%s" % (options.server, options.port)
api = apilogin(url=baseurl, username=options.username, password=options.password)
# MAIN PROGRAM
try:
host = api.hosts.get(name=options.host)
except:
print("Host %s not found" % options.host)
if not host:
print("Host %s not found" % options.host)
sys.exit(3)
# NAGIOS PRIOS:
# 0 -> ok
# 1 -> warning
# 2 -> critical
# 3 -> unknown
memory = host.statistics.get(name="memory.used").values.value[0].datum
memtotal = host.statistics.get(name="memory.total").values.value[0].datum
percentage = int(100 * memory / memtotal)
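# memory.used / memory.total appear to be reported in bytes by the RHEV API, hence the
# division by 1024 * 1024 further down to print the value in MiB.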
retorno = 3
if percentage >= 90:
retorno = 1
if percentage >= 95:
retorno = 2
else:
retorno = 0
print(memory / (1024 * 1024))
sys.exit(retorno)
| gpl-3.0 | -3,178,408,871,091,183,000 | 33.218391 | 119 | 0.698018 | false |
fhamborg/news-please | newsplease/pipeline/extractor/comparer/comparer_text.py | 1 | 3329 | import itertools
class ComparerText():
"""This class compares the text of the list of ArticleCandidates and sends the result back to the Comparer."""
def extract(self, item, article_candidate_list):
"""Compares the extracted texts.
:param item: The corresponding NewscrawlerItem
:param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely text
"""
list_text = []
# The minimal number of words a text needs to have
min_number_words = 15
# The texts of the article candidates and the respective extractors are saved in a tuple in list_text.
for article_candidate in article_candidate_list:
if article_candidate.text != None:
list_text.append((article_candidate.text, article_candidate.extractor))
# Remove texts that are shorter than min_number_words.
for text_tuple in list_text:
if len(text_tuple[0].split()) < min_number_words:
list_text.remove(text_tuple)
# If there is no value in the list, return None.
if len(list_text) == 0:
return None
# If there is only one solution, return it.
if len(list_text) < 2:
return list_text[0][0]
else:
# If there is more than one solution, do the following:
# Create a list which holds triple of the score and the two extractors
list_score = []
# Compare every pair of texts exactly once
for a, b, in itertools.combinations(list_text, 2):
# Create sets from the texts
set_a = set(a[0].split())
set_b = set(b[0].split())
symmetric_difference_a_b = set_a ^ set_b
intersection_a_b = set_a & set_b
# Replace 0 with -1 in order to elude division by zero
if intersection_a_b == 0:
intersection_a_b = -1
# Create the score. It divides the number of words appearing in only one of the texts by twice the
# number of words appearing in both texts, and subtracts the result from 1. The closer to 1, the more similar they are.
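# Worked example (hypothetical texts): {'the', 'quick', 'fox'} vs {'the', 'lazy', 'fox'}
# gives a symmetric difference of 2 words and an intersection of 2 words,
# so score = 1 - (2 / (2 * 2)) = 0.5.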
score = 1 - ((len(symmetric_difference_a_b)) / (2 * len(intersection_a_b)))
list_score.append((score, a[1], b[1]))
# Find out which is the highest score
best_score = max(list_score, key=lambda item: item[0])
# If one of the solutions is newspaper return it
if "newspaper" in best_score:
return (list(filter(lambda x: x[1] == "newspaper", list_text))[0][0])
else:
# If not, return the text that is longer
# A list that holds the extracted texts and their extractors which were most similar
top_candidates = []
for tuple in list_text:
if tuple[1] == best_score[1] or tuple[1] == best_score[2]:
top_candidates.append(tuple)
if len(top_candidates[0][0]) > len(top_candidates[1][0]):
return (top_candidates[0][0])
else:
return (top_candidates[1][0])
| apache-2.0 | 3,257,272,962,055,773,700 | 41.139241 | 123 | 0.56924 | false |
jnosal/seth | seth/tests/test_exporting.py | 1 | 6725 | from pyramid.httpexceptions import HTTPError
from seth.tests import UnitTestBase, IntegrationTestBase
from seth.tests.models import SampleModel
from seth import exporting
from seth import filtering
from seth.classy import web
from seth.classy.web import export
class ExporterTestCase(UnitTestBase):
def test_exporter_model_is_not_defined(self):
class SampleExporter(exporting.ExportFactory):
model = None
self.assertRaises(AssertionError, lambda: SampleExporter([]))
def test_exporter_no_items(self):
class SampleExporter(exporting.ExportFactory):
model = SampleModel
data = SampleExporter([]).get_export_data()
self.assertIn('title', data)
self.assertIn('header', data)
self.assertIn('rows', data)
self.assertEqual(data['title'], None)
self.assertEqual(data['rows'], [])
self.assertEqual(data['header'], [])
def test_exporter_header_does_not_match_properties(self):
class SampleExporter(exporting.ExportFactory):
model = SampleModel
header = ['smth']
self.assertRaises(exporting.SethExportException, lambda: SampleExporter([]))
def test_exporter_model_does_not_have_certain_attribute(self):
class SampleExporter(exporting.ExportFactory):
model = SampleModel
header = ['smth']
properties = [exporting.Field('smth')]
self.assertRaises(exporting.SethExportException, lambda: SampleExporter([]))
def test_property_is_not_instance_of_Field(self):
class SampleExporter(exporting.ExportFactory):
model = SampleModel
header = ['smth']
properties = ['int_col']
self.assertRaises(exporting.SethExportException, lambda: SampleExporter([]))
def test_generate_model_export_no_items(self):
class SampleExporter(exporting.ExportFactory):
title = 'abc'
model = SampleModel
header = ['smth']
properties = [exporting.Field('int_col')]
exporter = SampleExporter([])
data = exporter.get_export_data()
self.assertEqual(data['header'], ['smth'])
self.assertEqual(data['rows'], [])
self.assertEqual(data['title'], 'abc')
def test_generate_model_entry_exists_from_query(self):
SampleModel.manager.create(int_col=1)
SampleModel.manager.create(int_col=1)
class SampleExporter(exporting.ExportFactory):
title = 'abc'
model = SampleModel
header = ['smth']
properties = [exporting.Field('int_col')]
exporter = SampleExporter([i for i in SampleModel.query.all()])
data = exporter.get_export_data()
self.assertEqual(data['header'], ['smth'])
self.assertEqual(data['rows'], [[1], [1]])
self.assertEqual(data['title'], 'abc')
SampleModel.manager.create(int_col=1)
def test_generate_model_entry_exists_from_list(self):
SampleModel.manager.create(int_col=1)
SampleModel.manager.create(int_col=1)
class SampleExporter(exporting.ExportFactory):
title = 'abc'
model = SampleModel
header = ['smth']
properties = [exporting.Field('int_col')]
exporter = SampleExporter(SampleModel.query)
data = exporter.get_export_data()
self.assertEqual(data['header'], ['smth'])
self.assertEqual(data['rows'], [[1], [1]])
self.assertEqual(data['title'], 'abc')
class RegisterExportTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.include('pyramid_jinja2')
config.add_jinja2_search_path('seth.tests:templates/')
class SampleExporter(exporting.ExportFactory):
title = 'abc'
model = SampleModel
header = ['smth']
properties = [exporting.Field('int_col')]
class ExportResourceWithoutTemplate(web.export.ExportResource):
export_factory = SampleExporter
config.register_export_resource(ExportResourceWithoutTemplate, '/test/plain/')
class ExportResource(web.export.ExportResource):
template = 'test_export.jinja2'
export_factory = SampleExporter
config.register_export_resource(ExportResource, '/test/export/')
class SimpleFactory(filtering.FilterFactory):
model = SampleModel
int_col = filtering.IntegerFilter()
class ExportResourceWithFilter(web.export.ExportResource):
template = 'test_export.jinja2'
export_factory = SampleExporter
filter_class = SimpleFactory
config.register_export_resource(ExportResourceWithFilter, '/test/filter_export/')
def test_render_pdf_no_template(self):
self.assertRaises(HTTPError, lambda: self.app.get('/test/plain/pdf/', expect_errors=True))
def test_render_pdf_template_exists(self):
r = self.app.get('/test/export/pdf/', expect_errors=True)
self.assertEqual(r.status_int, 200)
self.assertTrue(r.body)
def test_render_csv_no_template(self):
self.assertRaises(HTTPError, lambda: self.app.get('/test/plain/csv/', expect_errors=True))
def test_render_csv_template_specified_but_does_not_exist_so_renderer_exception_is_raised(self):
SampleModel.manager.create(int_col=1)
SampleModel.manager.create(int_col=1)
r = self.app.get('/test/export/csv/', expect_errors=True)
self.assertEqual(r.status_int, 200)
def test_got_to_default_path_returns_response_with_message(self):
r = self.app.get('/test/plain/', expect_errors=True)
self.assertEqual(r.status_int, 200)
self.assertEqual(r.body, "Go to /csv/ or /pdf/ to view export")
def test_export_resource_with_filter_class_specified(self):
SampleModel.manager.create(int_col=1)
SampleModel.manager.create(int_col=3)
r = self.app.get('/test/filter_export/csv/?int_col=3', expect_errors=True)
self.assertEqual(r.status_int, 200)
self.assertIn('smth', r.body)
self.assertIn('3', r.body)
self.assertNotIn('1', r.body)
r = self.app.get('/test/filter_export/csv/?int_col=1', expect_errors=True)
self.assertIn('smth', r.body)
self.assertEqual(r.status_int, 200)
self.assertIn('1', r.body)
self.assertNotIn('3', r.body)
r = self.app.get('/test/filter_export/csv/?int_col=2', expect_errors=True)
self.assertIn('smth', r.body)
self.assertEqual(r.status_int, 200)
self.assertNotIn('3', r.body)
self.assertNotIn('1', r.body) | mit | 5,246,803,346,190,945,000 | 36.366667 | 100 | 0.640892 | false |
Bramsnoek/Bioinformatics_Education | Aftekenopdracht 2/main.py | 1 | 2987 | from pprint import pprint
def print_personal_information():
personal_information = \
{
'name': 'Bram Snoek',
'address': 'Lithsedijk 33, 5397ea, Lith',
'phone': '0636489308',
'college major': 'Bioinformatics'
}
print("\n My name is {}, my address is {}, my phone number is {} and my college major is {}".format(personal_information['name'],
personal_information['address'],
personal_information['phone'],
personal_information['college major']))
def print_sales_prediction(sales_prediction: int):
print("Projected amount of sales after multiplication: {0} \n".format(str(0.23 * sales_prediction)))
def print_total_purchase(prices: list):
total_amount = sum(prices)
print("Subtotal: ${} \n Amount of sales tax: ${} \n Total amount: ${} \n".format(str(total_amount),
str(round(total_amount * 0.07, 2)),
str(round(total_amount * 1.07, 2))))
def print_distance_drive(speed: int, hours: list):
for hour in hours:
print("Distance driven: {} ".format(str(speed * hour)))
print('\n')
def print_price_per_gallon(mpg_info: list):
mpg = round(mpg_info[0] / mpg_info[1], 2)
print("Your mpg value is: {} \n".format(str(mpg)))
def print_how_many_cookies(num_cookies: int, cups_sugar: float, cups_butter: float, cups_flour: float, makeable_cookies: int):
cookies_ratio = num_cookies / makeable_cookies
print("You'll need {} cups of sugar, {} cups of butter and {} cups of flour".format(str(round(cups_sugar * cookies_ratio, 2)),
str(round(cups_butter * cookies_ratio, 2)),
str(round(cups_flour * cookies_ratio, 2))))
def main():
sales_prediction = input("What is your projected amount of sales? ")
print_sales_prediction(int(sales_prediction))
    prices = [float(x.strip()) for x in input("What are your prices (separate each price by ,): ").split(',')]
print_total_purchase(prices)
print_distance_drive(70, [6, 10, 15])
    mpg = [float(x.strip()) for x in input("How many miles have you driven and how many gallons of gas have been used (separate by ,)").split(',')]
print_price_per_gallon(mpg)
num_cookies = int(input("How many cookies do you want to make?"))
print_how_many_cookies(num_cookies, 1.5, 1, 2.75, 48)
print_personal_information()
if __name__ == '__main__':
main() | gpl-3.0 | -2,781,386,519,122,871,300 | 42.304348 | 146 | 0.508537 | false |
rahulunair/nova | nova/tests/unit/virt/xenapi/test_vm_utils.py | 1 | 104339 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from eventlet import greenthread
import mock
import os_xenapi
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import fixture as config_fixture
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import hardware
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import utils as image_utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
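# Canned volume connection_data dictionaries, keyed by SR type, for tests
# that need XenSM or iSCSI storage repository connection info.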
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = mock.Mock()
self.name_label = 'my_vm'
def test_normal(self):
self.session.call_xenapi.return_value = ['x']
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_no_result(self):
self.session.call_xenapi.return_value = []
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_too_many(self):
self.session.call_xenapi.return_value = ['a', 'b']
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_rescue_none(self):
self.session.call_xenapi.side_effect = [[], ['x']]
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
self.session.call_xenapi.assert_has_calls([
mock.call("VM.get_by_name_label", self.name_label + '-rescue'),
mock.call("VM.get_by_name_label", self.name_label)])
def test_rescue_found(self):
self.session.call_xenapi.return_value = ['y']
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label + '-rescue')
def test_rescue_too_many(self):
self.session.call_xenapi.return_value = ['a', 'b', 'c']
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label + '-rescue')
class GenerateConfigDriveTestCase(VMUtilsTestBase):
@mock.patch.object(compute_utils, 'disk_ops_semaphore')
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata")
@mock.patch.object(vm_utils.configdrive, 'ConfigDriveBuilder')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch.object(vm_utils.volume_utils, 'stream_to_vdi')
@mock.patch.object(vm_utils.os.path, 'getsize', return_value=100)
@mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref')
@mock.patch.object(vm_utils.utils, 'tempdir')
def test_no_admin_pass(self, mock_tmpdir, mock_create_vbd, mock_size,
mock_stream, mock_execute, mock_builder,
mock_instance_metadata, mock_create_vdi,
mock_find_sr, mock_disk_op_sema):
mock_tmpdir.return_value.__enter__.return_value = '/mock'
with mock.patch.object(six.moves.builtins, 'open') as mock_open:
mock_open.return_value.__enter__.return_value = 'open_fd'
vm_utils.generate_configdrive('session', 'context', 'instance',
'vm_ref', 'userdevice',
'network_info')
mock_disk_op_sema.__enter__.assert_called_once()
mock_size.assert_called_with('/mock/configdrive.vhd')
mock_open.assert_called_with('/mock/configdrive.vhd')
mock_execute.assert_called_with('qemu-img', 'convert', '-Ovpc',
'/mock/configdrive',
'/mock/configdrive.vhd')
mock_instance_metadata.assert_called_with(
'instance', content=None, extra_md={},
network_info='network_info', request_context='context')
mock_stream.assert_called_with('session', 'instance', 'vhd',
'open_fd', 100, 'vdi_ref')
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata",
side_effect=test.TestingException)
def test_vdi_cleaned_up(self, mock_instance_metadata, mock_create,
mock_find_sr, mock_destroy):
self.assertRaises(test.TestingException, vm_utils.generate_configdrive,
'session', None, None, None, None, None)
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_get_sys_hypervisor_uuid',
return_value='2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
def test_get_this_vm_uuid_new_kernel(self, mock_get_sys_hypervisor_uuid):
result = vm_utils.get_this_vm_uuid(None)
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result)
mock_get_sys_hypervisor_uuid.assert_called_once_with()
@mock.patch('nova.virt.xenapi.vm_utils._get_sys_hypervisor_uuid',
side_effect=IOError(13, 'Permission denied'))
@mock.patch('nova.privsep.xenapi.xenstore_read',
side_effect=[('27', ''),
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', '')])
def test_get_this_vm_uuid_old_kernel_reboot(self, fake_read, fake_uuid):
result = vm_utils.get_this_vm_uuid(None)
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result)
fake_read.assert_has_calls([
mock.call('domid'),
mock.call('/local/domain/27/vm')])
fake_uuid.assert_called_once_with()
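# Minimal stand-in for the XenAPI session: individual tests patch or stub
# whichever call_* method they need, so the bodies are intentionally empty.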
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.image_handler = image_utils.get_image_handler(
CONF.xenserver.image_handler)
self.flags(group='glance', api_servers=['http://localhost:9292'])
make_uuid_stack_patcher = mock.patch.object(
vm_utils, '_make_uuid_stack', return_value=["uuid_stack"])
self.addCleanup(make_uuid_stack_patcher.stop)
self.mock_make_uuid_stack = make_uuid_stack_patcher.start()
get_sr_path_patcher = mock.patch.object(
vm_utils, 'get_sr_path', return_value='sr_path')
self.addCleanup(get_sr_path_patcher.stop)
self.mock_get_sr_path = get_sr_path_patcher.start()
def _stub_glance_download_vhd(self, raise_exc=None):
call_plugin_patcher = mock.patch.object(
self.session, 'call_plugin_serialized_with_retry')
self.addCleanup(call_plugin_patcher.stop)
self.mock_call_plugin = call_plugin_patcher.start()
if raise_exc:
self.mock_call_plugin.side_effect = raise_exc
else:
self.mock_call_plugin.return_value = {'root': {'uuid': 'vdi'}}
def _assert_make_uuid_stack_and_get_sr_path(self):
self.mock_make_uuid_stack.assert_called_once_with()
self.mock_get_sr_path.assert_called_once_with(self.session)
def _assert_call_plugin_serialized_with_retry(self):
self.mock_call_plugin.assert_called_once_with(
'glance.py',
'download_vhd2',
0,
mock.ANY,
mock.ANY,
extra_headers={'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
@mock.patch.object(vm_utils, '_check_vdi_size')
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr")
def test_fetch_vhd_image_works_with_glance(self, mock_safe_find_sr,
mock_scan_sr,
mock_check_vdi_size):
self._stub_glance_download_vhd()
result = vm_utils._fetch_vhd_image(self.context, self.session,
self.instance, 'image_id',
self.image_handler)
self.assertEqual("vdi", result['root']['uuid'])
mock_safe_find_sr.assert_called_once_with(self.session)
mock_scan_sr.assert_called_once_with(self.session, "sr")
mock_check_vdi_size.assert_called_once_with(self.context, self.session,
self.instance, "vdi")
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
@mock.patch.object(vm_utils, 'destroy_vdi',
side_effect=exception.StorageError(reason=""))
@mock.patch.object(FakeSession, 'call_xenapi', return_value="ref")
@mock.patch.object(
vm_utils, '_check_vdi_size',
side_effect=exception.FlavorDiskSmallerThanImage(flavor_size=0,
image_size=1))
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr")
def test_fetch_vhd_image_cleans_up_vdi_on_fail(
self, mock_safe_find_sr, mock_scan_sr, mock_check_vdi_size,
mock_call_xenapi, mock_destroy_vdi):
self._stub_glance_download_vhd()
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id', self.image_handler)
mock_safe_find_sr.assert_called_once_with(self.session)
mock_scan_sr.assert_called_once_with(self.session, "sr")
mock_check_vdi_size.assert_called_once_with(self.context, self.session,
self.instance, "vdi")
mock_call_xenapi.assert_called_once_with("VDI.get_by_uuid", "vdi")
mock_destroy_vdi.assert_called_once_with(self.session, "ref")
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
def test_fetch_vhd_image_download_exception(self):
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id',
self.image_handler)
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
        # The default (option unset) yields None; an explicitly configured
        # compression level is passed through unchanged.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def setUp(self):
super(ResizeHelpersTestCase, self).setUp()
self.context = context.RequestContext('user', 'project')
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.ext_journal_enable')
@mock.patch('nova.privsep.fs.resize_partition')
@mock.patch('nova.privsep.fs.resize2fs')
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_down_succeeds(
self, mock_fsck, mock_resize2fs, mock_resize,
mock_disable_journal, mock_enable_journal):
dev_path = '/dev/fake'
partition_path = '%s1' % dev_path
vm_utils._resize_part_and_fs('fake', 0, 20, 10, 'boot')
mock_fsck.assert_has_calls([
mock.call(partition_path)])
mock_resize2fs.assert_has_calls([
mock.call(partition_path, [0], size='10s')])
mock_resize.assert_has_calls([
mock.call(dev_path, 0, 9, True)])
mock_disable_journal.assert_has_calls([
mock.call(partition_path)])
mock_enable_journal.assert_has_calls([
mock.call(partition_path)])
@mock.patch.object(vm_utils.LOG, 'debug')
def test_log_progress_if_required(self, mock_debug):
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
vm_utils._log_progress_if_required(1, current, 2)
mock_debug.assert_called_once_with(
"Sparse copy in progress, %(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": 50.0, "left": 1})
@mock.patch.object(vm_utils.LOG, 'debug')
def test_log_progress_if_not_required(self, mock_debug):
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
vm_utils._log_progress_if_required(1, current, 2)
mock_debug.assert_not_called()
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.resize2fs',
side_effect=processutils.ProcessExecutionError)
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_down_fails_disk_too_big(
self, mock_fsck, mock_resize2fs, mock_disable_journal):
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
mock_fsck.assert_has_calls([mock.call('/dev/fake1')])
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.ext_journal_enable')
@mock.patch('nova.privsep.fs.resize_partition')
@mock.patch('nova.privsep.fs.resize2fs')
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_up_succeeds(
self, mock_fsck, mock_resize2fs, mock_resize,
mock_disable_journal, mock_enable_journal):
dev_path = '/dev/fake'
partition_path = '%s1' % dev_path
vm_utils._resize_part_and_fs('fake', 0, 20, 30, '')
mock_fsck.assert_has_calls([
mock.call(partition_path)])
mock_resize2fs.assert_has_calls([
mock.call(partition_path, [0])])
mock_resize.assert_has_calls([
mock.call(dev_path, 0, 29, False)])
mock_disable_journal.assert_has_calls([
mock.call(partition_path)])
mock_enable_journal.assert_has_calls([
mock.call(partition_path)])
def test_resize_disk_throws_on_zero_size(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0)
self.assertRaises(exception.ResizeError, vm_utils.resize_disk,
"session", "instance", "vdi_ref", flavor)
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=uuids.fake)
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
self.stub_out('nova.objects.Instance.get_flavor',
lambda *a, **kw: self.flavor)
@mock.patch.object(vm_utils, '_get_vdi_chain_size',
return_value=1073741824)
def test_not_too_large(self, mock_get_vdi_chain_size):
self.flavor.root_gb = 1
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
mock_get_vdi_chain_size.assert_called_once_with(self.session,
self.vdi_uuid)
@mock.patch.object(vm_utils, '_get_vdi_chain_size',
return_value=11811160065) # 10GB overhead allowed
def test_too_large(self, mock_get_vdi_chain_size):
self.flavor.root_gb = 1
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
mock_get_vdi_chain_size.assert_called_once_with(self.session,
self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
self.flavor.root_gb = 0
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value='ignored')
def test_lookup_call(self, mock_lookup):
vm_utils.vm_ref_or_raise('session', 'somename')
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value='vmref')
def test_return_value(self, mock_lookup):
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock_lookup.assert_called_once_with('session', 'somename')
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_raised(self, mock_lookup):
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_msg_contains_vm_name(self, mock_lookup):
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock_lookup.assert_called_once_with('session', 'somename')
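# safe_find_sr is patched once at the class level below, so every test method
# in CreateCachedImageTestCase receives mock_safe_find_sr as its last mock
# argument.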
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = self.get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
class DestroyCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(DestroyCachedImageTestCase, self).setUp()
self.session = self.get_fake_session()
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_out_of_keep_days(self,
mock_time,
mock_walk_vdi_chain,
mock_destroy_vdi,
mock_find_cached_images):
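        # The image was cached at t=0 and "now" is two days later, which
        # exceeds keep_days=1, so the VDI is destroyed and its uuid returned.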
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 2 * 3600 * 24
fake_keep_days = 1
expected_return = set()
expected_return.add('fake_uuid')
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_time.assert_called()
mock_destroy_vdi.assert_called_once()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image(self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 2 * 3600 * 24
fake_keep_days = 1
expected_return = set()
expected_return.add('fake_uuid')
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_called_once()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_cached_time_not_exceed(
self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
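        # Cached one day ago with keep_days=2: the threshold is not exceeded,
        # so destroy_vdi must not be called.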
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 1 * 3600 * 24
fake_keep_days = 2
expected_return = set()
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_not_called()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_no_cached_time(
self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': None}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
fake_keep_days = 2
expected_return = set()
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_not_called()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, 'is_vm_shutdown', return_value=True)
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(
self, mock_is_vm_shutdown):
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
mock_is_vm_shutdown.assert_called_once_with(session, vm_ref)
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(
self, mock_is_vm_shutdown):
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
mock_is_vm_shutdown.assert_called_once_with(session, vm_ref)
@mock.patch.object(FakeSession, 'call_xenapi', return_value='vbd_ref')
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self, mock_call_xenapi):
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec)
def test_create_vbd_osvol(self, mock_call_xenapi):
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_has_calls([
mock.call('VBD.create', self.vbd_rec),
mock.call('VBD.add_to_other_config', "vbd_ref", "osvol", "True")])
def test_create_vbd_extra_args(self, mock_call_xenapi):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec)
@mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref')
def test_attach_cd(self, mock_create_vbd, mock_call_xenapi):
mock_call_xenapi.return_value = None
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
mock_create_vbd.assert_called_once_with(
self.session, "vm_ref", None, 1, vbd_type='cd', read_only=True,
bootable=True, empty=True, unpluggable=False)
mock_call_xenapi.assert_called_once_with('VBD.insert', 'vbd_ref',
'vdi_ref')
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = self.get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = self.get_fake_session()
session.XenAPI.Failure = fake.Failure
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = self.get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = self.get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def _test_uplug_vbd_retries(self, mock_sleep, error):
session = self.get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
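        # With the default num_vbd_unplug_retries (10), a retryable failure
        # results in 11 XenAPI calls and 10 sleeps before StorageError is
        # raised.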
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
self.assertEqual(11, session.call_xenapi.call_count)
self.assertEqual(10, mock_sleep.call_count)
def _test_uplug_vbd_retries_with_neg_val(self):
session = self.get_fake_session()
self.flags(num_vbd_unplug_retries=-1, group='xenserver')
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"INTERNAL_ERROR")
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_missing_pv_drivers_error(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"VM_MISSING_PV_DRIVERS")
class VDIOtherConfigTestCase(VMUtilsTestBase):
"""Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
"""
def setUp(self):
super(VDIOtherConfigTestCase, self).setUp()
class _FakeSession(object):
def call_xenapi(self, operation, *args, **kwargs):
# VDI.add_to_other_config -> VDI_add_to_other_config
method = getattr(self, operation.replace('.', '_'), None)
if method:
return method(*args, **kwargs)
self.operation = operation
self.args = args
self.kwargs = kwargs
self.session = _FakeSession()
self.context = context.get_admin_context()
self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
'name': 'myinstance'}
def test_create_vdi(self):
# Some images are registered with XenServer explicitly by calling
# `create_vdi`
vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
'myvdi', 'root', 1024, read_only=True)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, self.session.args[0]['other_config'])
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'fake-uuid'}})
def test_create_image(self, mock_vm_utils):
# Other images are registered implicitly when they are dropped into
# the SR by a dom0 plugin or some other process
self.flags(cache_images='none', group='xenserver')
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
vm_utils.create_image(self.context, self.session, self.fake_instance,
'myvdi', 'image1', vm_utils.ImageType.DISK_VHD,
'image_handler')
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_sr_path, mock_scan_sr,
mock_recv_vhd):
# Migrated images should preserve the `other_config`
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
mock_sr_path.return_value = {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
vm_utils._import_migrated_vhds(self.session, self.fake_instance,
"disk_label", "root", "vdi_label")
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
mock_scan_sr.assert_called_once_with(self.session)
mock_recv_vhd.assert_called_with(
self.session, "disk_label",
{'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}, mock.ANY)
mock_sr_path.assert_called_once_with(self.session)
class GenerateDiskTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs',
                side_effect=test.TestingException())
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_with_no_fs_given(self, mock_create_vbd,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False,
dom0=True)
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_swap(self, mock_create_vbd, mock_make_path,
mock_create_vdi,
mock_findsr, mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_dev = mock.MagicMock()
mock_attached_here.return_value = vdi_dev
vdi_dev.__enter__.return_value = 'fakedev'
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, 'swap',
'swap-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As swap is supported in dom0, mkfs will run there
session.call_plugin_serialized.assert_any_call(
'partition_utils.py', 'mkfs', 'fakedev', '1', 'swap', 'swap-1')
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral(self, mock_create_vbd, mock_make_path,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'ephemeral', 10, 'ext4',
'ephemeral-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As ext4 is not supported in dom0, mkfs will run in domU
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False)
mock_mkfs.assert_called_with('ext4', '/dev/fake_devp1',
'ephemeral-1')
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, '_get_dom0_ref',
                       side_effect=test.TestingException())
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
def test_generate_disk_ensure_cleanup_called(self, mock_destroy_vdis,
mock_dom0ref,
mock_create_vdi,
mock_findsr):
session = self.get_fake_session()
instance = {'uuid': 'fake_uuid'}
self.assertRaises(test.TestingException, vm_utils._generate_disk,
session, instance, None, '2', 'name', 'user', 10,
None, None)
mock_destroy_vdis.assert_called_once_with(session, ['vdi_ref'])
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral_no_vmref(self, mock_create_vbd,
mock_dom0_ref,
mock_attached_here,
mock_create_vdi,
mock_findsr):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vdi_ref = vm_utils._generate_disk(
session, instance,
None, None, 'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False, dom0=True)
self.assertFalse(mock_create_vbd.called)
@mock.patch.object(vm_utils, '_generate_disk')
class GenerateEphemeralTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateEphemeralTestCase, self).setUp()
self.session = "session"
self.instance = "instance"
self.vm_ref = "vm_ref"
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
self.fs_label = "ephemeral"
def test_get_ephemeral_disk_sizes_simple(self, mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(20)
expected = [20]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_three_disks_2000(self,
mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(4030)
expected = [2000, 2000, 30]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_two_disks_1024(self, mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(2048)
expected = [1024, 1024]
self.assertEqual(expected, list(result))
def test_generate_ephemeral_adds_one_disk(self, mock_generate_disk):
mock_generate_disk.return_value = self.userdevice
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
mock_generate_disk.assert_called_once_with(
self.session, self.instance, self.vm_ref, str(self.userdevice),
self.ephemeral_name_label, 'ephemeral', 20480, None, self.fs_label)
def test_generate_ephemeral_adds_multiple_disks(self, mock_generate_disk):
mock_generate_disk.side_effect = [self.userdevice,
self.userdevice + 1,
self.userdevice + 2]
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
mock_generate_disk.assert_has_calls([
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.ephemeral_name_label,
'ephemeral', 2048000, None, self.fs_label),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 1),
self.ephemeral_name_label + " (1)",
'ephemeral', 2048000, None, self.fs_label + "1"),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2),
self.ephemeral_name_label + " (2)",
'ephemeral', 30720, None, self.fs_label + "2")])
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
def test_generate_ephemeral_cleans_up_on_error(
self, mock_safe_destroy_vdis, mock_generate_disk):
mock_generate_disk.side_effect = [self.userdevice,
self.userdevice + 1,
exception.NovaException]
self.assertRaises(
exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
mock_safe_destroy_vdis.assert_called_once_with(self.session, [4, 5])
mock_generate_disk.assert_has_calls([
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.ephemeral_name_label,
'ephemeral', 1048576, None, self.fs_label),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 1),
self.ephemeral_name_label + " (1)",
'ephemeral', 1048576, None, self.fs_label + "1"),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2),
"name ephemeral (2)",
'ephemeral', 1048576, None, 'ephemeral2')])
@mock.patch.object(vm_utils, '_write_partition')
@mock.patch.object(vm_utils.utils, 'temporary_chown')
@mock.patch.object(vm_utils.utils, 'make_dev_path', return_value='some_path')
class StreamDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(StreamDiskTestCase, self).setUp()
# NOTE(matelakat): This might hide the fail reason, as test runners
# are unhappy with a mocked out open.
self.image_service_func = mock.Mock()
def test_non_ami(self, mock_make_dev_path, mock_temporary_chown,
mock_write_partition):
mock_temporary_chown.return_value.__enter__.return_value = None
mock_open = mock.mock_open()
with mock.patch.object(six.moves.builtins, 'open', mock_open):
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.KERNEL, None, 'dev')
mock_make_dev_path.assert_called_once_with('dev')
mock_temporary_chown.assert_called_once_with('some_path')
mock_write_partition.assert_not_called()
mock_open.assert_called_once_with('some_path', 'wb')
fake_file = mock_open()
fake_file.seek.assert_called_once_with(0)
self.image_service_func.assert_called_once_with(fake_file)
def test_ami_disk(self, mock_make_dev_path, mock_temporary_chown,
mock_write_partition):
mock_temporary_chown.return_value.__enter__.return_value = None
mock_open = mock.mock_open()
with mock.patch.object(six.moves.builtins, 'open', mock_open):
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.DISK, 100, 'dev')
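        # DISK (ami-style) images get a partition table written first, so the
        # image payload is streamed starting just past the MBR.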
mock_write_partition.assert_called_once_with("session", 100, 'dev')
mock_make_dev_path.assert_called_once_with('dev')
mock_temporary_chown.assert_called_once_with('some_path')
mock_open.assert_called_once_with('some_path', 'wb')
fake_file = mock_open()
fake_file.seek.assert_called_once_with(vm_utils.MBR_SIZE_BYTES)
self.image_service_func.assert_called_once_with(fake_file)
@mock.patch('os_xenapi.client.session.XenAPISession.call_xenapi')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
class VMUtilsSRPath(VMUtilsTestBase):
def setUp(self):
super(VMUtilsSRPath, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
def test_defined(self, mock_safe_find_sr, mock_call_xenapi):
self.session.host_ref = "host_ref"
mock_call_xenapi.return_value = {'pbd_ref': {'device_config':
{'path': 'sr_path'}}}
self.assertEqual('sr_path', vm_utils.get_sr_path(self.session))
mock_safe_find_sr.assert_called_once_with(self.session)
mock_call_xenapi.assert_called_once_with(
'PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"')
def test_default(self, mock_safe_find_sr, mock_call_xenapi):
self.session.host_ref = "host_ref"
mock_call_xenapi.side_effect = [
{'pbd_ref': {'device_config': {}}},
{'uuid': 'sr_uuid', 'type': 'ext'}]
self.assertEqual('/var/run/sr-mount/sr_uuid',
vm_utils.get_sr_path(self.session))
mock_safe_find_sr.assert_called_once_with(self.session)
mock_call_xenapi.assert_has_calls([
mock.call('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"'),
mock.call("SR.get_record", "sr_ref")])
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateKernelRamdiskTestCase, self).setUp()
self.context = "context"
self.session = FakeSession()
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.stub_out('os_xenapi.client.session.XenAPISession.call_xenapi',
lambda *a, **k: None)
def test_create_kernel_and_ramdisk_no_create(self):
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual((None, None), result)
@mock.patch.object(uuidutils, 'generate_uuid',
side_effect=['fake_uuid1', 'fake_uuid2'])
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_both_cached(
self, mock_ramdisk, mock_generate_uuid):
kernel_id = "kernel"
ramdisk_id = "ramdisk"
self.instance["kernel_id"] = kernel_id
self.instance["ramdisk_id"] = ramdisk_id
mock_ramdisk.side_effect = ["k", "r"]
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", "r"), result)
mock_generate_uuid.assert_has_calls([mock.call(), mock.call()])
@mock.patch.object(uuidutils, 'generate_uuid', return_value='fake_uuid1')
@mock.patch.object(vm_utils, '_fetch_disk_image',
return_value={"kernel": {"file": "k"}})
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_kernel_not_cached(
self, mock_ramdisk, mock_fetch_disk_image, mock_generate_uuid):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
mock_ramdisk.return_value = ""
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", None), result)
mock_generate_uuid.assert_called_once_with()
mock_ramdisk.assert_called_once_with(self.session, kernel_id,
'fake_uuid1')
mock_fetch_disk_image.assert_called_once_with(
self.context, self.session, self.instance, self.name_label,
kernel_id, 0)
@mock.patch.object(uuidutils, 'generate_uuid')
@mock.patch.object(vm_utils, '_fetch_disk_image')
def _test_create_kernel_image(self, cache_images, mock_fetch_disk_image,
mock_generate_uuid):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
self.flags(cache_images=cache_images, group='xenserver')
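        # cache_images='all' resolves the kernel through the dom0
        # create_kernel_ramdisk helper; any other setting falls back to
        # fetching the image via _fetch_disk_image.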
if cache_images == 'all':
mock_generate_uuid.return_value = 'fake_uuid1'
else:
mock_fetch_disk_image.return_value = {
"kernel": {"file": "new_image", "uuid": None}}
result = vm_utils._create_kernel_image(self.context,
self.session,
self.instance,
self.name_label,
kernel_id, 0)
if cache_images == 'all':
self.assertEqual(result, {"kernel":
{"file": "cached_image", "uuid": None}})
mock_generate_uuid.assert_called_once_with()
mock_fetch_disk_image.assert_not_called()
else:
self.assertEqual(result, {"kernel":
{"file": "new_image", "uuid": None}})
mock_fetch_disk_image.assert_called_once_with(
self.context, self.session, self.instance, self.name_label,
kernel_id, 0)
mock_generate_uuid.assert_not_called()
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_image_cached_config(self, mock_ramdisk):
mock_ramdisk.return_value = "cached_image"
self._test_create_kernel_image('all')
mock_ramdisk.assert_called_once_with(self.session, "kernel",
"fake_uuid1")
def test_create_kernel_image_uncached_config(self):
self._test_create_kernel_image('none')
class ScanSrTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, "_scan_sr")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
mock_safe_find_sr.return_value = "sr_ref"
self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
def test_scan_sr_works(self):
session = mock.Mock()
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
def test_scan_sr_unknown_error_fails_once(self):
session = mock.Mock()
session.XenAPI.Failure = fake.Failure
session.call_xenapi.side_effect = test.TestingException
self.assertRaises(test.TestingException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
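        # SR_BACKEND_FAILURE_40 is retried with exponential backoff (sleeps of
        # 2, 4 and 8 seconds) and re-raised after the fourth failed attempt.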
self.assertRaises(FakeException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(4, session.call_xenapi.call_count)
mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
def fake_call_xenapi(*args):
fake_call_xenapi.count += 1
if fake_call_xenapi.count != 2:
raise FakeException()
fake_call_xenapi.count = 0
session.call_xenapi.side_effect = fake_call_xenapi
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(2, session.call_xenapi.call_count)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
return_value={
'memory_mb': 1024,
'vcpus': 1,
'vcpu_weight': 1.0,
})
class CreateVmTestCase(VMUtilsTestBase):
def test_vss_provider(self, mock_extract):
self.flags(vcpu_pin_set="2,3")
session = self.get_fake_session()
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows",
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
vm_utils.create_vm(session, instance, "label",
"kernel", "ramdisk")
vm_rec = {
'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '', 'tags': [],
'VCPUs_max': '4',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'label',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '4',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': 'true',
'acpi': 'true'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False
}
session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake, system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self.assertRaises(exception.Invalid,
vm_utils.create_vm,
session, instance, "label",
"kernel", "ramdisk")
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
def test_destroy_vm_silently_fails(self, mock_extract):
session = mock.Mock()
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
def _fake_object(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
def test_determine_vm_mode_returns_xen_mode(self):
instance = self._fake_object({"vm_mode": "xen"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_mode(self):
instance = self._fake_object({"vm_mode": "hvm"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_linux(self):
instance = self._fake_object({"vm_mode": None, "os_type": "linux"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_for_windows(self):
instance = self._fake_object({"vm_mode": None, "os_type": "windows"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_by_default(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_VHD(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
def test_determine_vm_mode_returns_xen_for_DISK(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
def test_vm_get_vbd_refs(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
def test_vbd_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
session.call_xenapi.assert_called_once_with("VBD.get_record",
"vbd_ref")
def test_vdi_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.get_record",
"vdi_ref")
def test_vdi_snapshot(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.snapshot",
"vdi_ref", {})
def test_vdi_get_virtual_size(self):
session = mock.Mock()
session.call_xenapi.return_value = "123"
self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
"ref")
@mock.patch.object(vm_utils, '_get_resize_func_name')
def test_vdi_resize(self, mock_get_resize_func_name):
session = mock.Mock()
mock_get_resize_func_name.return_value = "VDI.fake"
vm_utils._vdi_resize(session, "ref", 123)
session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
mock_get_size.return_value = (1024 ** 3) - 1
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3 + 1
instance = {"uuid": "a"}
self.assertRaises(exception.ResizeError,
vm_utils.update_vdi_virtual_size,
"s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
vdi_get_rec.return_value = {}
result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
self.assertEqual(('vdi_ref', {}), result)
vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
vbd_get_rec.assert_called_once_with(session, "a")
vdi_get_rec.assert_called_once_with(session, "vdi_ref")
def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdi_for_vm_safely,
session, "vm_ref", userdevice='1')
self.assertEqual([], vdi_get_rec.call_args_list)
self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
def fake_vbd_get_rec(session, vbd_ref):
return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
def fake_vdi_get_uuid(session, vdi_ref):
return vdi_ref
vm_get_vbd_refs.return_value = ["0", "2"]
vbd_get_rec.side_effect = fake_vbd_get_rec
vdi_get_uuid.side_effect = fake_vdi_get_uuid
def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
expected = ['vdi_ref_0', 'vdi_ref_2']
self.assertEqual(expected, list(result))
def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
min_userdevice=1)
expected = ["vdi_ref_2"]
self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
def test_get_all_vdis_in_sr(self):
def fake_get_rec(record_type, ref):
if ref == "2":
return "vdi_rec_2"
session = mock.Mock()
session.call_xenapi.return_value = ["1", "2"]
session.get_rec.side_effect = fake_get_rec
sr_ref = "sr_ref"
actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
self.assertEqual(actual, [('2', 'vdi_rec_2')])
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):
def fake_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
self.assertEqual("session", session)
self.assertEqual("instance", instance)
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("label", label)
self.assertEqual('0', userdevice)
self.assertIsNone(post_snapshot_callback)
yield "fake"
mock_impl.side_effect = fake_impl
with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
"label") as result:
self.assertEqual("fake", result)
mock_impl.assert_called_once_with("session", "instance", "vm_ref",
"label", '0', None)
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
mock_vdi_snapshot, mock_vdi_get_uuid,
mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
session = "session"
instance = {"uuid": "uuid"}
mock_callback = mock.Mock()
mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
{"SR": "sr_ref",
"uuid": "vdi_uuid"})
mock_vdi_snapshot.return_value = "snap_ref"
mock_vdi_get_uuid.return_value = "snap_uuid"
mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
try:
with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
"label", '2', mock_callback) as result:
self.assertEqual(["a", "b"], result)
raise test.TestingException()
self.assertTrue(False)
except test.TestingException:
pass
mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
'2')
mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
"sr_ref", "vdi_ref", ['a', 'b'])
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
mock.call(session, "snap_uuid")])
mock_callback.assert_called_once_with(
task_state="image_pending_upload")
mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
instance, ['a', 'b'], "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid"])
self.assertFalse(mock_sleep.called)
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
mock_count):
mock_count.return_value = 2
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertFalse(mock_sleep.called)
self.assertTrue(mock_count.called)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
self.assertRaises(exception.NovaException,
vm_utils._wait_for_vhd_coalesce, "session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertTrue(mock_count.called)
self.assertEqual(20, mock_sleep.call_count)
self.assertEqual(20, mock_scan_sr.call_count)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, mock_scan_sr.call_count)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_count_children(self, mock_get_all_vdis_in_sr):
vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
mock_get_all_vdis_in_sr.return_value = vdis
self.assertEqual(2, vm_utils._count_children('session',
'parent1', 'sr'))
class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks_import_root_false(self, mock_root,
mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance,
import_root=False)
expected = {'root': None, 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
self.assertEqual(0, mock_root.call_count)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrated_root_disk(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name"}
result = vm_utils._import_migrated_root_disk("s", instance)
self.assertEqual("foo", result)
mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
"name")
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = objects.Instance(id=1, uuid=uuids.fake)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
self.assertEqual({'4': 'foo', '5': 'foo'}, result)
inst_uuid = instance.uuid
inst_name = instance.name
expected_calls = [mock.call("s", instance,
"%s_ephemeral_1" % inst_uuid,
"ephemeral",
"%s ephemeral (1)" % inst_name),
mock.call("s", instance,
"%s_ephemeral_2" % inst_uuid,
"ephemeral",
"%s ephemeral (2)" % inst_name)]
self.assertEqual(expected_calls, mock_migrate.call_args_list)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_import_migrate_ephemeral_disks_use_old_flavor(self,
mock_get_sizes):
mock_get_sizes.return_value = []
instance = objects.Instance(id=1, uuid=uuids.fake, ephemeral_gb=2000)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
vm_utils._import_migrate_ephemeral_disks("s", instance)
mock_get_sizes.assert_called_once_with(4000)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, '_set_vdi_info')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
mock_set_info, mock_recv_vhd):
session = mock.Mock()
instance = {"uuid": "uuid"}
mock_recv_vhd.return_value = {"root": {"uuid": "a"}}
session.call_xenapi.return_value = "vdi_ref"
mock_get_sr_path.return_value = "sr_path"
result = vm_utils._import_migrated_vhds(session, instance,
'chain_label', 'disk_type', 'vdi_label')
expected = {'uuid': "a", 'ref': "vdi_ref"}
self.assertEqual(expected, result)
mock_get_sr_path.assert_called_once_with(session)
mock_recv_vhd.assert_called_once_with(session, 'chain_label',
'sr_path', mock.ANY)
mock_scan_sr.assert_called_once_with(session)
session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
'vdi_label', 'disk_type', instance)
def test_get_vhd_parent_uuid_rec_provided(self):
session = mock.Mock()
vdi_ref = 'vdi_ref'
vdi_rec = {'sm_config': {}}
self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
vdi_ref,
vdi_rec))
self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration.py', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_root(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_ephemeral(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
mock_trans_vhd.assert_called_once_with(session, "a_ephemeral_2",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_converts_exceptions(self, mock_trans_vhd):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
mock_trans_vhd.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = objects.ImageMeta.from_dict({})
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
session.product_version = ('6a')
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = self.get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id=2)
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING),
info)
| apache-2.0 | 5,899,845,017,866,450,000 | 42.438385 | 79 | 0.574799 | false |
bluesliverx/smartthings-src | apps/wifi-104-server/lights-app.py | 1 | 7007 | #!/usr/bin/env python3
from flask import Flask, request, abort, jsonify
from timeout_decorator import timeout
import logging
import socket
import platform
import json
import os
# App for uwsgi
app = Flask(__name__)
ENDPOINT = os.getenv('ENDPOINT', '192.168.1.104')
ENDPOINT_PORT = 8899
INTERNAL_ADDRESS = [0x11, 0x0e, 0x59]
BUFFER_SIZE = 1024
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if platform.system()=='Darwin':
logging.basicConfig()
else:
logging.basicConfig(filename='/var/log/uwsgi/wifi-104-server.log')
frame_index = 0
def get_frame_index():
"""
Retrieve and increment the frame index, wrapping around if needed.
"""
global frame_index
ret = frame_index
frame_index += 1
if frame_index > 255:
frame_index = 0
return ret
def message_response(message):
return jsonify({'message':message})
@app.route('/zones/<zone>/on', methods=['PUT'])
def zone_on(zone):
return message_response(switch_lights(zone, True))
@app.route('/zones/<zone>/off', methods=['PUT'])
def zone_off(zone):
return message_response(switch_lights(zone, False))
@app.route('/zones/<zone>/color', methods=['PUT'])
def zone_color(zone):
if 'red' not in request.args or 'green' not in request.args or 'blue' not in request.args or 'white' not in request.args:
return app.response_class(
response=json.dumps({'message':'Please include "red", "green", "blue", and "white" query params as integers'}),
status=400,
mimetype='application/json'
)
# Default to full brightness
message = set_color(
zone,
convert_color(request.args.get('red')),
convert_color(request.args.get('green')),
convert_color(request.args.get('blue')),
convert_color(request.args.get('white')),
convert_color(request.args.get('brightness', 0xff)),
)
return message_response(message)
@app.route('/zones/all/status')
def status():
logger.info('Retrieving status')
# While 1 arg is expected, it is never set in the app, so set it explicitly to 0
response = send_command(create_command(0x0f, 0x09, get_zone_mask('all'), [0x00]), receive=True, validate_index=True)
zone_statuses1 = int(response[11])
zone_statuses2 = int(response[12]) >> 4
logger.info('Zone statuses are {} and {}'.format(zone_statuses1, zone_statuses2))
statuses = {}
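    # Zones 1-8 are encoded as bits 0-7 of the first status byte; zones 9-12
    # come from the upper nibble of the second byte (already shifted right above)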
for zone in range(0, 8):
statuses[zone+1] = bool((1 << zone) & zone_statuses1)
for zone in range(8, 12):
statuses[zone+1] = bool((1 << (zone - 8)) & zone_statuses2)
return jsonify({'zones':statuses})
def convert_color(value):
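    # Query parameters arrive as strings; treat a missing value, None, or the
    # literal string 'null' as 0, otherwise coerce to int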
if type(value) is int:
return value
if not value or value=='null':
return 0
return int(value)
def switch_lights(zone=None, on=True):
    logger.info('Turning lights %s for zone %s', 'on' if on else 'off', zone)
all_zones = get_zone_mask(zone)
zone_status = get_zone_status(on, on)
# Send command twice since this is what the java app does - maybe something with frame index incrementing?
send_command(create_command(0x0f, 0x08, all_zones, zone_status))
send_command(create_command(0x0f, 0x08, all_zones, zone_status), receive=True)
return 'Successfully turned lights {}'.format('on' if on else 'off')
def set_color(zone, red, green, blue, white, brightness):
    logger.info('Setting color for zone %s to red %d green %d blue %d white %d brightness %d',
                zone, red, green, blue, white, brightness)
command = create_command(0x04, 0x01,
# Only target the one zone
get_zone_mask(zone),
# Arguments: RGBW + brightness
[red, green, blue, white, brightness]
)
send_command(command, receive=True)
return 'Successfully set color for zone {}'.format(zone)
def get_zone_mask(zone=None):
if zone=='all':
return [255, 255]
return [int(zone), 0]
def get_zone_status(zone1On, zone2On):
if zone1On and zone2On:
return [0x03, 0x00]
elif zone1On:
return [0x01, 0x00]
elif zone2On:
return [0x02, 0x00]
return [0x00, 0x00]
def create_command(command1, command2, zones, args=[]):
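    # Frame layout: 0x55 0xaa | frame index | cmd1 cmd2 | 3-byte internal address |
    # 2-byte zone mask | arg count | args... | 0x00 0x00 0xea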
# Begin command
#ret = [85, -86]
ret = [0x55, 0xaa]
# Add frame index (auto incrementing)
ret.append(get_frame_index())
# Add commands
ret.extend([command1, command2])
# Add "internal" address
ret.extend(INTERNAL_ADDRESS)
# Add zones (255 for all zones)
if len(zones) != 2:
raise Exception('zones parameter should be an array of 2 ints')
ret.extend(zones)
# Add number of args
num_args = 0
if command2==8:
# Control zones
num_args = 2
elif command2==1:
# RGBW + brightness
num_args = 5
elif command2==9:
# Query zone status
num_args = 1
ret.append(num_args)
# Add args and make sure they match
if len(args)!=num_args:
raise Exception('The number of arguments passed ({}) does not match the number expected for command {} ({})'.format(len(args), command2, num_args))
ret.extend(args)
# End command
#ret.extend([0, 0, -22])
ret.extend([0x00, 0x00, 0xea])
return ret
def send_command(command, receive=False, validate_index=False):
global ENDPOINT
global ENDPOINT_PORT
command_bytes = bytearray(command)
frame_index = command[2]
logger.debug('Sending command %d to endpoint %s:%d - %s', frame_index, ENDPOINT, ENDPOINT_PORT, command)
response = None
tries = 5
while not response and tries > 0:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ENDPOINT, ENDPOINT_PORT))
s.send(command_bytes)
if not receive:
return None
try:
response = receive_response(s, validate_index, frame_index)
# Do not return if there is no response (the frame index did not match)
if response:
return response
except Exception as e:
logger.warn(str(e))
logger.warn('Timed out while waiting for a response, resending command {} more times'.format(tries))
tries -= 1
finally:
s.close()
raise Exception('No response received from the controller')
@timeout(1, use_signals=False)
def receive_response(s, validate_index, expected_frame_index):
global BUFFER_SIZE
logger.debug('Waiting for response from endpoint')
byte_data = None
while not byte_data:
data = s.recv(BUFFER_SIZE)
byte_data = bytearray(data)
frame_index = int(byte_data[2])
logger.debug('Received %s from endpoint (frame index %d)', list(data), frame_index)
if validate_index and frame_index!=expected_frame_index:
logger.debug('Frame index received ({}) does not match expected ({}), ignoring'.format(frame_index, expected_frame_index))
byte_data = None
return byte_data
| apache-2.0 | 8,989,622,392,067,946,000 | 33.180488 | 155 | 0.634223 | false |
Nukesor/mitra | mitra/api/category.py | 1 | 1531 | from flask import jsonify,request
from flask.ext.login import current_user
from mitra import app,db
from mitra.models.category import Category
@app.route('/_addCategory', methods=['PUT', 'POST'])
def AddCategory():
data = {}
    # TODO Check for valid user input
if current_user.is_authenticated():
parsed = request.get_json()
category = Category(parsed['name'], current_user.id)
db.session.add(category)
db.session.commit()
data['success'] = 'Category added'
return jsonify(data)
else:
data['redirect'] = 'login'
return jsonify(data)
@app.route('/_removeCategory', methods=['PUT', 'POST'])
def RemoveCategory():
data = {}
if current_user.is_authenticated():
parsed = request.get_json()
        Category.query.filter_by(name=parsed['name'], userid=current_user.id).delete()
db.session.commit()
data['success'] = 'Category deleted'
return jsonify(data)
else:
data['redirect'] = 'login'
return jsonify(data)
@app.route('/_getCategories', methods=['GET', 'POST'])
def getCategories():
data = {}
if current_user.is_authenticated():
categories = current_user.categories.all()
if categories:
data['categories'] = []
for category in categories:
data['categories'].append({
'name':category.name
})
return jsonify(data)
else:
data['redirect'] = 'login'
return jsonify(data)
| mit | 6,259,498,564,645,138,000 | 29.62 | 84 | 0.596995 | false |
imajunryou/RealPython2 | flask-blog/blog.py | 1 | 2562 | # blog.py - controller
# imports
from flask import Flask, render_template, request, session, \
flash, redirect, url_for, g
import sqlite3
from functools import wraps
# configuration
DATABASE = "blog.db"
app = Flask(__name__)
# pulls in app configuration by looking for UPPERCASE variables in this file
app.config.from_object(__name__)
# Pulls in config from environment variable
# use the following line to establish the default path for it:
# export FLASK_BLOG_SETTINGS=settings.cfg
app.config.from_envvar("FLASK_BLOG_SETTINGS")
# function used for connecting to the database
def connect_db():
return sqlite3.connect(app.config["DATABASE"])
# used as a decorator to require routes to have valid login credentials
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if "logged_in" in session:
return test(*args, **kwargs)
else:
flash("You need to log in first.")
return redirect(url_for("login"))
return wrap
# views
@app.route("/", methods=["GET", "POST"])
def login():
error = None
status_code = 200
if request.method == "POST":
if request.form["username"] != app.config["USERNAME"] or \
request.form["password"] != app.config["PASSWORD"]:
error = "Invalid credentials. Please try again."
status_code = 401
else:
session["logged_in"] = True
return redirect(url_for("main"))
return render_template("login.html", error=error), status_code
@app.route("/logout")
def logout():
session.pop("logged_in", None)
flash("You were logged out")
return redirect(url_for("login"))
@app.route("/add", methods=["POST"])
@login_required
def add():
title = request.form["title"]
post = request.form["post"]
if not title or not post:
flash("All fields are required. Please try again.")
return redirect(url_for("main"))
else:
g.db = connect_db()
g.db.execute("INSERT INTO posts (title, post) VALUES (?, ?)",
[request.form["title"], request.form["post"]])
g.db.commit()
g.db.close()
flash("New entry was successfully posted!")
return redirect(url_for("main"))
@app.route("/main")
@login_required
def main():
g.db = connect_db()
cur = g.db.execute("SELECT * FROM posts")
posts = [dict(title=row[0], post=row[1]) for row in cur.fetchall()]
g.db.close()
return render_template("main.html", posts=posts)
if __name__ == "__main__":
app.run(debug=True)
| mit | 4,175,787,638,160,791,600 | 27.153846 | 76 | 0.625683 | false |
MrCreosote/jgi_kbase_integration_tests | scripts/translate_jgi_project_id_to_portal_org_name.py | 1 | 1211 | #! /usr/bin/env python
'''
Translates a set of JGI project IDs to their portal organism name. The
input is via python fileinput
(https://docs.python.org/2/library/fileinput.html).
@author: [email protected]
'''
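# Example invocation (file name is illustrative): pass one or more files of
# project IDs, one ID per line, or pipe IDs on stdin:
#   ./translate_jgi_project_id_to_portal_org_name.py project_ids.txt > org_names.tsv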
from __future__ import print_function
import fileinput
import urllib2
import sys
# JGI_URL = 'http://genome.jgi.doe.gov/ext-api/genome-admin/' +\
# 'getPortalIdByParameter?parameterName=jgiProjectId¶meterValue='
JGI_URL = 'http://128.55.71.129/ext-api/genome-admin/' +\
'getPortalIdByParameter?parameterName=jgiProjectId¶meterValue='
def main():
for line in fileinput.input():
line = line.strip()
url = JGI_URL + line
failed = True
while (failed):
try:
projects = urllib2.urlopen(url).read()
except urllib2.HTTPError as e:
print(line + '\t' + '***ERROR***: ' + str(e))
failed = False
except urllib2.URLError as e:
print('Error for ' + line + ': ' + str(e) + ', retrying',
file=sys.stderr)
else:
print(line + '\t' + projects)
failed = False
if __name__ == '__main__':
main()
| mit | -110,908,718,174,458,540 | 29.275 | 73 | 0.577209 | false |
CBA2011/ros_wasabi | devel/_setup_util.py | 1 | 12281 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/basano/GIT/ros_wasabi/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| lgpl-3.0 | 7,057,874,097,536,871,000 | 41.790941 | 213 | 0.657601 | false |
MiiRaGe/miilibrary | tests/test_remote_execution.py | 1 | 1567 | import mock
from django.conf import settings
from django.test import TestCase
from middleware.remote_execution import link, symlink, unrar, remove_dir
@mock.patch('middleware.remote_execution.shell_connection')
class TestRemoteExecution(TestCase):
def test_link(self, shell):
link('a', 'b')
shell.run.assert_called_with(['ln', 'a', 'b'])
def test_symlink(self, shell):
symlink('a', 'b')
shell.run.assert_called_with(['ln', '-s', 'a', 'b'])
def test_unrar(self, shell):
unrar('a', 'b')
shell.run.assert_called_with([settings.REMOTE_UNRAR_PATH, 'e', '-y', 'a', 'b'])
@mock.patch('middleware.remote_execution.delete_dir', new=mock.MagicMock())
def test_remove_dir(self, shell):
remove_dir('a')
shell.run.assert_called_with(['rm', '-rf', 'a'])
class TestNonRemoteExecution(TestCase):
@mock.patch('middleware.remote_execution.delete_dir')
def test_remove_dir(self, delete_dir):
remove_dir('a')
delete_dir.assert_called_with('a')
@mock.patch('middleware.remote_execution.os.symlink')
def test_symlink(self, mocked_symlink):
symlink('a', 'b')
mocked_symlink.assert_called_with('a', 'b')
@mock.patch('middleware.remote_execution.os.link')
def test_link(self, mocked_link):
link('a', 'b')
mocked_link.assert_called_with('a', 'b')
@mock.patch('middleware.remote_execution.subprocess')
def test_unrar(self, mocked_subprocess):
unrar('a', 'b')
assert mocked_subprocess.check_output.called | mit | 5,797,025,024,752,267,000 | 32.361702 | 87 | 0.639438 | false |
philippotto/Sublime-MultiEditUtils | tests/testSelectionFields.py | 1 | 6519 | # coding: utf8
import sublime
from unittest import TestCase
_ST3 = sublime.version() >= "3000"
version = sublime.version()
content_string = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum."""
def to_region(v):
if isinstance(v, int):
region = sublime.Region(v, v)
elif isinstance(v, sublime.Region):
region = v
else:
region = sublime.Region(v[0], v[1])
return region
class TestSelectionFields(TestCase):
def setUp(self):
self.view = sublime.active_window().new_file()
regions = [(12, 14), (32, 30), 50, 60]
self.start_regions = list(map(to_region, regions))
self.view.run_command("insert", {"characters": content_string})
self.select_regions(self.start_regions)
def tearDown(self):
if self.view:
self.view.set_scratch(True)
self.view.window().run_command("close_file")
def assertSelectionEqual(self, sel1, sel2):
self.assertEqual(len(sel1), len(sel2))
for i in range(len(sel1)):
self.assertEqual(to_region(sel1[i]), to_region(sel2[i]))
def select_regions(self, regions):
self.view.sel().clear()
if _ST3:
self.view.sel().add_all(map(to_region, regions))
else:
for region in regions:
self.view.sel().add(to_region(region))
def test_toggle(self):
"""Test whether the toggle works."""
view = self.view
regions = list(self.start_regions)
view.run_command("selection_fields", {"mode": "toggle"})
self.assertEqual(len(view.sel()), 1)
self.assertEqual(view.sel()[0], regions[0])
stored_regions = view.get_regions("meu_sf_stored_selections")
self.assertSelectionEqual(regions[1:], stored_regions)
view.run_command("selection_fields", {"mode": "toggle"})
self.assertEqual(len(view.sel()), len(regions))
self.assertSelectionEqual(view.sel(), regions)
def test_smart_run(self):
"""Test whether a full run with the smart mode works."""
view = self.view
regions = list(self.start_regions)
view.run_command("selection_fields", {"mode": "smart"})
for i in range(len(regions)):
self.assertEqual(len(view.sel()), 1)
self.assertEqual(view.sel()[0], regions[i])
stored_regions = view.get_regions("meu_sf_stored_selections")
self.assertSelectionEqual(regions[:i] + regions[i+1:],
stored_regions)
view.run_command("selection_fields", {"mode": "smart"})
self.assertSelectionEqual(view.sel(), regions)
def test_smart_move(self):
"""
Test whether moving during a run, results in the corresponding
caret positions after the run.
"""
view = self.view
regions = list(self.start_regions)
view.run_command("selection_fields", {"mode": "smart"})
for i in range(len(regions)):
sel = view.sel()[0]
if sel.empty():
regions[i] = sel.end() + i + 1
else:
regions[i] = sel.end() + i
for _ in range(i + 1):
view.run_command("move", {"by": "characters", "forward": True})
view.run_command("selection_fields", {"mode": "smart"})
self.assertSelectionEqual(view.sel(), regions)
def test_smart_add_selections(self):
"""
Test whether adding carets during a run, results in the
corresponding caret positions after the run.
"""
view = self.view
regions = list(self.start_regions)
view.run_command("selection_fields", {"mode": "smart"})
for i, v in enumerate(self.start_regions):
sel = view.sel()[0]
new_sel = to_region(sel.begin() - 1)
view.sel().add(new_sel)
regions.insert(i * 2, to_region(new_sel))
view.run_command("selection_fields", {"mode": "smart"})
self.assertSelectionEqual(view.sel(), regions)
def test_jump_remove(self):
"""
Test whether jumps remove other selections.
"""
view = self.view
view.run_command("selection_fields", {"mode": "smart"})
jumps = 3
for _ in range(jumps):
view.run_command("selection_fields", {"mode": "smart"})
self.assertSelectionEqual(view.sel(), [self.start_regions[jumps]])
def test_add(self):
"""
Test whether it is possible to add fields via the add mode.
"""
view = self.view
regions = list(self.start_regions)
add_regions_list = [(16, 17), 54, 109]
view.run_command("selection_fields", {"mode": "add"})
view.sel().clear()
self.select_regions(add_regions_list)
view.run_command("selection_fields", {"mode": "add"})
view.run_command("move", {"by": "characters", "forward": True})
view.run_command("selection_fields",
{"mode": "pop", "only_other": True})
# add the added regions and sort it to retrieve the desired selections
regions.extend(map(to_region, add_regions_list))
regions.sort(key=lambda sel: sel.begin())
self.assertSelectionEqual(view.sel(), regions)
def test_subtract(self):
"""Test whether subtract fields works properly."""
view = self.view
regions_list = [(16, 35), 54, 60, (100, 103)]
subtract_regions_list = [(2, 10), (14, 20), 54, (99, 120)]
result_regions_list = [(20, 35), 60]
self.select_regions(regions_list)
view.run_command("selection_fields", {"mode": "add"})
self.select_regions(subtract_regions_list)
view.run_command("selection_fields", {"mode": "subtract"})
view.run_command("selection_fields",
{"mode": "pop", "only_other": True})
# add the added regions and sort it to retrieve the desired selections
regions = list(map(to_region, result_regions_list))
self.assertSelectionEqual(view.sel(), regions)
| mit | -1,665,543,656,733,438,000 | 34.818681 | 92 | 0.592576 | false |
Dushistov/rust_swig | python_tests/python/main.py | 1 | 2537 | #!/usr/bin/python3
from rust_swig_test_python import TestStaticClass, TestEnum, TestClass, TestArc, TestArcMutex, TestBox, Error as TestError
def test_static_methods():
assert TestStaticClass.hello() == "Hello from rust"
assert TestStaticClass.format_number(123) == "format_number: 123"
assert TestStaticClass.format_str("python str") == "format_str: python str"
assert TestStaticClass.format_string("python string") == "format_string: python string"
assert TestStaticClass.add(1, 2) == 3
assert TestStaticClass.get_tuple() == (0, "0")
def test_enum():
assert TestEnum.A == 0
assert TestEnum.B == 1
enum = TestEnum.A
assert TestStaticClass.reverse_enum(enum) == TestEnum.B
exception_occured = False
try:
# Pass invalid enum value
TestStaticClass.reverse_enum(2)
except ValueError as ex:
exception_occured = True
assert exception_occured
def test_class():
test_class = TestClass()
assert test_class.format() == "TestClass::i: 0"
test_class.increment()
assert test_class.format() == "TestClass::i: 1"
test_class.add(3)
assert test_class.get() == 4
# pass this class as an argument
assert TestStaticClass.call_test_class_format(test_class) == "TestClass::i: 4"
test_class.add_ref(1)
assert test_class.get_ref() == 5
def test_options():
test_class = TestClass()
assert test_class.maybe_add(1) == 1
assert test_class.maybe_add(None) == None
def test_arrays():
assert TestStaticClass.increment_vec([1, 2]) == [2, 3]
assert TestStaticClass.return_slice([3, 4]) == [3, 4]
assert TestStaticClass.count_slice_of_objects([TestClass(), TestClass()]) == 2
def test_results():
TestStaticClass.test_result_ok()
exception_occured = False
try:
TestStaticClass.test_result_err()
except TestError as ex:
exception_occured = True
assert exception_occured
def test_arc():
arc = TestArc()
assert str(arc) == "0"
assert TestArc.to_string_arc(arc) == "0"
assert TestArc.to_string_ref_arc(arc) == "0"
def test_arc_mutex():
arc = TestArcMutex()
assert str(arc) == "0"
arc.inc()
assert TestArcMutex.to_string_arc(arc) == "1"
assert TestArcMutex.to_string_ref_arc(arc) == "1"
def test_box():
box = TestBox()
assert str(box) == "0"
print("Testing python API")
test_enum()
test_static_methods()
test_class()
test_options()
test_arrays()
test_results()
test_arc()
test_arc_mutex()
test_box()
print("Testing python API successful")
| bsd-3-clause | -5,595,113,001,600,638,000 | 28.5 | 122 | 0.663776 | false |
scikit-learn-contrib/categorical-encoding | tests/test_polynomial.py | 1 | 5846 | import pandas as pd
from unittest import TestCase  # or unittest2.TestCase on older Pythons
import numpy as np
import category_encoders as encoders
from tests.helpers import deep_round
a_encoding = [1, -0.7071067811865476, 0.40824829046386313]
b_encoding = [1, -5.551115123125783e-17, -0.8164965809277261]
c_encoding = [1, 0.7071067811865475, 0.4082482904638631]
class TestPolynomialEncoder(TestCase):
def test_polynomial_encoder_preserve_dimension_1(self):
train = ['A', 'B', 'C']
test = ['A', 'D', 'E']
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
test_t = encoder.transform(test)
expected = [a_encoding,
[1, 0, 0],
[1, 0, 0]]
self.assertEqual(deep_round(test_t.values.tolist()), deep_round(expected))
def test_polynomial_encoder_preserve_dimension_2(self):
train = ['A', 'B', 'C']
test = ['B', 'D', 'E']
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
test_t = encoder.transform(test)
expected = [b_encoding,
[1, 0, 0],
[1, 0, 0]]
self.assertEqual(deep_round(test_t.values.tolist()), deep_round(expected))
def test_polynomial_encoder_preserve_dimension_3(self):
train = ['A', 'B', 'C']
test = ['A', 'B', 'C', None]
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
test_t = encoder.transform(test)
expected = [a_encoding,
b_encoding,
c_encoding,
[1, 0, 0]]
self.assertEqual(deep_round(test_t.values.tolist()), deep_round(expected))
def test_polynomial_encoder_preserve_dimension_4(self):
train = ['A', 'B', 'C']
test = ['D', 'B', 'C', None]
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
test_t = encoder.transform(test)
expected = [[1, 0, 0],
b_encoding,
c_encoding,
[1, 0, 0]]
self.assertEqual(deep_round(test_t.values.tolist()), deep_round(expected))
def test_polynomial_encoder_2cols(self):
train = [['A', 'A'], ['B', 'B'], ['C', 'C']]
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
obtained = encoder.transform(train)
expected = [[1, a_encoding[1], a_encoding[2], a_encoding[1], a_encoding[2]],
[1, b_encoding[1], b_encoding[2], b_encoding[1], b_encoding[2]],
[1, c_encoding[1], c_encoding[2], c_encoding[1], c_encoding[2]]]
self.assertEqual(deep_round(obtained.values.tolist()), deep_round(expected))
def test_polynomial_encoder_2StringCols_ExpectCorrectOrder(self):
train = pd.DataFrame({'col1': [1, 2, 3, 4],
'col2': ['A', 'B', 'C', 'D'],
'col3': [1, 2, 3, 4],
'col4': ['A', 'B', 'C', 'A']
},
columns=['col1', 'col2', 'col3', 'col4'])
expected_columns = ['intercept', 'col1', 'col2_0', 'col2_1', 'col2_2', 'col3', 'col4_0', 'col4_1']
encoder = encoders.PolynomialEncoder(handle_unknown='value', handle_missing='value')
encoder.fit(train)
columns = encoder.transform(train).columns.values
self.assertTrue(np.array_equal(expected_columns, columns))
def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self):
train = ['A', 'B', np.nan]
encoder = encoders.PolynomialEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [a_encoding,
b_encoding,
c_encoding]
self.assertTrue(np.array_equal(deep_round(result.values.tolist()), deep_round(expected)))
def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.PolynomialEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [a_encoding,
b_encoding]
self.assertEqual(deep_round(result.values.tolist()), deep_round(expected))
def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self):
train = ['A', 'B']
test = ['A', 'B', np.nan]
encoder = encoders.PolynomialEncoder(handle_missing='indicator', handle_unknown='value')
encoder.fit(train)
result = encoder.transform(test)
expected = [a_encoding,
b_encoding,
c_encoding]
self.assertEqual(deep_round(result.values.tolist()), deep_round(expected))
def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self):
train = ['A', 'B']
test = ['A', 'B', 'C']
encoder = encoders.PolynomialEncoder(handle_unknown='indicator')
encoder.fit(train)
result = encoder.transform(test)
expected = [a_encoding,
b_encoding,
c_encoding]
self.assertEqual(deep_round(result.values.tolist()), deep_round(expected))
def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.PolynomialEncoder(handle_unknown='indicator')
result = encoder.fit_transform(train)
expected = [a_encoding,
b_encoding]
self.assertEqual(deep_round(result.values.tolist()), deep_round(expected))
| bsd-3-clause | -1,910,405,756,056,152,600 | 37.973333 | 106 | 0.57766 | false |
360skyeye/kael | examples/micro_service/run.py | 1 | 3373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
from pprint import pprint
import click
from kael.microservice import micro_server
from kael.work_frame import WORK_FRAME
file_path = os.path.abspath(os.path.dirname(__file__))
if file_path not in sys.path:
sys.path.insert(0, file_path)
path = os.path.split(file_path)
if path not in sys.path:
sys.path.insert(0, path[0])
AMQ_URI = os.environ.get('AMQ_URI')
@click.group()
def cli():
pass
@cli.command()
def s():
server = micro_server("s1", auri=AMQ_URI)
@server.service("hha")
def h(s):
print "HHHHH", s, os.getpid()
return {"b": s}
server.start_service(4, daemon=False)
r = server.hha(123123)
print server.hha.src
print "--------------", r
print "done"
print server.services
@cli.command()
def c():
server = micro_server("s1", auri=AMQ_URI)
r = server.hha(s=12312, qid="a")
print server.hha.src
print r
@cli.command()
def p():
conf_dir = os.path.join(file_path, 'setting.yaml')
w = WORK_FRAME("test", auri=AMQ_URI, service_group_conf=conf_dir)
w.frame_start()
@cli.command()
def pc():
server = WORK_FRAME("test", auri=AMQ_URI)
print server.calculate__add(10, 20)
print server.calculate__minus(10, 20)
print server.time__add(1)
r = server.command("_restart_service")
print r
time.sleep(3)
print server.get_response(r)
@cli.command()
def status():
print '-' * 10, 'service', '-' * 10
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version", pkg_type='service')
pprint(server.get_response(r))
print '\n\n', '-' * 10, 'crontab', '-' * 10
r = server.command("_get_pkg_version", pkg_type='crontab')
pprint(server.get_response(r))
@cli.command()
def restart_service():
server = WORK_FRAME("test", auri=AMQ_URI)
print server.restart_servers('service', timeout=3)
@cli.command()
def restart_crontab():
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_restart_crontab")
print server.get_response(r, timeout=5)
@cli.command()
def update_s():
service = 'time'
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version")
pprint(server.get_response(r, timeout=5, ))
pprint(server.update_service(service))
@cli.command()
def update_c():
crontab = 'print'
server = WORK_FRAME("test", auri=AMQ_URI)
r = server.command("_get_pkg_version", pkg_type='crontab')
pprint(server.get_response(r, timeout=5, ))
pprint(server.update_crontab(crontab, version=1.0))
@cli.command()
def install():
server = WORK_FRAME("test", auri=AMQ_URI)
service = 'calculate'
pprint(server.get_last_version(service))
pprint(server.install_service(service, './caccu'))
# cron tab
@cli.command()
def scron():
"""micro server crontab"""
server = micro_server("test", auri=AMQ_URI)
server.add_crontab(cron_name='haha', command='echo 2', time_str='* * * * *')
server.start_crontab()
print '-' * 100
print 'USER ALL CRONTAB'
pprint(server.cron_manage.user_cron_jobs())
print '-' * 100
@cli.command()
def wfcron():
"""work frame crontab"""
server = WORK_FRAME("test", auri=AMQ_URI)
pprint(server.get_all_crontab_status())
if __name__ == "__main__":
cli()
| apache-2.0 | 2,488,542,644,253,334,500 | 22.423611 | 80 | 0.625259 | false |
130s/ros_buildfarm | ros_buildfarm/jenkins.py | 1 | 9548 | from __future__ import print_function
from ast import literal_eval
import copy
import difflib
import sys
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
from xml.etree import ElementTree
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.utils.requester import Requester
from jenkinsapi.views import Views
from .jenkins_credentials import get_credentials
from .templates import expand_template
JENKINS_MANAGEMENT_VIEW = 'Manage'
class CrumbRequester(Requester):
"""Adapter for Requester inserting the crumb in every request."""
def __init__(self, *args, **kwargs):
super(CrumbRequester, self).__init__(*args, **kwargs)
self._baseurl = kwargs['baseurl']
self._last_crumb_data = None
def post_url(self, *args, **kwargs):
if self._last_crumb_data:
# first try request with previous crumb if available
response = self._post_url_with_crumb(
self._last_crumb_data, *args, **kwargs)
# code 403 might indicate that the crumb is not valid anymore
if response.status_code != 403:
return response
# fetch new crumb (if server has crumbs enabled)
if self._last_crumb_data is not False:
self._last_crumb_data = self._get_crumb_data()
return self._post_url_with_crumb(
self._last_crumb_data, *args, **kwargs)
def _get_crumb_data(self):
response = self.get_url(self._baseurl + '/crumbIssuer/api/python')
if response.status_code in [404]:
print('The Jenkins master does not require a crumb')
return False
if response.status_code not in [200]:
raise RuntimeError('Failed to fetch crumb: %s' % response.text)
crumb_issuer_response = literal_eval(response.text)
crumb_request_field = crumb_issuer_response['crumbRequestField']
crumb = crumb_issuer_response['crumb']
print('Fetched crumb: %s' % crumb)
return {crumb_request_field: crumb}
def _post_url_with_crumb(self, crumb_data, *args, **kwargs):
if crumb_data:
if len(args) >= 5:
headers = args[4]
else:
headers = kwargs.setdefault('headers', {})
headers.update(crumb_data)
return super(CrumbRequester, self).post_url(*args, **kwargs)
class JenkinsProxy(Jenkins):
"""Proxy for Jenkins instance caching data for performance reasons."""
def __init__(self, *args, **kwargs):
requester_kwargs = copy.copy(kwargs)
requester_kwargs['baseurl'] = args[0]
kwargs['requester'] = CrumbRequester(**requester_kwargs)
super(JenkinsProxy, self).__init__(*args, **kwargs)
self.__jobs = None
@property
def jobs(self):
if self.__jobs is None:
self.__jobs = super(JenkinsProxy, self).jobs
return self.__jobs
def connect(jenkins_url):
print("Connecting to Jenkins '%s'" % jenkins_url)
username, password = get_credentials(jenkins_url)
jenkins = JenkinsProxy(jenkins_url, username=username, password=password)
print("Connected to Jenkins version '%s'" % jenkins.version)
return jenkins
def configure_management_view(jenkins):
return configure_view(
jenkins, JENKINS_MANAGEMENT_VIEW, include_regex='^((?!__).)*$')
def configure_view(
jenkins, view_name, include_regex=None,
template_name='generic_view.xml.em'):
view_config = get_view_config(
template_name, view_name, include_regex=include_regex)
view_type = _get_view_type(view_config)
create_view = view_name not in jenkins.views
if create_view:
print("Creating view '%s' of type '%s'" % (view_name, view_type))
view = jenkins.views.create(view_name, view_type=view_type)
remote_view_config = view.get_config()
else:
print("Ensure that view '%s' exists" % view_name)
view = jenkins.views[view_name]
remote_view_config = view.get_config()
remote_view_type = _get_view_type(remote_view_config)
if remote_view_type != view_type:
del jenkins.views[view_name]
print("Recreating view '%s' of type '%s'" % (view_name, view_type))
view = jenkins.views.create(view_name, view_type=view_type)
remote_view_config = view.get_config()
diff = _diff_configs(remote_view_config, view_config)
if not diff:
print("Skipped '%s' because the config is the same" % view_name)
else:
print("Updating view '%s'" % view_name)
if not create_view:
print(' ', '<<<')
for line in diff:
print(' ', line.rstrip('\n'))
print(' ', '>>>')
try:
response_text = view.update_config(view_config)
except Exception:
print("Failed to configure view '%s' with config:\n%s" %
(view_name, view_config), file=sys.stderr)
raise
if response_text:
raise RuntimeError(
"Failed to configure view '%s':\n%s" %
(view_name, response_text))
return view
def get_view_config(template_name, view_name, include_regex=None, data=None):
view_data = copy.deepcopy(data) if data is not None else {}
view_data.update({
'view_name': view_name,
'include_regex': include_regex,
})
view_config = expand_template(template_name, view_data)
return view_config
def _get_view_type(view_config):
root = ElementTree.fromstring(view_config)
if root.tag == 'hudson.model.ListView':
return Views.LIST_VIEW
if root.tag == 'hudson.plugins.view.dashboard.Dashboard':
return Views.DASHBOARD_VIEW
assert False, 'Unknown list type: ' + root.tag
def configure_job(jenkins, job_name, job_config, view=None):
response_text = None
try:
if not jenkins.has_job(job_name):
print("Creating job '%s'" % job_name)
job = jenkins.create_job(job_name, job_config)
else:
job = jenkins.get_job(job_name)
remote_job_config = job.get_config()
diff = _diff_configs(remote_job_config, job_config)
if not diff:
print("Skipped '%s' because the config is the same" % job_name)
else:
print("Updating job '%s'" % job_name)
print(' ', '<<<')
for line in diff:
print(' ', line.rstrip('\n'))
print(' ', '>>>')
response_text = job.update_config(job_config)
if response_text:
print('Failed to update job config:\n%s' % response_text)
raise RuntimeError()
except Exception:
print("Failed to configure job '%s' with config:\n%s" %
(job_name, job_config), file=sys.stderr)
raise
if response_text:
raise RuntimeError(
"Failed to configure job '%s':\n%s" % (job_name, response_text))
if view is not None:
if job_name not in view:
print("Adding job '%s' to view '%s'" % (job_name, view.name))
job = view.add_job(job_name, job)
else:
print("Job '%s' is already in view '%s'" % (job_name, view.name))
return job
def invoke_job(jenkins, job_name, cause=None):
try:
if not jenkins.has_job(job_name):
print("Failed to invoke job '%s' because it does not exist" %
job_name, file=sys.stderr)
return False
job = jenkins.get_job(job_name)
if not job.is_enabled():
print("Failed to invoke job '%s' because it is disabled" %
job_name, file=sys.stderr)
return False
if job.is_queued():
print("Skipped to invoke job '%s' because it is queued" %
job_name, file=sys.stderr)
return False
if job.is_running():
print("Skipped to invoke job '%s' because it is running" %
job_name, file=sys.stderr)
return False
print("Invoking job '%s'" % job_name)
job.invoke(cause=cause)
except Exception:
print("Failed to invoke job '%s'" % job_name, file=sys.stderr)
raise
return True
def _diff_configs(remote_config, new_config):
remote_root = ElementTree.fromstring(remote_config)
new_root = ElementTree.fromstring(new_config)
# ignore description which contains timestamp
if remote_root.find('description') is not None:
remote_root.find('description').text = ''
if new_root.find('description') is not None:
new_root.find('description').text = ''
if ElementTree.tostring(remote_root) == ElementTree.tostring(new_root):
return []
xml1 = ElementTree.tostring(remote_root, encoding='unicode')
xml2 = ElementTree.tostring(new_root, encoding='unicode')
lines1 = xml1.splitlines()
lines2 = xml2.splitlines()
return difflib.unified_diff(
lines1, lines2, 'remote config', 'new config', n=0)
def remove_jobs(jenkins, job_prefix, excluded_job_names):
for job_name in jenkins.jobs.keys():
if not job_name.startswith(job_prefix):
continue
if job_name in excluded_job_names:
continue
print("Deleting job '%s'" % job_name)
jenkins.delete_job(job_name)
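
# Illustrative usage sketch; the URL, job name, cause string and config XML below are
# hypothetical placeholders, only the helpers defined in this module are assumed:
#
#     jenkins = connect('https://jenkins.example.org')
#     view = configure_management_view(jenkins)
#     configure_job(jenkins, 'example__job', job_config_xml, view=view)
#     invoke_job(jenkins, 'example__job', cause='manually triggered')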
| apache-2.0 | -9,148,623,834,193,465,000 | 35.304183 | 79 | 0.599288 | false |
svox1/e2openplugin-OpenWebif | plugin/local.py | 1 | 14799 | # -*- coding: utf-8 -*-
from __init__ import _
tstrings = {'mo': _("Mo"),
'tu': _("Tu"),
'we': _("We"),
'th': _("Th"),
'fr': _("Fr"),
'sa': _("Sa"),
'su': _("Su"),
'day_0': _("Sun"),
'day_1': _("Mon"),
'day_2': _("Tue"),
'day_3': _("Wed"),
'day_4': _("Thu"),
'day_5': _("Fri"),
'day_6': _("Sat"),
'monday': _("Monday"),
'tuesday': _("Tuesday"),
'wednesday': _("Wednesday"),
'thursday': _("Thursday"),
'friday': _("Friday"),
'saturday': _("Saturday"),
'sunday': _("Sunday"),
'month_01': _("January"),
'month_02': _("February"),
'month_03': _("March"),
'month_04': _("April"),
'month_05': _("May"),
'month_06': _("June"),
'month_07': _("July"),
'month_08': _("August"),
'month_09': _("September"),
'month_10': _("October"),
'month_11': _("November"),
'month_12': _("December"),
'about': _("About"),
'add_timer': _("Add Timer"),
'add_autotimer': _("Add AutoTimer"),
'add_zaptimer': _("Add Zap Timer"),
'after_event': _("After Event"),
'agc': _("AGC"),
'all': _("All"),
'all_channels': _("All Channels"),
'authors': _("Authors"),
'auto': _("Auto"),
'back': _("Back"),
'begin': _("Begin"),
'ber': _("BER"),
'bouquets': _("Bouquets"),
'box_info': _("Box Info"),
'box': _("Box"),
'boxcontrol': _("Box Control"),
'box_uptime': _("Box Uptime"),
'brand': _("Brand"),
'cancel': _("Cancel"),
'capacity': _("Capacity"),
'channel': _("Channel"),
'channels': _("Channels"),
'chipset': _("Chipset"),
'cleanup_timer':_("Cleanup Timers"),
'close':_("Close"),
'contributors': _("Contributors"),
'control': _("Control"),
'current': _("Current"),
'current_event': _("Current Event"),
'date': _("Date"),
'deep_standby': _("Deep-Standby"),
'default': _("Default"),
'delete_recording': _("Delete Recording"),
'delete_recording_question': _("Really delete the recording"),
'delete_timer': _("Delete Timer"),
'delete_timer_question': _("Really delete the timer"),
'description': _("Description"),
'dhcp': _("DHCP"),
'disable_timer': _("Disable Timer"),
'disabled': _("disabled"),
'distro_version': _("Distro"),
'dolby': _("Dolby"),
'done': _("Done"),
'download': _("Download"),
'download_playlist': _("Download Playlist for"),
'driver_date': _("Drivers"),
'duration': _("Duration"),
'edit_timer': _("Edit Timer"),
'enable_timer': _("Enable Timer"),
'start': _("Start"),
'end': _("End"),
'enabled': _("Enabled"),
'encrypted': _("Encrypted"),
'epg': _("EPG"),
'epgsearch': _("Epg Search"),
'epgsearchextended': _("Include description"),
'error': _("Error"),
'every_timer': _("Every"),
'extras': _("Extras"),
'finished': _("finished"),
'firmware_version': _("Firmware version"),
'fp_version': _("Frontprocessor Version"),
'free': _("Free"),
'free_memory': _("Free Memory"),
'gateway': _("Gateway"),
'grabscreenshot': _("Grab Screenshot"),
'gui_version': _("Gui version"),
'hidefullremote': _("Hide full remote"),
'high_resolution': _("High Resolution"),
'hdd_model': _("Hard disk model"),
'hour': _("Hour"),
'ipv4_address': _("IPv4 address"),
'ipv4only_kernel': _("IPv4-only kernel"),
'ipv4only_network': _("none/IPv4-only network"),
'ipv4only_python': _("IPv4-only Python/Twisted"),
'ipv6_address': _("IPv6 address(es)"),
'info': _("Infos"),
'instant_record': _("Instant Record"),
'javalib': _("Javascript Libraries"),
'just_play': _("Just play"),
'kernel_version': _("Kernel"),
'license': _("LICENSE"),
'loading': _("loading"),
'location': _("Location"),
'locked': _("Locked"),
'mac_address': _("MAC address"),
'main': _("Main"),
'minute': _("Minute"),
'model': _("Model"),
'movielist': _("Movielist"),
'movies': _("Movies"),
'multi_epg': _("MultiEPG"),
'name': _("Name"),
'namespace': _("Namespace"),
'network_interface': _("Network Interface"),
'no_description_available': _("no description available"),
'not_implemented': _("Sorry this page is not yet implemented"),
'nothing': _("Nothing"),
'nothing_play': _("Nothing playing."),
'now': _("Now"),
'oe_version': _("System OE"),
'on': _("On"),
'openwebif_header': _("Open Source Web Interface for Linux set-top box"),
'show_boxname': _("Show box name in header"),
'use_custom_boxname': _("Use custom box name"),
'custom_boxname': _("Custom box name"),
'osd': _("OSD"),
'open_in_new_window': _("Open in new window"),
'playback': _("Playback"),
'playlist': _("Playlist"),
'powercontrol': _("Power Control"),
'provider': _("Provider"),
'providers': _("Providers"),
'radio': _("Radio"),
'reboot_box': _("Reboot Box"),
'rec_status': _("Recording Status"),
'refresh': _("Refresh"),
'refresh_auto': _("Refresh automatically every"),
'refresh_timer': _("Refresh Timer"),
'remote': _("Remote"),
'rename_recording': _("Rename Recording"),
'rename_recording_question': _("Really rename the recording"),
'repeated': _("Repeated"),
'restart_gui': _("Restart GUI"),
'running': _("running"),
'safe_mode': _("Safe mode"),
'satellites': _("Satellites"),
'satfinder': _("Satfinder"),
'save': _("Save"),
'screenshot': _("Screenshot"),
'search': _("Search"),
'search_imdb': _("Search IMDb"),
'search_kinopoisk': _("Search KinoPoisk"),
'seconds': _("seconds"),
'send_message': _("Send Message"),
'sent_wait': _('Waiting for answer ...'),
'sendamessage': _("Send a Message"),
'service': _("Service"),
'settings': _("Settings"),
'Bouquet_Editor': _("Bouquet Editor"),
'shiftforlong': _("(shift + click for long pressure)"),
'show_full_openwebif': _("Show Full OpenWebif"),
'showfullremote': _("Show full remote"),
'show_epg_for': _("Show EPG for"),
'shutdown': _("Shutdown"),
'site_source': _("Site and sources"),
'snr': _("SNR"),
'software': _("Software"),
'standby': _("Standby"),
'standby_toggle': _("Standby Toggle"),
'start_after_end': _("Start time is after end time"),
'start_instant_record': _("Start Instant Record"),
'stream': _("Stream"),
'subnet_mask': _("Subnet mask"),
'subservices': _("Subservices"),
'tags': _("Tags"),
'teletext': _("Teletext"),
'television': _("Television"),
'template_engine': _("Template Engine"),
'text': _("Text"),
'time': _("Time"),
'timeout': _("Timeout"),
'timer_added': _("Timer added"),
'timer_list': _("Timerlist"),
'timer_newname': _("New Name"),
'timer_preview': _("Autotimer Preview"),
'timer': _("Timer"),
'timers': _("Timers"),
'title': _("Title"),
'main_memory': _("Main Memory"),
'transcoded': _("transcoded"),
'transcode': _("Transcode"),
'tuner_ber': _("Tuner Bit Error Rate BER"),
'tuner_number': _("Tuner Number"),
'tuner_signal': _("Tuner Signal"),
'tuner_signal_snr': _("Tuner Signal Quality SNR"),
'tuner_signal_snr_db': _("Tuner Signal Quality SNR_DB"),
'tuner_signal_agc': _("Tuner Signal Power AGC"),
'tuner_type': _("Tuner Type"),
'tuners': _("Tuners"),
'tv': _("TV"),
'tv_multi_epg': _("TV Multi EPG"),
'type': _("Type"),
'upcoming_events': _("Upcoming Events"),
'version': _("Version"),
'video': _("Video"),
'video_height': _("Video Height"),
'video_wide': _("Video Wide"),
'video_width': _("Video Width"),
'vps': _("VPS"),
'volume': _("Volume"),
'volumecontrol': _("Volume Control"),
'waiting': _("waiting"),
'warning': _("Warning"),
'yes_no': _("Yes/No"),
'zap': _("Zap"),
'zapbeforestream': _("zap before Stream"),
'zap_to': _("Zap to"),
'zapped_to': _("Zapped to"),
'translation_spanish': _('Translation to Spanish'),
'license_text_01': _('All Files of this Software are open source software;'),
'license_text_02': _('you can redistribute it and/or modify it under the'),
'license_text_03': _('terms of the GNU General Public License version 2 as'),
'license_text_04': _('published by the Free Software Foundation.'),
'license_text_m': _('All Files of this Software are open source software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.'),
'Root': _('Root'),
'at_list': _("AutoTimer List"),
'at_at_edit': _("AutoTimer Edit"),
'at_enabled': _("Enabled"),
'at_description': _("Description"),
'at_title_match': _("Match title"),
'at_encoding': _("EPG encoding"),
'at_search_type': _("Search type"),
'at_partial_match': _("partial match"),
'at_exact_match': _("exact match"),
'at_description_match': _("description match"),
'at_title_or_description_match': _("title or description match"),
'at_title_or_short_description_match': _("title or short description match"),
'at_short_description_match': _("short description match"),
'at_start_match': _("start match"),
'at_search_strictness': _("Search strictness"),
'at_case_sensitive': _("case-sensitive search"),
'at_case_insensitive': _("case-insensitive search"),
'at_timer_type': _("Timer type"),
'at_record': _("record"),
'at_zap': _("zap"),
'at_override_alt': _("Override found with alternative service"),
'at_timespan': _("Only match during timespan"),
'at_timespan_begin': _("Begin of timespan"),
'at_timespan_end': _("End of Timespan"),
'at_datespan': _("Restrict to events on certain dates"),
'at_datespan_after': _("Not after"),
'at_datespan_before': _("Not before"),
'at_timer_offset': _("Custom offset"),
'at_timer_offset_before': _("Offset before recording (in m)"),
'at_timer_offset_after': _("Offset after recording (in m)"),
'at_max_duration': _("Set maximum duration"),
'at_after_event': _("After event"),
'at_after_event_standard': _("standard"),
'at_after_event_auto': _("auto"),
'at_after_event_nothing': _("do nothing"),
'at_after_event_standby': _("go to standby"),
'at_after_event_deepstandby': _("go to deep standby"),
'at_event_timespan': _('Execute "after event" during timespan'),
'at_event_timespan_begin': _('Begin of "after event" timespan'),
'at_event_timespan_end': _('End of "after event" timespan'),
'at_max_counter': _("Record a maximum of x times"),
'at_left': _("Amount of recordings left"),
'at_never': _("Never"),
'at_monthly': _("Monthly"),
'at_weekly_sun': _("Weekly (Sunday)"),
'at_weekly_mon': _("Weekly (Monday)"),
'at_reset_count': _("Reset count"),
'at_avoid_dup': _("Require description to be unique"),
'at_avoid_dup_no': _("No"),
'at_avoid_dup_same_service': _("On same service"),
'at_avoid_dup_any_service': _("On any service"),
'at_avoid_dup_any_service_rec': _("Any service/recording"),
'at_location': _("Use a custom location"),
'at_tags': _("Tags"),
'at_select_tags': _("select Tags"),
'at_channels': _("Channels"),
'at_select_channels': _("select Channels"),
'at_bouquets': _("Bouquets"),
'at_select_bouquets': _("select Bouquets"),
'at_filter': _("Enable Filtering"),
'at_filter_include': _("Include"),
'at_filter_exclude': _("Exclude"),
'at_filter_title': _("in Title"),
'at_filter_short_desc': _("in Shortdescription"),
'at_filter_desc': _("in Description"),
'at_filter_day': _("on Weekday"),
'at_filter_weekend': _("Weekend"),
'at_filter_weekday': _("Weekday"),
'at_add': _("Add"),
'at_del': _("Delete"),
'at_reload': _("Reload"),
'at_save': _("Save"),
'at_parse': _("Parse"),
'at_simulate': _("Simulate"),
'at_timers': _("Timers"),
'at_settings': _("Settings"),
'at_delete_autotimer_question': _("Do you really want to delete the AT"),
'at_label_series': _("Label series"),
'ats_auto_timer_settings': _("AutoTimer Settings"),
'ats_autopoll': _("AutoPoll"),
'ats_interval': _("Interval (in h)"),
'ats_maxdaysinfuture': _("Max Days"),
'ats_try_guessing': _("Try Guessing"),
'ats_fastscan': _("Fastscan"),
'ats_show_in_extensionsmenu': _("Show in Extensionsmenu"),
'ats_disabled_on_conflict': _("Disabled on Conflict"),
'ats_addsimilar_on_conflict': _("Add Similar on Conflict"),
'ats_notifconflict': _("Notify if Conflict"),
'ats_notifsimilar': _("Notify if Similar"),
'ats_add_autotimer_to_tags': _("Add Autotimer to Tags"),
'ats_add_name_to_tags': _("Add Name to Tags"),
'ats_refresh': _("Refresh"),
'ats_refresh_none': _("None"),
'ats_refresh_auto': _("Auto"),
'ats_refresh_all': _("All"),
'ats_editor': _("Editor"),
'ats_editor_plain': _("Plain"),
'ats_editor_wizzard': _("Wizzard"),
'er_enabled': _("Enabled"),
'er_enable_messages': _("Enable Messages"),
'er_begin': _("Begin"),
'er_end': _("End"),
'er_delay_standby': _("Delay Standby"),
'er_interval_min': _("Interval (min.)"),
'er_interval_sec': _("Interval (sec.)"),
'er_afterevent': _("After Event"),
'er_force': _("Force"),
'er_wakeup': _("Wakeup"),
'er_inherit_autotimer': _("Inherit Autotimer"),
'er_parse_autotimer': _("Parse Autotimer"),
'er_always': _("Always"),
'er_never': _("Never"),
'er_bg_only': _("Background only"),
'er_ask_yes': _("Ask, default Yes"),
'er_ask_no': _("Ask, default No"),
'er_adapter': _("Method"),
'er_main': _("Visibly"),
'er_pip': _("Picture in Picture"),
'er_pip_hidden': _("Picture in Picture (hidden)"),
'er_fake_recording': _("Fake Recording"),
'er_reload': _("Reload"),
'er_save': _("Save"),
'er_refresh': _("Refresh now"),
'bqe_add_provider_as_bouquet': _("Add Provider as new Bouquet"),
'bqe_add_channel': _("Add channel(s) to Bouquet"),
'bqe_add_alternative': _("Add channel(s) as alternate"),
'bqe_search': _("Search"),
'bqe_reload': _("Reload"),
'bqe_export': _("Export"),
'bqe_import': _("Import"),
'bqe_add_bq': _("Add Bouquet"),
'bqe_rename_bq': _("Rename Bouquet"),
'bqe_delete_bq': _("Delete Bouquet"),
'bqe_add_marker': _("Add Marker"),
'bqe_rename': _("Rename"),
'bqe_delete_channel': _("Delete Channel(s)"),
'bqe_del_channel_question': _("Do you really want to delete the channel(s)"),
'bqe_del_bouquet_question': _("Do you really want to delete the bouquet"),
'bqe_name_bouquet': _("Name of the Bouquet"),
'bqe_name_marker': _("Name of the Marker"),
'bqe_rename_bouquet': _("Enter new name for the bouquet"),
'bqe_rename_marker': _("Enter new name for the marker"),
'bqe_filename': _("Please enter filename"),
'bqe_restore_question': _("Do you really want to restore from file"),
'via': _("via"),
'record_zap': _("Record+ZAP"),
'pmt_pid': _("Pmtpid"),
'a_pid': _("Apid"),
'v_pid': _("Vpid"),
'pcr_pid': _("Pcrpid"),
'ts_id': _("Tsid"),
'on_id': _("Onid"),
's_id': _("Sid"),
's_ref': _("Sref"),
's_orb': _("Orbital Position"),
'tstr_error_load_page': _("error! Loading Page"),
'tstr_timer_added': _("Timer Added"),
'tstr_event_not_found': _("Event not found"),
'tstr_ow_browser_settings': _("OpenWebif Browser Settings"),
'tstr_ow_settings': _("OpenWebif Settings"),
'tstr_theme': _("Theme"),
'tstr_show_picon_in_channel_list': _("Show Picons in channel list"),
'tstr_spinner': _("Spinner"),
'display_duration_s': _("Display duration (s)"),
'display_mode': _("Display Mode"),
'tv_guide': _("TV Guide"),
'timeline': _("Timeline"),
'linkspeed': _("Link Speed"),
'networkshares': _("Network Shares"),
'using': _("using"),
}
| gpl-2.0 | -7,639,181,828,118,465,000 | 33.496503 | 224 | 0.599432 | false |
wpj-cz/Spritemapper | spritecss/packing/sprites.py | 1 | 1651 | from contextlib import contextmanager
import logging
from ..image import Image
from ..png import FormatError
from . import Rect
logger = logging.getLogger(__name__)
class SpriteNode(Rect):
def __init__(self, im, width, height, fname=None, pad=(0, 0)):
Rect.__init__(self, (0, 0, width, height))
self.im = im
self.fname = fname
(self.pad_x, self.pad_y) = pad
self.close = im.close
def __str__(self):
clsnam = type(self).__name__
arg = self.fname if self.fname else self.im
args = (clsnam, arg, self.width, self.height)
return "<%s %s (%dx%d)>" % args
def calc_box(self, pos):
x1, y1 = pos
return (x1, y1, x1 + self.width, y1 + self.height)
@classmethod
def from_image(cls, im, *args, **kwds):
args = im.size + args
return cls(im, *args, **kwds)
@classmethod
def load_file(cls, fo, fname=None, pad=(0, 0), **kwds):
if not hasattr(fo, "read"):
if not fname:
fname = fo
fo = open(fo, "rb")
elif not fname and hasattr(fo, "name"):
fname = fo.name
return cls.from_image(Image.load(fo), fname=fname, pad=pad)
@contextmanager
def open_sprites(fnames, **kwds):
fs = [(fn, open(str(fn), "rb")) for fn in fnames]
sprites = []
try:
for fn, fo in fs:
try:
sprites.append(SpriteNode.load_file(fo, fname=fn, **kwds))
except FormatError, e:
logger.warn('%s: invalid image file: %s', fn, e)
yield sprites
finally:
for fn, fo in fs:
fo.close()
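
# Illustrative usage sketch (the file names are hypothetical):
#
#     with open_sprites(["icons/home.png", "icons/star.png"], pad=(1, 1)) as sprites:
#         for node in sprites:
#             print node
#
# Files that fail to parse as PNG are logged and skipped, and every file object is
# closed again when the block exits.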
| mit | -4,243,201,966,748,089,300 | 27.964912 | 74 | 0.543913 | false |
vitormazzi/django-jython | doj/backends/zxjdbc/mssql2k/creation.py | 1 | 18467 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
'''
Overloaded bits of the database creation code
'''
data_types = {
'AutoField': 'int IDENTITY (1, 1)',
'BooleanField': 'bit',
'CharField': 'nvarchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'nvarchar(%(max_length)s)',
'DateField': 'smalldatetime',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'nvarchar(%(max_length)s)',
'FilePathField': 'nvarchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'int',
'IPAddressField': 'nvarchar(15)',
'NullBooleanField': 'bit',
'OneToOneField': 'int',
'PositiveIntegerField': 'int CHECK ([%(column)s] >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ([%(column)s] >= 0)',
'SlugField': 'nvarchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'ntext',
'TimeField': 'datetime',
}
def __init__(self, connection):
super(DatabaseCreation,self).__init__(connection)
# Keep track of all unique nullable fields
self.unique_fields = []
# We need to keep track of all seen models and created models for
# ourself so that we can properly generate all the constraint triggers
self._seen_models = set()
self._created_models = set()
self._trigger_sql = set()
def create_test_db(self, verbosity=1, autoclobber=False):
result = super(DatabaseCreation, self).create_test_db(verbosity, autoclobber)
# Force the SQL2k command to run now.
from jtds.mssql2kext.management.commands import sql2kext
sql2kext.Command().handle_noargs()
return result
def _destroy_test_db(self, test_database_name, verbosity):
cursor = self.connection.cursor()
if not self.connection.connection.autocommit:
self.connection.connection.commit()
self.connection.connection.autocommit = True
cursor.execute("ALTER DATABASE %s SET SINGLE_USER WITH ROLLBACK IMMEDIATE " % self.connection.ops.quote_name(test_database_name))
cursor.execute("DROP DATABASE %s" %self.connection.ops.quote_name(test_database_name))
self.connection.close()
def sql_for_many_to_many(self, model, style):
"""
We need to inject the trigger code for a model after all the tables for this application have been
created.
        The code goes in this method only because it's known that the syncdb command in
        django.core.management.commands.syncdb calls this last.
        A better option would be to have a signal hook after all models have been
        created, but before the applications are signalled, so that the database
        backend can respond to the creation before the individual applications do.
"""
final_output = super(DatabaseCreation, self).sql_for_many_to_many(model, style)
from django.db import models
opts = model._meta
app_label = opts.app_label
app = [app for app in models.get_apps() if app.__name__.split('.')[-2] == app_label][0]
app_model_set = set(models.get_models(app))
# Wait until the app_model_set is finished loading
if app_model_set != (app_model_set & self._seen_models | set([model])):
return final_output
# This is the last model - we can safely assume it's ok to
# inject all the constraint trigger code now
model_fkeys = {}
for model in app_model_set:
opts = model._meta
model_fkeys[model] = []
for f in opts.local_fields:
if f.rel:
model_fkeys[model].append(f)
qn = self.connection.ops.quote_name
for model, model_keys in model_fkeys.items():
sql_block = []
# For each model, we want the list of all foreign keys
# to clear out references to other objects
# and to clear all references
tmpl = '''UPDATE %(table)s SET %(this_to_rel)s = NULL where %(this_pkey)s in (SELECT %(this_pkey)s from deleted)'''
opts = model._meta
table = opts.db_table
this_pkey = [f for f in opts.local_fields if f.primary_key][0].column
for model_f in model_keys:
sql_dict = {'table': qn(table),
'this_to_rel': qn(model_f.column),
'this_pkey': qn(this_pkey),}
if model_f.null:
sql_block.append(tmpl % sql_dict)
# Generate all inbound relationships and clear the foreign keys
for inbound_model in app_model_set:
inbound_rels = [(inbound_model, f) for f in model_fkeys[inbound_model] if f.rel.to == model]
for in_model, in_f in inbound_rels:
tmpl = '''UPDATE %(other_table)s SET %(fkey)s = NULL where %(fkey)s in (SELECT %(this_pkey)s from deleted)'''
rel_opts = in_model._meta
other_table = rel_opts.db_table
sql_dict = {'other_table': qn(other_table),
'fkey': qn(in_f.column),
'this_pkey': qn(this_pkey),
}
if in_f.null:
sql_block.append(tmpl % sql_dict)
trigger_name = '%s_%x' % (table, abs(hash(table)))
instead_of_sql = """
CREATE TRIGGER %(instead_trigger_name)s ON %(table)s
INSTEAD OF DELETE
AS
BEGIN
%(sql)s
DELETE FROM %(table)s WHERE %(this_pkey)s IN (SELECT %(this_pkey)s FROM deleted)
print '%(escaped_sql)s'
END
;
""" % {
'instead_trigger_name': qn('instead_%s' % trigger_name),
'table': qn(table),
'sql': '\n'.join([' %s' % stmt for stmt in sql_block]),
'escaped_sql': ('\n'.join([' %s' % stmt for stmt in sql_block])).replace("'", "\\'"),
'this_pkey': qn(this_pkey),
}
if instead_of_sql not in self._trigger_sql:
# We only want to generate the instead trigger if there is an actual
# code block
                if len(sql_block) != 0:
self._trigger_sql.add(instead_of_sql)
final_output.append(instead_of_sql)
return final_output
def sql_for_pending_references(self, model, style, pending_references):
"""
SQL Server 2000 needs to inject trigger code to emulate deferrable
constraints.
On object delete, we manually set the foreign keys to NULL with an
INSTEAD OF DELETE trigger, and then actually delete the record in the
AFTER DELETE trigger.
If the columns are specified with NOT NULL constraints, the trigger will fail
and will exhibit the correct behaviour. If NULL is allowed, this will
allow us to emulate DEFERRABLE constraints.
Note that SQL Server 2000 will automatically delete triggers that are
bound to tables when the table is dropped.
"""
import copy
# Make a shallow copy of the pending_references
pending_references_orig = copy.copy(pending_references)
final_output = super(DatabaseCreation, self).sql_for_pending_references(model, style, pending_references)
return final_output
def sql_create_model(self, model, style, known_models=set()):
'''
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
        Overloaded to create a view with SCHEMABINDING applied to the original table
        to support fields marked as both unique and nullable.
        The key difference between this and the superclass implementation is that
        we do not generate unique constraints for nullable field types, or
        unique_together fieldsets.
'''
self._seen_models.update(known_models)
self._created_models.add(model)
from django.db import models
opts = model._meta
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type()
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
field_output.append(style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
if not f.null:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
self.unique_fields.append(f)
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
if opts.order_with_respect_to:
table_output.append(style.SQL_FIELD(qn('_order')) + ' ' + \
style.SQL_COLTYPE(models.IntegerField().db_type()) + ' ' + \
style.SQL_KEYWORD('NULL'))
for field_constraints in opts.unique_together:
contraint_fields = [opts.get_field(f) for f in field_constraints]
null_allowed = [f for f in contraint_fields if f.null]
# Only do an inline UNIQUE constraint if none of the unique_together columns
# allow nulls. Otherwise - let the schemabinding hack build the unique index
if len(null_allowed) == 0:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if self.unique_fields:
final_output.extend(self._create_schemabinding_view(style, opts))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def _create_schemabinding_view(self, style, opts):
'''
Walk the list of unique_fields and generate a view to enforce
uniqueness on
'''
# Do a quick check to see if we have nullable and unique fields
# defined
if len([f for f in self.unique_fields if f.null and f.unique]) == 0:
return []
sql_stmts = []
#sql_stmts.append("-- Start SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
db_table, local_fields = opts.db_table, opts.local_fields
qn = self.connection.ops.quote_name
d ={'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
'fields': ', \n '.join([" %s" % style.SQL_FIELD(qn(f.column)) for f in local_fields]),
'table_name': style.SQL_TABLE(qn(db_table)),
'null_parts': ' OR\n '.join(['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in local_fields if f.null]),
}
sql_parts = []
sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
sql_parts.append(" AS")
sql_parts.append(" SELECT")
sql_parts.append(" %(fields)s" % d)
sql_parts.append(" FROM")
sql_parts.append(" [dbo].%(table_name)s" % d)
sql_parts.append(" WHERE")
sql_parts.append(" %(null_parts)s" % d)
sql_stmts.append('\n'.join(sql_parts))
sql_parts = []
# Now create all the indices
unique_nullable = [f for f in local_fields if f.null and f.unique]
for i, f in enumerate(unique_nullable):
d ={'vidx_name' : style.SQL_TABLE(qn("%s_vidx_%s" % (db_table, i))),
'idx_name' : style.SQL_TABLE(qn("%s_idx_%s" % (db_table, i))),
'table_name': style.SQL_TABLE(qn(db_table)),
'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
'col_name': style.SQL_FIELD(qn(f.column)),
}
if i == 0:
sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
else:
sql_stmts.append("CREATE UNIQUE INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
# To synthesize unique_together over fields where NULLs are allowed,
# we create a view per unique_together clause
for fc_idx, field_constraints in enumerate(opts.unique_together):
fields = [opts.get_field(f) for f in field_constraints]
unique_together_fields = set([f for f in opts.local_fields if f.null]).intersection(set(fields))
null_bits = ['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in fields if f.null]
if len(null_bits) == 0:
# No NULLable columns, skip this
continue
d ={'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
'fields': ', \n '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
'table_name': style.SQL_TABLE(qn(db_table)),
'null_parts': ' OR\n '.join(null_bits),
}
sql_parts = []
sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
sql_parts.append(" AS")
sql_parts.append(" SELECT")
sql_parts.append(" %(fields)s" % d)
sql_parts.append(" FROM")
sql_parts.append(" [dbo].%(table_name)s" % d)
sql_parts.append(" WHERE")
sql_parts.append(" %(null_parts)s" % d)
sql_stmts.append('\n'.join(sql_parts))
d ={'vidx_name' : style.SQL_TABLE(qn("%s_utidx_%s" % (db_table, fc_idx))),
'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
'table_name': style.SQL_TABLE(qn(db_table)),
'col_names': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
}
# Create a unique clustered index on the VIEW to enforce uniqueness
# Note that the view itself will filter out the NULLable column
sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_names)s);" % d)
# Now, finally create a NON-unique index across the unique_together fields on the TABLE
# to provide index speed
d ={'idx_name' : style.SQL_TABLE(qn("%s_%s_ut_idx" % (db_table, fc_idx))),
'table_name': style.SQL_TABLE(qn(db_table)),
'col_name': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
}
sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
#sql_stmts.append("-- END SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
"""
Now for some closure magic. We just grab the first field in the local_fields list
and obtain the post_create_sql code, substituting in a lambda function if nothing
is available.
        We apply a closure and extend the post_create_sql method with the SQL we've just
        generated to synthesize proper UNIQUE+NULL capability.
"""
# We need to bind the sql_stmts to the first field
field = opts.local_fields[0]
def wrap_statements(old_post_create_sql, stmts):
def closure(style, db_table):
result = []
if old_post_create_sql:
result.extend([sql for sql in old_post_create_sql(style, db_table)])
result.extend(stmts)
return result
return closure
old_func = getattr(field, 'post_create_sql', lambda x, y : [])
field.post_create_sql = wrap_statements(old_func, sql_stmts)
return []
# Stored procedure code
| bsd-3-clause | 5,518,086,605,718,068,000 | 43.710396 | 137 | 0.55342 | false |
csparpa/pyowm | tests/unit/alertapi30/test_condition.py | 1 | 2465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pyowm.commons.exceptions
from pyowm.alertapi30.condition import Condition
from pyowm.alertapi30.enums import WeatherParametersEnum, OperatorsEnum
class TestCondition(unittest.TestCase):
def test_condition_fails_with_wrong_parameters(self):
self.assertRaises(AssertionError, Condition,
None, OperatorsEnum.EQUAL, 67.8)
self.assertRaises(AssertionError, Condition,
123, OperatorsEnum.EQUAL, 67.8)
self.assertRaises(AssertionError, Condition,
WeatherParametersEnum.HUMIDITY, None, 67.8)
self.assertRaises(AssertionError, Condition,
WeatherParametersEnum.HUMIDITY, 123, 67.8)
self.assertRaises(AssertionError, Condition,
WeatherParametersEnum.HUMIDITY, OperatorsEnum.EQUAL, None)
self.assertRaises(AssertionError, Condition,
WeatherParametersEnum.HUMIDITY, OperatorsEnum.EQUAL, 'string')
def test_from_dict(self):
expected = Condition(WeatherParametersEnum.TEMPERATURE, OperatorsEnum.GREATER_THAN, 78.6, id='123456')
the_dict = dict(name='temp', expression='$gt',
amount=78.6, _id='123456')
result = Condition.from_dict(the_dict)
self.assertEqual(expected.weather_param, result.weather_param)
self.assertEqual(expected.operator, result.operator)
self.assertEqual(expected.amount, result.amount)
self.assertEqual(expected.id, result.id)
with self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError):
Condition.from_dict(None)
with self.assertRaises(pyowm.commons.exceptions.ParseAPIResponseError):
Condition.from_dict(dict(nonexistent='key'))
def test_to_dict(self):
instance = Condition(WeatherParametersEnum.TEMPERATURE, OperatorsEnum.GREATER_THAN, 78.6, id='123456')
result = instance.to_dict()
self.assertIsInstance(result, dict)
self.assertEqual('123456', result['id'])
self.assertEqual(WeatherParametersEnum.TEMPERATURE, result['weather_param'])
self.assertEqual(OperatorsEnum.GREATER_THAN, result['operator'])
self.assertEqual(78.6, result['amount'])
def test_repr(self):
print(Condition(WeatherParametersEnum.TEMPERATURE, OperatorsEnum.GREATER_THAN, 78.6, id='123456'))
| mit | 4,674,347,672,437,818,000 | 45.509434 | 110 | 0.677079 | false |
Jsgoller1/cybernetic | skynet/docker/src/kubernetes.py | 1 | 2269 | import os
import requests
from jinja2 import Environment
from jinja2 import FileSystemLoader
def launch_terminator_job(logger, config, job_id):
# Do this stuff to load jinja templates
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
trim_blocks=True)
k8s_deploy = j2_env.get_template("deployment.terminator.yml.j2").render(
job=job_id,
aws_access_key_id=config["aws_access_key_id"],
aws_secret_access_key=config["aws_secret_access_key"],
s3_bucket=config["s3_bucket"],
kubernetes_api_protocol=config["kubernetes_api_protocol"],
kubernetes_service_host=config["kubernetes_service_host"],
kubernetes_api_port=config["kubernetes_api_port"],
kubernetes_api_token=config["kubernetes_api_token"],
skynet_service_host=config["skynet_service_host"])
k8s_api_endpoint = config["kubernetes_api_protocol"] + '://' + config["kubernetes_service_host"] + ":" + config["kubernetes_api_port"] + "/apis/batch/v1/namespaces/default/jobs"
logger.info("POSTing to " + k8s_api_endpoint)
resp = requests.post(k8s_api_endpoint,
headers={"Content-Type": "application/yaml",
"Authorization": "Bearer " + config["kubernetes_api_token"]},
verify=False,
data=k8s_deploy)
logger.info(k8s_deploy)
logger.info(resp.text)
logger.info("Created terminator deployment for terminator job " + job_id)
def delete_terminator_job(logger, config, job_id):
k8s_api_endpoint = config["kubernetes_api_protocol"] + '://' + config["kubernetes_service_host"] + ":" + config["kubernetes_api_port"] + "/apis/batch/v1/namespaces/default/jobs/terminator-" + job_id
logger.info("DELETEing to " + k8s_api_endpoint)
resp = requests.delete(k8s_api_endpoint,
headers={"Authorization": "Bearer " + config["kubernetes_api_token"]},
verify=False)
logger.info(resp.text)
logger.info("Delete terminator job " + job_id)
| gpl-3.0 | 1,108,205,586,822,259,500 | 55.725 | 202 | 0.601587 | false |
Svolcano/python_exercise | dianhua/worker/crawler/china_telecom/jiangsu/main.py | 1 | 14319 | #!/user/bin/env python
# -*- coding:utf-8 -*-
import random
import re
import json
import traceback
import sys
import time
import calendar
import datetime
from datetime import date
# This code works around Chinese-text (non-ASCII) encoding errors under Python 2
from dateutil.relativedelta import relativedelta
reload(sys)
sys.setdefaultencoding("utf8")
if __name__ == '__main__':
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
else:
from worker.crawler.base_crawler import BaseCrawler
class Crawler(BaseCrawler):
"""
    kwargs contains:
        'tel': str,
        'pin_pwd': str,
        'id_card': str,
        'full_name': unicode,
        'sms_code': str,
        'captcha_code': str
    Error levels:
    0: success
    1: wrong account or password
    2: wrong verification code
    9: other error
"""
def __init__(self, **kwargs):
"""
初始化
"""
super(Crawler, self).__init__(**kwargs)
def need_parameters(self, **kwargs):
return ['pin_pwd','sms_verify']
def get_verify_type(self, **kwargs):
return 'SMS'
def login(self, **kwargs):
url = 'http://js.189.cn/nservice/listQuery/index'
code, key, resp = self.get(url)
if code != 0:
return code, key
try:
# uuid = re.findall("var dataR = queryUserLoginInfoByKey\('(.*)?'\);", resp.text)[0]
uuid = re.findall("var dataR = queryUserLoginInfoByKey\('([a-zA-Z0-9\-]+)'\);", resp.text)[0]
except:
msg = traceback.format_exc()
self.log('website', msg, resp)
return 9, 'crawl_error'
login_url = 'http://js.189.cn/nservice/login/doLogin?TargetURL=http://js.189.cn/nservice/listQuery/index&menuType=0'
headers = {
'Referer': 'http://js.189.cn/nservice/listQuery/index',
'Content-Type': 'application/x-www-form-urlencoded',
}
        # the captcha is 4 alphanumeric characters
codetype = 2004
for i in range(self.max_retry):
url = 'http://js.189.cn/nservice/verication/getCodeImage'
code, key, resp = self.get(url)
if code != 0:
return code, key
try:
key, result, cid = self._dama(resp.content, codetype)
except:
msg = traceback.format_exc()
self.log('crawler', u'打码失败:{}'.format(msg), '')
continue
if key == "success" and result != "":
Captcha = str(result).lower()
else:
continue
data = {
'userType': '2000004',
'logonPattern': '2',
'newUamType': '-1',
'productId': kwargs['tel'],
'userPwd': kwargs['pin_pwd'],
'validateCodeNumber': Captcha,
# 'validateCodeNumber': 'ssss',
}
code, key, resp = self.post(login_url, data=data, headers=headers)
if code != 0:
return code, key
if 'showTimeMsgPupup' in resp.text:
if u'验证码错误,请重新输入' in resp.text:
self.log('user', 'verify_error', resp)
self._dama_report(cid)
continue
if u'帐号或密码错误' in resp.text:
self.log('user', 'pin_pwd_error', resp)
return 1, 'pin_pwd_error'
if u'帐号被锁定' in resp.text:
self.log('user', 'account_locked', resp)
return 9, 'account_locked'
if u'系统繁忙,请稍后' in resp.text:
self.log('website', 'website_busy_error', resp)
return 9, 'website_busy_error'
self.log('user', u'未知错误', resp)
return 9, 'crawl_error'
url = 'http://js.189.cn/nservice/login/queryUserLoginInfoByKey'
data = {
'uuid': uuid,
}
code, key, resp = self.post(url, data=data, headers=headers)
if code != 0:
return code, key
if '"TSR_CODE":"0"' in resp.text:
return 0, 'success'
self.log('crawler', u'未知错误', resp)
return 9, 'crawl_error'
else:
self.log('crawler', u'两次打码都失败', '')
return 9, 'auto_captcha_code_error'
def send_verify_request(self, **kwargs):
url = 'http://js.189.cn/nservice/wec/sendMsg'
data = {
'accNbr':kwargs['tel'],
}
code, key, resp = self.post(url, data=data)
if code != 0:
return code, key, ''
if 'yes' in resp.text:
return 0, 'success', ''
# if 'no' in resp.text:
self.log('crawler', u'send_sms_error', resp)
return 9, 'send_sms_error', ''
def verify(self, **kwargs):
url = 'http://js.189.cn/nservice/checkCode/checkVeiCode'
data = {
'accNbr': kwargs['tel'],
'code':kwargs['sms_code'],
}
code, key, resp = self.post(url, data=data)
if code != 0:
return code, key
if 'yes' in resp.text:
return 0, 'success'
resp.encoding = 'utf-8'
self.log('crawler', u'verify_error', resp)
return 9, 'verify_error'
def crawl_call_log(self, **kwargs):
missing_list = []
possibly_missing_list = []
call_log = []
crawl_num = 0
call_log_url = 'http://js.189.cn/nservice/listQuery/queryList'
today = date.today()
page_and_retry = []
search_month = [x for x in range(0, -6, -1)]
for each_month in search_month:
query_date = today + relativedelta(months=each_month)
begDate = "%d-%02d-01" % (query_date.year, query_date.month)
endDay = calendar.monthrange(query_date.year, query_date.month)[1]
if each_month == 0:
endDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))
else:
endDate = "%d-%02d-%s" % (query_date.year, query_date.month, endDay)
call_log_data = {
'listType': '1',
'stTime': begDate,
'endTime': endDate,
}
query_month = "%d%02d" % (query_date.year, query_date.month)
page_and_retry.append((call_log_data, query_month, self.max_retry))
st_time = time.time()
et_time = st_time + 30
log_for_retry_request = []
while page_and_retry:
call_log_data, m_query_month, m_retry_times = page_and_retry.pop(0)
log_for_retry_request.append((m_query_month, m_retry_times))
m_retry_times -= 1
code, key, resp = self.post(call_log_url, data=call_log_data)
result = []
if code == 0:
try:
if u'无记录' in resp.text:
self.log('website', u'无记录', resp)
possibly_missing_list.append(m_query_month)
continue
call_log_res = json.loads(resp.text)
if call_log_res['respCode'] == "0000":
result = self.call_log_get(call_log_res, m_query_month)
if result:
call_log.extend(result)
continue
except:
crawl_num += 1
error = traceback.format_exc()
self.log('crawler', "json_error :%s" % error, resp)
now_time = time.time()
if m_retry_times > 0:
page_and_retry.append((call_log_data, m_query_month, m_retry_times))
elif now_time < et_time:
rand_sleep = random.randint(2, 4)
if m_retry_times > -10:
page_and_retry.append((call_log_data, m_query_month, m_retry_times))
time.sleep(rand_sleep)
else:
if code == 0 and not result:
possibly_missing_list.append(m_query_month)
else:
missing_list.append(m_query_month)
self.log('website', u'未找到指定数据1', resp)
else:
if code == 0 and not result:
possibly_missing_list.append(m_query_month)
else:
missing_list.append(m_query_month)
self.log('website', u'未找到指定数据2', resp)
# print('possibly_missing_list:', possibly_missing_list)
# print('missing_list:', missing_list)
self.log("crawler", "重试记录: {}".format(log_for_retry_request), "")
if len(missing_list) + len(possibly_missing_list) == 6:
if crawl_num > 0:
return 9, 'crawl_error', call_log, missing_list, possibly_missing_list
return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list
return 0, "success", call_log, missing_list, possibly_missing_list
def call_log_get(self, response, month):
records = []
for item in response['respMsg']:
item = item[0]
data = {}
data['month'] = month
data['update_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
data['call_cost'] = item['ticketChargeCh']
# data['call_time'] = item['startDateNew'] +" "+ item['startTimeNew']
# 以下几行为了转换时间戳
call_time = re.findall('\d{2}', item['startDateNew'] + " " + item['startTimeNew'])
call_time_change = call_time[0] + call_time[1] + '-' + call_time[2] + '-' + call_time[3] + ' ' + call_time[
4] + ':' + call_time[5] + ':' + call_time[6]
timeArray = time.strptime(call_time_change, "%Y-%m-%d %H:%M:%S")
call_time_timeStamp = str(int(time.mktime(timeArray)))
data['call_time'] = call_time_timeStamp
data['call_method'] = item['ticketTypeNew']
data['call_type'] = item['durationType']
# data['call_from'] = item['areaCode']
raw_call_from = item['areaCode'].strip()
call_from, error = self.formatarea(raw_call_from)
if call_from:
data['call_from'] = call_from
else:
data['call_from'] = raw_call_from
data['call_to'] = ''
data['call_tel'] = item['nbr']
# 以下几行是为了把时间转换成秒
duration = item['duartionCh']
call_durations = duration.split(':')
call_duration = int(call_durations[0]) * 3600 + int(call_durations[1]) * 60 + int(call_durations[2])
data['call_duration'] = str(call_duration)
records.append(data)
return records
def crawl_info(self, **kwargs):
return 9, 'unknown_error', {}
def crawl_phone_bill(self, **kwargs):
phone_bill = list()
missing_list = []
crawl_num = 0
data = {
'zhangqi':'',
'style':'0',
}
url = 'http://js.189.cn/nservice/billQuery/consumptionQuery'
for searchMonth in self.__monthly_period(6, '%Y-%m'):
data['zhangqi'] = searchMonth
code, key, resp = self.post(url, data=data)
if code != 0:
missing_list.append(searchMonth)
continue
try:
result = self.get_phone_bill(resp, searchMonth)
phone_bill.append(result)
except:
msg = traceback.format_exc()
self.log('crawler', msg, resp)
missing_list.append(searchMonth)
continue
if len(missing_list) == 6:
if crawl_num > 0:
return 9, 'crawl_error', phone_bill, missing_list
return 9, 'website_busy_error', phone_bill, missing_list
today = date.today()
today_month = "%d%02d" % (today.year, today.month)
if today_month in missing_list:
missing_list.remove(today_month)
return 0, 'success', phone_bill, missing_list
def __monthly_period(self, length=6, strf='%Y%m'):
current_time = datetime.datetime.now()
for month_offset in range(0, length):
yield (current_time - relativedelta(months=month_offset)).strftime(strf)
def get_phone_bill(self, resp, month):
phone_bill = json.loads(resp.text)
bill_list = phone_bill['consumptionList'][0]
bill_data = {
'bill_month': month.replace('-', ''),
'bill_amount': '',
'bill_package': '',
'bill_ext_calls': '',
'bill_ext_data': '',
'bill_ext_sms': '',
'bill_zengzhifei': '',
'bill_daishoufei': '',
'bill_qita': '',
}
bill_data['bill_amount'] = bill_list['dccBillFee']
for item in bill_list['dccBillList'][0]['dccBillList']:
# print(item)
if item['dccBillItemName'] == u'语音通信费':
bill_data['bill_ext_calls'] = item['dccBillFee']
if item['dccBillItemName'] == u'短信彩信费':
bill_data['bill_ext_sms'] = item['dccBillFee']
if item['dccBillItemName'] == u'优惠费用':
bill_data['bill_package'] = item['dccBillFee']
return bill_data
if __name__ == '__main__':
c = Crawler()
USER_ID = "17368357716"
USER_PASSWORD = "488496"
# self_test
c.self_test(tel=USER_ID, pin_pwd=USER_PASSWORD)
| mit | 6,394,243,637,282,430,000 | 36.144414 | 124 | 0.483035 | false |
Cal-CS-61A-Staff/ok | tests/test_auth.py | 1 | 4104 | import flask
import urllib.request
from tests import OkTestCase
from server.models import db
class TestAuth(OkTestCase):
email = '[email protected]'
staff_email = '[email protected]'
def test_ssl(self):
response = urllib.request.urlopen('https://accounts.google.com')
assert response.code == 200
def test_login(self):
"""GET /login/ should redirect to Google OAuth (in production)."""
response = self.client.get('/login/')
self.assertRedirects(response, '/testing-login/')
self.app.config['TESTING_LOGIN'] = False
response = self.client.get('/login/')
assert response.location.startswith('https://accounts.google.com/o/oauth2/auth')
def test_testing_login(self):
"""GET /testing-login/ should show a test login page."""
response = self.client.get('/testing-login/')
self.assert_200(response)
self.assert_template_used('testing-login.html')
def test_testing_login_fail(self):
"""GET /testing-login/ should 404 if TESTING_LOGIN config is not set."""
app = self.create_app()
app.config['TESTING_LOGIN'] = False
response = app.test_client().get('/testing-login/')
self.assert_404(response)
def test_restricted(self):
"""User should see courses on / if logged in, but not if logged out."""
# Load Landing Page
response = self.client.get('/')
self.assert_200(response)
self.assert_template_used('index.html')
assert self.email not in str(response.data)
self.login(self.email)
response = self.client.get('/')
self.assert_200(response)
assert self.email in str(response.data)
assert 'Courses | Ok' in str(response.data)
def test_create_course(self):
"""Users should be able to create a course if logged in."""
response = self.client.get('/admin/course/new')
self.assertRedirects(response, '/login/')
self.login(self.email)
response = self.client.get('/admin/course/new')
self.assert_200(response)
self.assert_template_used('staff/course/course.new.html')
def test_sudo(self):
""" Unauthorized users should not be able to sudo"""
def attempt_sudo(email, expected, success):
with self.client as c:
response = c.get('/sudo/{0}/'.format(email))
self.assertEqual(response.status_code, expected)
s_user = flask.session.get('sudo-user')
if success:
assert s_user
else:
assert not s_user
def attempt_suite(email, authorized=False):
""" Try accessing a variety of users undo sudo mode. """
if authorized:
err_failure = 404
err_success = 302
elif not email:
err_failure = 302
err_success = 302
else:
err_success = 403
err_failure = 403
# Normal sudo logins
if email: self.login(email)
attempt_sudo(self.user1.email, err_success, authorized)
self.logout()
# Do not reveal existence of user unless admin
if email: self.login(email)
attempt_sudo("[email protected]", err_failure, False)
self.logout()
# Check attempt to login as staff
if email: self.login(email)
attempt_sudo(self.staff1.email, err_success, authorized)
self.logout()
self.setup_course()
# Login as student
attempt_suite(self.user1.email, authorized=False)
# Login as staff
attempt_suite(self.staff_email, authorized=False)
attempt_suite(self.staff1.email, authorized=False)
# Login as admin
attempt_suite(self.admin.email, authorized=True)
# Login as lab assistant
attempt_suite(self.lab_assistant1.email, authorized=False)
# Logged out user
attempt_suite(None, authorized=False)
| apache-2.0 | 1,166,336,458,106,371,600 | 33.2 | 88 | 0.592105 | false |
ErnieAllen/qpid-dispatch | python/qpid_dispatch_internal/policy/policy_local.py | 1 | 40167 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import json
import pdb
from .policy_util import PolicyError, HostStruct, HostAddr, PolicyAppConnectionMgr, is_ipv6_enabled
from ..compat import PY_STRING_TYPE
from ..compat import PY_TEXT_TYPE
from ..compat import dict_iteritems
from ..compat import dict_keys
"""
Entity implementing the business logic of user connection/access policy.
"""
#
#
class PolicyKeys(object):
"""
String constants
"""
# Common key words
KW_IGNORED_NAME = "name"
KW_IGNORED_IDENTITY = "identity"
KW_IGNORED_TYPE = "type"
KW_VHOST_NAME = "hostname"
KW_VHOST_DEPRECATED_ID = "id"
# Policy ruleset key words
KW_MAXCONN = "maxConnections"
KW_MAXCONNPERHOST = "maxConnectionsPerHost"
KW_MAXCONNPERUSER = "maxConnectionsPerUser"
KW_CONNECTION_ALLOW_DEFAULT = "allowUnknownUser"
KW_GROUPS = "groups"
# Policy settings key words
KW_USERS = "users"
KW_REMOTE_HOSTS = "remoteHosts"
KW_MAX_FRAME_SIZE = "maxFrameSize"
KW_MAX_MESSAGE_SIZE = "maxMessageSize"
KW_MAX_SESSION_WINDOW = "maxSessionWindow"
KW_MAX_SESSIONS = "maxSessions"
KW_MAX_SENDERS = "maxSenders"
KW_MAX_RECEIVERS = "maxReceivers"
KW_ALLOW_DYNAMIC_SRC = "allowDynamicSource"
KW_ALLOW_ANONYMOUS_SENDER = "allowAnonymousSender"
KW_ALLOW_USERID_PROXY = "allowUserIdProxy"
KW_ALLOW_WAYPOINT_LINKS = "allowWaypointLinks"
KW_ALLOW_DYNAMIC_LINK_ROUTES = "allowDynamicLinkRoutes"
KW_ALLOW_ADMIN_STATUS_UPDATE = "allowAdminStatusUpdate"
KW_SOURCES = "sources"
KW_TARGETS = "targets"
KW_SOURCE_PATTERN = "sourcePattern"
KW_TARGET_PATTERN = "targetPattern"
# Policy stats key words
KW_CONNECTIONS_APPROVED = "connectionsApproved"
KW_CONNECTIONS_DENIED = "connectionsDenied"
KW_CONNECTIONS_CURRENT = "connectionsCurrent"
KW_PER_USER_STATE = "perUserState"
KW_PER_HOST_STATE = "perHostState"
# What settings does a user get when allowed to connect but
# not restricted by a user group?
KW_DEFAULT_SETTINGS = "$default"
# Config file separator character for two IP addresses in a range
KC_CONFIG_IP_SEP = "-"
# Config file separator character for names in a list
KC_CONFIG_LIST_SEP = ","
# user-to-group computed map in compiled ruleset
RULESET_U2G_MAP = "U2G"
# policy stats controlled by C code but referenced by settings
KW_CSTATS = "denialCounts"
# Username subsitituion token in link source and target names and patterns
KC_TOKEN_USER = "${user}"
# Link target/source name wildcard tuple keys
KC_TUPLE_ABSENT = 'a'
KC_TUPLE_PREFIX = 'p'
KC_TUPLE_SUFFIX = 's'
KC_TUPLE_EMBED = 'e'
KC_TUPLE_WILDCARD = '*'
#
#
class PolicyCompiler(object):
"""
Validate incoming configuration for legal schema.
- Warn about section options that go unused.
- Disallow negative max connection numbers.
- Check that connectionOrigins resolve to IP hosts.
- Enforce internal consistency,
"""
allowed_ruleset_options = [
PolicyKeys.KW_IGNORED_NAME,
PolicyKeys.KW_IGNORED_IDENTITY,
PolicyKeys.KW_IGNORED_TYPE,
PolicyKeys.KW_VHOST_NAME,
PolicyKeys.KW_MAXCONN,
PolicyKeys.KW_MAXCONNPERHOST,
PolicyKeys.KW_MAXCONNPERUSER,
PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT,
PolicyKeys.KW_GROUPS
]
allowed_settings_options = [
PolicyKeys.KW_USERS,
PolicyKeys.KW_REMOTE_HOSTS,
PolicyKeys.KW_MAX_FRAME_SIZE,
PolicyKeys.KW_MAX_MESSAGE_SIZE,
PolicyKeys.KW_MAX_SESSION_WINDOW,
PolicyKeys.KW_MAX_SESSIONS,
PolicyKeys.KW_MAX_SENDERS,
PolicyKeys.KW_MAX_RECEIVERS,
PolicyKeys.KW_ALLOW_DYNAMIC_SRC,
PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER,
PolicyKeys.KW_ALLOW_USERID_PROXY,
PolicyKeys.KW_ALLOW_WAYPOINT_LINKS,
PolicyKeys.KW_ALLOW_DYNAMIC_LINK_ROUTES,
PolicyKeys.KW_ALLOW_ADMIN_STATUS_UPDATE,
PolicyKeys.KW_SOURCES,
PolicyKeys.KW_TARGETS,
PolicyKeys.KW_SOURCE_PATTERN,
PolicyKeys.KW_TARGET_PATTERN
]
def __init__(self):
"""
Create a validator
"""
pass
def validateNumber(self, val, v_min, v_max, errors):
"""
Range check a numeric int policy value
@param[in] val policy value to check
@param[in] v_min minumum value
@param[in] v_max maximum value. zero disables check
@param[out] errors failure message
@return v_min <= val <= v_max
"""
try:
v_int = int(val)
except Exception as e:
errors.append("Value '%s' does not resolve to an integer." % val)
return False
if v_int < v_min:
errors.append("Value '%s' is below minimum '%s'." % (val, v_min))
return False
if v_max > 0 and v_int > v_max:
errors.append("Value '%s' is above maximum '%s'." % (val, v_max))
return False
return True
def compile_connection_group(self, vhostname, groupname, val, list_out, warnings, errors):
"""
Handle an ingressHostGroups submap.
Each origin value is verified. On a successful run the submap
is replaced parsed lists of HostAddr objects.
@param[in] vhostname vhost name
@param[in] groupname vhost/group name
@param[in] val origin list as text string
@param[out] list_out user inputs replaced with HostAddr objects
@param[out] warnings nonfatal irregularities observed
@param[out] errors descriptions of failure
@return - origins is usable. If True then warnings[] may contain useful
information about fields that are ignored. If False then
warnings[] may contain info and errors[0] will hold the
description of why the origin was rejected.
"""
key = PolicyKeys.KW_REMOTE_HOSTS
# convert val string to list of host specs
if isinstance(val, list):
# ['abc', 'def', 'mytarget']
pass
elif isinstance(val, (PY_STRING_TYPE, PY_TEXT_TYPE)):
val = [x.strip(' ') for x in val.split(PolicyKeys.KC_CONFIG_LIST_SEP)]
else:
errors.append(
"Policy vhost '%s' user group '%s' option '%s' has illegal value '%s'. Type must be 'str' or 'list' but is '%s;" %
(vhostname, groupname, key, val, type(val)))
return False
for coname in val:
try:
coha = HostAddr(coname, PolicyKeys.KC_CONFIG_IP_SEP)
list_out.append(coha)
except Exception as e:
errors.append("Policy vhost '%s' user group '%s' option '%s' connectionOption '%s' failed to translate: '%s'." %
(vhostname, groupname, key, coname, e))
return False
return True
def compile_app_settings(self, vhostname, usergroup, policy_in, policy_out, warnings, errors):
"""
Compile a schema from processed json format to local internal format.
@param[in] name vhost name
@param[in] policy_in user config settings
@param[out] policy_out validated Internal format
@param[out] warnings nonfatal irregularities observed
@param[out] errors descriptions of failure
@return - settings are usable. If True then warnings[] may contain useful
information about fields that are ignored. If False then
warnings[] may contain info and errors[0] will hold the
description of why the policy was rejected.
"""
# rulesets may not come through standard config so make nice defaults
policy_out[PolicyKeys.KW_USERS] = ''
policy_out[PolicyKeys.KW_REMOTE_HOSTS] = ''
# DISPATCH-1277 - KW_MAX_FRAME_SIZE must be defaulted to 16384 not 2147483647
policy_out[PolicyKeys.KW_MAX_FRAME_SIZE] = 16384
policy_out[PolicyKeys.KW_MAX_MESSAGE_SIZE] = 0
policy_out[PolicyKeys.KW_MAX_SESSION_WINDOW] = 2147483647
policy_out[PolicyKeys.KW_MAX_SESSIONS] = 65536
policy_out[PolicyKeys.KW_MAX_SENDERS] = 2147483647
policy_out[PolicyKeys.KW_MAX_RECEIVERS] = 2147483647
policy_out[PolicyKeys.KW_ALLOW_DYNAMIC_SRC] = False
policy_out[PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER] = False
policy_out[PolicyKeys.KW_ALLOW_USERID_PROXY] = False
policy_out[PolicyKeys.KW_ALLOW_WAYPOINT_LINKS] = True
policy_out[PolicyKeys.KW_ALLOW_DYNAMIC_LINK_ROUTES] = True
policy_out[PolicyKeys.KW_ALLOW_ADMIN_STATUS_UPDATE] = True
policy_out[PolicyKeys.KW_SOURCES] = ''
policy_out[PolicyKeys.KW_TARGETS] = ''
policy_out[PolicyKeys.KW_SOURCE_PATTERN] = ''
policy_out[PolicyKeys.KW_TARGET_PATTERN] = ''
cerror = []
user_sources = False
user_targets = False
user_src_pattern = False
user_tgt_pattern = False
for key, val in dict_iteritems(policy_in):
if key not in self.allowed_settings_options:
warnings.append("Policy vhost '%s' user group '%s' option '%s' is ignored." %
(vhostname, usergroup, key))
if key in [PolicyKeys.KW_MAX_FRAME_SIZE,
PolicyKeys.KW_MAX_MESSAGE_SIZE,
PolicyKeys.KW_MAX_RECEIVERS,
PolicyKeys.KW_MAX_SENDERS,
PolicyKeys.KW_MAX_SESSION_WINDOW,
PolicyKeys.KW_MAX_SESSIONS
]:
if not self.validateNumber(val, 0, 0, cerror):
errors.append("Policy vhost '%s' user group '%s' option '%s' has error '%s'." %
(vhostname, usergroup, key, cerror[0]))
return False
policy_out[key] = int(val)
elif key == PolicyKeys.KW_REMOTE_HOSTS:
# Conection groups are lists of IP addresses that need to be
# converted into binary structures for comparisons.
val_out = []
if not self.compile_connection_group(vhostname, usergroup, val, val_out, warnings, errors):
return False
policy_out[key] = val_out
elif key in [PolicyKeys.KW_ALLOW_ANONYMOUS_SENDER,
PolicyKeys.KW_ALLOW_DYNAMIC_SRC,
PolicyKeys.KW_ALLOW_USERID_PROXY,
PolicyKeys.KW_ALLOW_WAYPOINT_LINKS,
PolicyKeys.KW_ALLOW_DYNAMIC_LINK_ROUTES,
PolicyKeys.KW_ALLOW_ADMIN_STATUS_UPDATE
]:
if isinstance(val, (PY_STRING_TYPE, PY_TEXT_TYPE)) and val.lower() in ['true', 'false']:
val = True if val == 'true' else False
if not type(val) is bool:
errors.append("Policy vhost '%s' user group '%s' option '%s' has illegal boolean value '%s'." %
(vhostname, usergroup, key, val))
return False
policy_out[key] = val
elif key in [PolicyKeys.KW_USERS,
PolicyKeys.KW_SOURCES,
PolicyKeys.KW_TARGETS,
PolicyKeys.KW_SOURCE_PATTERN,
PolicyKeys.KW_TARGET_PATTERN
]:
# accept a string or list
if isinstance(val, list):
# ['abc', 'def', 'mytarget']
pass
elif isinstance(val, (PY_STRING_TYPE, PY_TEXT_TYPE)):
# 'abc, def, mytarget'
val = [x.strip(' ') for x in val.split(PolicyKeys.KC_CONFIG_LIST_SEP)]
else:
errors.append("Policy vhost '%s' user group '%s' option '%s' has illegal value '%s'. Type must be 'str' or 'list' but is '%s;" %
(vhostname, usergroup, key, val, type(val)))
# deduplicate address lists
val = list(set(val))
# val is CSV string with no white space between values: 'abc,def,mytarget,tmp-${user}'
if key == PolicyKeys.KW_USERS:
# user name list items are literal strings and need no special handling
policy_out[key] = ','.join(val)
else:
# source and target names get special handling for the '${user}' substitution token
# The literal string is translated to a (key, prefix, suffix) set of three strings.
# C code does not have to search for the username token and knows with authority
# how to construct match strings.
# A wildcard is also signaled.
utoken = PolicyKeys.KC_TOKEN_USER
eVal = []
for v in val:
vcount = v.count(utoken)
if vcount > 1:
errors.append("Policy vhost '%s' user group '%s' policy key '%s' item '%s' contains multiple user subtitution tokens" %
(vhostname, usergroup, key, v))
return False
elif vcount == 1:
# a single token is present as a prefix, suffix, or embedded
# construct cChar, S1, S2 encodings to be added to eVal description
if v.startswith(utoken):
# prefix
eVal.append(PolicyKeys.KC_TUPLE_PREFIX)
eVal.append('')
eVal.append(v[v.find(utoken) + len(utoken):])
elif v.endswith(utoken):
# suffix
eVal.append(PolicyKeys.KC_TUPLE_SUFFIX)
eVal.append(v[0:v.find(utoken)])
eVal.append('')
else:
# embedded
if key in [PolicyKeys.KW_SOURCE_PATTERN,
PolicyKeys.KW_TARGET_PATTERN]:
errors.append("Policy vhost '%s' user group '%s' policy key '%s' item '%s' may contain match pattern '%s' as a prefix or a suffix only." %
(vhostname, usergroup, key, v, utoken))
return False
eVal.append(PolicyKeys.KC_TUPLE_EMBED)
eVal.append(v[0:v.find(utoken)])
eVal.append(v[v.find(utoken) + len(utoken):])
else:
# ${user} token is absent
if v == PolicyKeys.KC_TUPLE_WILDCARD:
eVal.append(PolicyKeys.KC_TUPLE_WILDCARD)
eVal.append('')
eVal.append('')
else:
eVal.append(PolicyKeys.KC_TUPLE_ABSENT)
eVal.append(v)
eVal.append('')
policy_out[key] = ','.join(eVal)
if key == PolicyKeys.KW_SOURCES:
user_sources = True
if key == PolicyKeys.KW_TARGETS:
user_targets = True
if key == PolicyKeys.KW_SOURCE_PATTERN:
user_src_pattern = True
if key == PolicyKeys.KW_TARGET_PATTERN:
user_tgt_pattern = True
if user_sources and user_src_pattern:
errors.append("Policy vhost '%s' user group '%s' specifies conflicting 'sources' and 'sourcePattern' attributes. Use only one or the other." % (vhostname, usergroup))
return False
if user_targets and user_tgt_pattern:
errors.append("Policy vhost '%s' user group '%s' specifies conflicting 'targets' and 'targetPattern' attributes. Use only one or the other." % (vhostname, usergroup))
return False
return True
def compile_access_ruleset(self, name, policy_in, policy_out, warnings, errors):
"""
Compile a schema from processed json format to local internal format.
@param[in] name vhost name
@param[in] policy_in raw policy to be validated
@param[out] policy_out validated Internal format
@param[out] warnings nonfatal irregularities observed
@param[out] errors descriptions of failure
@return - policy is usable. If True then warnings[] may contain useful
information about fields that are ignored. If False then
warnings[] may contain info and errors[0] will hold the
description of why the policy was rejected.
"""
cerror = []
# rulesets may not come through standard config so make nice defaults
policy_out[PolicyKeys.KW_MAXCONN] = 65535
policy_out[PolicyKeys.KW_MAXCONNPERHOST] = 65535
policy_out[PolicyKeys.KW_MAXCONNPERUSER] = 65535
policy_out[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT] = False
policy_out[PolicyKeys.KW_GROUPS] = {}
# validate the options
for key, val in dict_iteritems(policy_in):
if key not in self.allowed_ruleset_options:
warnings.append("Policy vhost '%s' option '%s' is ignored." %
(name, key))
if key in [PolicyKeys.KW_MAXCONN,
PolicyKeys.KW_MAXCONNPERHOST,
PolicyKeys.KW_MAXCONNPERUSER
]:
if not self.validateNumber(val, 0, 65535, cerror):
msg = ("Policy vhost '%s' option '%s' has error '%s'." %
(name, key, cerror[0]))
errors.append(msg)
return False
policy_out[key] = val
elif key in [PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
if not type(val) is bool:
errors.append("Policy vhost '%s' option '%s' must be of type 'bool' but is '%s'" %
(name, key, type(val)))
return False
policy_out[key] = val
elif key in [PolicyKeys.KW_GROUPS]:
if not type(val) is dict:
errors.append("Policy vhost '%s' option '%s' must be of type 'dict' but is '%s'" %
(name, key, type(val)))
return False
for skey, sval in dict_iteritems(val):
newsettings = {}
if not self.compile_app_settings(name, skey, sval, newsettings, warnings, errors):
return False
policy_out[key][skey] = {}
policy_out[key][skey].update(newsettings)
# Verify that each user is in only one group.
# Create user-to-group map for looking up user's group
policy_out[PolicyKeys.RULESET_U2G_MAP] = {}
if PolicyKeys.KW_GROUPS in policy_out:
for group, groupsettings in dict_iteritems(policy_out[PolicyKeys.KW_GROUPS]):
if PolicyKeys.KW_USERS in groupsettings:
users = [x.strip(' ') for x in groupsettings[PolicyKeys.KW_USERS].split(PolicyKeys.KC_CONFIG_LIST_SEP)]
for user in users:
if user in policy_out[PolicyKeys.RULESET_U2G_MAP]:
errors.append("Policy vhost '%s' user '%s' is in multiple user groups '%s' and '%s'" %
(name, user, policy_out[PolicyKeys.RULESET_U2G_MAP][user], group))
return False
else:
policy_out[PolicyKeys.RULESET_U2G_MAP][user] = group
else:
warnings.append("Policy vhost '%s' user group '%s' has no defined users. This policy has no effect" % (name, group))
# Default connections require a default settings
if policy_out[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
if not PolicyKeys.KW_DEFAULT_SETTINGS in policy_out[PolicyKeys.KW_GROUPS]:
errors.append("Policy vhost '%s' allows connections by default but default settings are not defined" %
(name))
return False
return True
#
#
class AppStats(object):
"""
Maintain live state and statistics for an vhost.
"""
def __init__(self, id, manager, ruleset):
self.my_id = id
self._manager = manager
self.conn_mgr = PolicyAppConnectionMgr(
ruleset[PolicyKeys.KW_MAXCONN],
ruleset[PolicyKeys.KW_MAXCONNPERUSER],
ruleset[PolicyKeys.KW_MAXCONNPERHOST])
self._cstats = self._manager.get_agent().qd.qd_dispatch_policy_c_counts_alloc()
self._manager.get_agent().add_implementation(self, "vhostStats")
def update_ruleset(self, ruleset):
"""
The parent ruleset has changed.
Propagate settings into the connection manager.
@param ruleset: new ruleset
@return:
"""
self.conn_mgr.update(
ruleset[PolicyKeys.KW_MAXCONN],
ruleset[PolicyKeys.KW_MAXCONNPERHOST],
ruleset[PolicyKeys.KW_MAXCONNPERUSER])
def refresh_entity(self, attributes):
"""Refresh management attributes"""
entitymap = {}
entitymap[PolicyKeys.KW_VHOST_NAME] = self.my_id
entitymap[PolicyKeys.KW_VHOST_DEPRECATED_ID] = self.my_id
entitymap[PolicyKeys.KW_CONNECTIONS_APPROVED] = self.conn_mgr.connections_approved
entitymap[PolicyKeys.KW_CONNECTIONS_DENIED] = self.conn_mgr.connections_denied
entitymap[PolicyKeys.KW_CONNECTIONS_CURRENT] = self.conn_mgr.connections_active
entitymap[PolicyKeys.KW_PER_USER_STATE] = self.conn_mgr.per_user_state
entitymap[PolicyKeys.KW_PER_HOST_STATE] = self.conn_mgr.per_host_state
self._manager.get_agent().qd.qd_dispatch_policy_c_counts_refresh(self._cstats, entitymap)
attributes.update(entitymap)
def can_connect(self, conn_id, user, host, diags):
return self.conn_mgr.can_connect(conn_id, user, host, diags)
def disconnect(self, conn_id, user, host):
self.conn_mgr.disconnect(conn_id, user, host)
def count_other_denial(self):
self.conn_mgr.count_other_denial()
def get_cstats(self):
return self._cstats
#
#
class ConnectionFacts(object):
def __init__(self, user, host, app, conn_name):
self.user = user
self.host = host
self.app = app
self.conn_name = conn_name
#
#
class PolicyLocal(object):
"""
The local policy database.
"""
def __init__(self, manager):
"""
Create instance
@params manager policy manager class
"""
# manager is a class
# It provides access the dispatch system functions
self._manager = manager
# rulesetdb is a map
# key : vhost name
# val : ruleset for this app
# created by configuration
# augmented by policy compiler
self.rulesetdb = {}
# settingsdb is a map
# key : <vhost name>
# val : a map
# key : <user group name>
# val : settings to use for user's connection
# created by configuration
self.settingsdb = {}
# statsdb is a map
# key : <vhost name>
# val : AppStats object
self.statsdb = {}
# _policy_compiler is a function
# validates incoming policy and readies it for internal use
self._policy_compiler = PolicyCompiler()
# _connections is a map
# key : numeric connection id
# val : ConnectionFacts
# Entries created as connection AMQP Opens arrive
# Entries destroyed as sockets closed
self._connections = {}
# _default_vhost is a string
# holds the name of the vhost to use when the
# open.hostname is not found in the rulesetdb
self._default_vhost = ""
# _use_hostname_patterns
# holds policy setting.
# When true policy ruleset definitions are propagated to C code
self.use_hostname_patterns = False
#
# Service interfaces
#
def create_ruleset(self, attributes):
"""
Create or update named policy ruleset.
@param[in] attributes: from config
"""
warnings = []
diag = []
candidate = {}
name = attributes[PolicyKeys.KW_VHOST_NAME]
result = self._policy_compiler.compile_access_ruleset(name, attributes, candidate, warnings, diag)
if not result:
raise PolicyError("Policy '%s' is invalid: %s" % (name, diag[0]))
if len(warnings) > 0:
for warning in warnings:
self._manager.log_warning(warning)
# Reject if parse tree optimized name collision
if self.use_hostname_patterns:
agent = self._manager.get_agent()
if not agent.qd.qd_dispatch_policy_host_pattern_add(agent.dispatch, name):
raise PolicyError("Policy '%s' optimized pattern conflicts with existing pattern" % name)
if name not in self.rulesetdb:
if name not in self.statsdb:
self.statsdb[name] = AppStats(name, self._manager, candidate)
self._manager.log_info("Created policy rules for vhost %s" % name)
else:
self.statsdb[name].update_ruleset(candidate)
self._manager.log_info("Updated policy rules for vhost %s" % name)
# TODO: ruleset lock
self.rulesetdb[name] = {}
self.rulesetdb[name].update(candidate)
def policy_delete(self, name):
"""
Delete named policy
@param[in] name vhost name
"""
if name not in self.rulesetdb:
raise PolicyError("Policy '%s' does not exist" % name)
# TODO: ruleset lock
if self.use_hostname_patterns:
agent = self._manager.get_agent()
agent.qd.qd_dispatch_policy_host_pattern_remove(agent.dispatch, name)
del self.rulesetdb[name]
#
# db enumerator
#
def policy_db_get_names(self):
"""
Return a list of vhost names in this policy
"""
return dict_keys(self.rulesetdb)
def set_default_vhost(self, name):
"""
Set the default vhost name.
@param name: the name of the default vhost
@return: none
"""
self._default_vhost = name
self._manager.log_info("Policy fallback defaultVhost is defined: '%s'" % name)
def default_vhost_enabled(self):
"""
The default vhost is enabled if the name is not blank and
the vhost is defined in rulesetdb.
@return:
"""
return not self._default_vhost == "" and self._default_vhost in self.rulesetdb
#
# Runtime query interface
#
def lookup_user(self, user, rhost, vhost_in, conn_name, conn_id):
"""
Lookup function called from C.
Determine if a user on host accessing vhost through AMQP Open is allowed
according to the policy access rules.
If allowed then return the policy vhost settings name. If stats.can_connect
returns true then it has registered and counted the connection.
@param[in] user connection authId
@param[in] rhost connection remote host numeric IP address as string
@param[in] vhost_in vhost user is accessing
@param[in] conn_name connection name used for tracking reports
@param[in] conn_id internal connection id
@return settings user-group name if allowed; "" if not allowed
"""
try:
# choose rule set based on incoming vhost or default vhost
# or potential vhost found by pattern matching
vhost = vhost_in
if self.use_hostname_patterns:
agent = self._manager.get_agent()
vhost = agent.qd.qd_dispatch_policy_host_pattern_lookup(agent.dispatch, vhost)
if vhost not in self.rulesetdb:
if self.default_vhost_enabled():
vhost = self._default_vhost
else:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"No policy defined for vhost" % (user, rhost, vhost_in))
return ""
if vhost != vhost_in:
self._manager.log_debug(
"AMQP Open for user '%s', rhost '%s', vhost '%s': "
"proceeds using vhost '%s' ruleset" % (user, rhost, vhost_in, vhost))
ruleset = self.rulesetdb[vhost]
# look up the stats
if vhost not in self.statsdb:
msg = (
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"INTERNAL: Policy is defined but stats are missing" % (user, rhost, vhost))
raise PolicyError(msg)
stats = self.statsdb[vhost]
# Get settings for user in a user group or in default
if user in ruleset[PolicyKeys.RULESET_U2G_MAP]:
usergroup = ruleset[PolicyKeys.RULESET_U2G_MAP][user]
elif "*" in ruleset[PolicyKeys.RULESET_U2G_MAP]:
usergroup = ruleset[PolicyKeys.RULESET_U2G_MAP]["*"]
else:
if ruleset[PolicyKeys.KW_CONNECTION_ALLOW_DEFAULT]:
usergroup = PolicyKeys.KW_DEFAULT_SETTINGS
else:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"User is not in a user group and unknown users are denied" % (user, rhost, vhost))
stats.count_other_denial()
return ""
groupsettings = ruleset[PolicyKeys.KW_GROUPS][usergroup]
# User in usergroup allowed to connect from rhost?
allowed = False
if PolicyKeys.KW_REMOTE_HOSTS in groupsettings:
# Users are restricted to connecting from a rhost
# defined by the group's remoteHost list
cglist = groupsettings[PolicyKeys.KW_REMOTE_HOSTS]
uhs = HostStruct(rhost)
for cohost in cglist:
if cohost.match_bin(uhs):
allowed = True
break
if not allowed:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"User is not allowed to connect from this network host" % (user, rhost, vhost))
stats.count_other_denial()
return ""
# This user passes administrative approval.
# Now check live connection counts
diags = []
if not stats.can_connect(conn_name, user, rhost, diags):
for diag in diags:
self._manager.log_info(
"DENY AMQP Open for user '%s', rhost '%s', vhost '%s': "
"%s" % (user, rhost, vhost, diag))
return ""
# Record facts about this connection to use during teardown
facts = ConnectionFacts(user, rhost, vhost, conn_name)
self._connections[conn_id] = facts
# Return success
return usergroup
except Exception as e:
self._manager.log_info(
"DENY AMQP Open lookup_user failed for user '%s', rhost '%s', vhost '%s': "
"Internal error: %s" % (user, rhost, vhost, e))
# return failure
return ""
def lookup_settings(self, vhost_in, groupname, upolicy):
"""
Given a settings name, return the aggregated policy blob.
@param[in] vhost_in: vhost user is accessing
@param[in] groupname: user group name
@param[out] upolicy: dict holding policy values - the settings blob
TODO: make this a c struct
@return if lookup worked
# Note: the upolicy output is a non-nested dict with settings of interest
"""
try:
vhost = vhost_in
if self.use_hostname_patterns:
agent = self._manager.get_agent()
vhost = agent.qd.qd_dispatch_policy_host_pattern_lookup(agent.dispatch, vhost)
if vhost not in self.rulesetdb:
if self.default_vhost_enabled():
vhost = self._default_vhost
if vhost != vhost_in:
self._manager.log_debug(
"AMQP Open lookup settings for vhost '%s': "
"proceeds using vhost '%s' ruleset" % (vhost_in, vhost))
if vhost not in self.rulesetdb:
self._manager.log_info(
"lookup_settings fail for vhost '%s', user group '%s': "
"No policy defined for this vhost" % (vhost, groupname))
return False
ruleset = self.rulesetdb[vhost]
if groupname not in ruleset[PolicyKeys.KW_GROUPS]:
self._manager.log_trace(
"lookup_settings fail for vhost '%s', user group '%s': "
"This vhost has no settings for the user group" % (vhost, groupname))
return False
upolicy.update(ruleset[PolicyKeys.KW_GROUPS][groupname])
upolicy[PolicyKeys.KW_CSTATS] = self.statsdb[vhost].get_cstats()
return True
except Exception as e:
return False
def close_connection(self, conn_id):
"""
Close the connection.
@param conn_id:
@return:
"""
try:
if conn_id in self._connections:
facts = self._connections[conn_id]
stats = self.statsdb[facts.app]
stats.disconnect(facts.conn_name, facts.user, facts.host)
del self._connections[conn_id]
except Exception as e:
self._manager.log_trace(
"Policy internal error closing connection id %s. %s" % (conn_id, str(e)))
#
#
def test_load_config(self):
"""
Test function to load a policy.
@return:
"""
ruleset_str = '["vhost", {"hostname": "photoserver", "maxConnections": 50, "maxConnectionsPerUser": 5, "maxConnectionsPerHost": 20, "allowUnknownUser": true,'
ruleset_str += '"groups": {'
ruleset_str += '"anonymous": { "users": "anonymous", "remoteHosts": "*", "maxFrameSize": 111111, "maxMessageSize": 111111, "maxSessionWindow": 111111, "maxSessions": 1, "maxSenders": 11, "maxReceivers": 11, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public", "targets": "" },'
ruleset_str += '"users": { "users": "u1, u2", "remoteHosts": "*", "maxFrameSize": 222222, "maxMessageSize": 222222, "maxSessionWindow": 222222, "maxSessions": 2, "maxSenders": 22, "maxReceivers": 22, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private", "targets": "public" },'
ruleset_str += '"paidsubscribers": { "users": "p1, p2", "remoteHosts": "*", "maxFrameSize": 333333, "maxMessageSize": 333333, "maxSessionWindow": 333333, "maxSessions": 3, "maxSenders": 33, "maxReceivers": 33, "allowDynamicSource": true, "allowAnonymousSender": false, "sources": "public, private", "targets": "public, private" },'
ruleset_str += '"test": { "users": "zeke, ynot", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255", "maxFrameSize": 444444, "maxMessageSize": 444444, "maxSessionWindow": 444444, "maxSessions": 4, "maxSenders": 44, "maxReceivers": 44, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "private", "targets": "private" },'
if is_ipv6_enabled():
ruleset_str += '"admin": { "users": "alice, bob", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1, ::1", "maxFrameSize": 555555, "maxMessageSize": 555555, "maxSessionWindow": 555555, "maxSessions": 5, "maxSenders": 55, "maxReceivers": 55, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "public, private, management", "targets": "public, private, management" },'
ruleset_str += '"superuser": { "users": "ellen", "remoteHosts": "72.135.2.9, 127.0.0.1, ::1", "maxFrameSize": 666666, "maxMessageSize": 666666, "maxSessionWindow": 666666, "maxSessions": 6, "maxSenders": 66, "maxReceivers": 66, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private, management, root", "targets": "public, private, management, root" },'
else:
ruleset_str += '"admin": { "users": "alice, bob", "remoteHosts": "10.48.0.0-10.48.255.255, 192.168.100.0-192.168.100.255, 10.18.0.0-10.18.255.255, 127.0.0.1", "maxFrameSize": 555555, "maxMessageSize": 555555, "maxSessionWindow": 555555, "maxSessions": 5, "maxSenders": 55, "maxReceivers": 55, "allowDynamicSource": true, "allowAnonymousSender": true, "sources": "public, private, management", "targets": "public, private, management" },'
ruleset_str += '"superuser": { "users": "ellen", "remoteHosts": "72.135.2.9, 127.0.0.1", "maxFrameSize": 666666, "maxMessageSize": 666666, "maxSessionWindow": 666666, "maxSessions": 6, "maxSenders": 66, "maxReceivers": 66, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private, management, root", "targets": "public, private, management, root" },'
ruleset_str += '"$default": { "remoteHosts": "*", "maxFrameSize": 222222, "maxMessageSize": 222222, "maxSessionWindow": 222222, "maxSessions": 2, "maxSenders": 22, "maxReceivers": 22, "allowDynamicSource": false, "allowAnonymousSender": false, "sources": "public, private", "targets": "public" }'
ruleset_str += '}}]'
ruleset = json.loads(ruleset_str)
self.create_ruleset(ruleset[1])
| apache-2.0 | 9,164,368,960,652,223,000 | 45.814685 | 464 | 0.564194 | false |
thom-at-redhat/cfme_tests | scripts/install_vddk.py | 1 | 1213 | #!/usr/bin/env python2
"""SSH into a running appliance and install VMware VDDK.
"""
import argparse
import sys
from urlparse import urlparse
from utils.appliance import IPAppliance
from utils.conf import env
def log(message):
print "[VDDK-INSTALL] {}".format(message)
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--address',
help='hostname or ip address of target appliance', default=env.get("base_url", None))
parser.add_argument('--vddk_url', help='url to download vddk pkg')
parser.add_argument('--reboot', help='reboot after installation ' +
'(required for proper operation)', action="store_true")
parser.add_argument('--force',
help='force installation if version detected', action="store_true")
args = parser.parse_args()
address = urlparse(args.address).netloc
appliance = IPAppliance(address=address)
appliance.install_vddk(
reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | -5,883,549,056,253,606,000 | 30.102564 | 93 | 0.652102 | false |
rwightman/tensorflow-litterbox | litterbox/fabric/dataset_record.py | 1 | 2306 | # Copyright (C) 2016 Ross Wightman. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
# Based on original Work Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from abc import ABCMeta
from abc import abstractmethod
from .dataset import Dataset
from .dataset import FLAGS
class DatasetRecord(Dataset):
"""A simple class for handling data sets."""
__metaclass__ = ABCMeta
def __init__(self, name, subset):
super(DatasetRecord, self).__init__(name, subset, is_record=True)
def data_files(self):
"""Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are not data_files matching the subset.
"""
tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
data_files = tf.gfile.Glob(tf_record_pattern)
if not data_files:
print('No files found for dataset %s/%s at %s' %
(self.name, self.subset, FLAGS.data_dir))
exit(-1)
return data_files
def reader(self):
"""Return a reader for a single entry from the data set.
See io_ops.py for details of Reader class.
Returns:
Reader object that reads the data set.
"""
return tf.TFRecordReader()
| apache-2.0 | -2,322,583,809,205,931,500 | 32.42029 | 80 | 0.654814 | false |
jdob/okaara | okaara/prompt.py | 1 | 34929 | # Copyright (c) 2011-2013 Jason Dobies
#
# This file is part of Okaara.
#
# Okaara is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# Okaara is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with Okaara.
# If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from builtins import str
from builtins import object
from functools import reduce
import copy
import fcntl
import getpass
import gettext
import logging
import os
import struct
import sys
import termios
t = gettext.translation('okaara', fallback=True)
if sys.version_info[0] < 3:
_ = t.ugettext
else:
_ = t.gettext
# -- constants ----------------------------------------------------------------
LOG = logging.getLogger(__name__)
# Returned to indicate the user has interrupted the input
ABORT = object()
# Indicates the automatic wrap should use the current width of the screen,
# calculated at the time of rendering
WIDTH_TERMINAL = object()
COLOR_WHITE = '\033[0m'
COLOR_BRIGHT_WHITE = '\033[1m'
COLOR_GRAY = '\033[30m'
COLOR_RED = '\033[31m'
COLOR_GREEN = '\033[32m'
COLOR_YELLOW = '\033[33m'
COLOR_BLUE = '\033[34m'
COLOR_PURPLE = '\033[35m'
COLOR_CYAN = '\033[36m'
COLOR_LIGHT_GRAY = '\033[90m'
COLOR_LIGHT_RED = '\033[91m'
COLOR_LIGHT_GREEN = '\033[92m'
COLOR_LIGHT_YELLOW = '\033[93m'
COLOR_LIGHT_BLUE = '\033[94m'
COLOR_LIGHT_PURPLE = '\033[95m'
COLOR_LIGHT_CYAN = '\033[96m'
COLOR_BG_GRAY = '\033[40m'
COLOR_BG_RED = '\033[41m'
COLOR_BG_GREEN = '\033[42m'
COLOR_BG_YELLOW = '\033[43m'
COLOR_BG_BLUE = '\033[44m'
COLOR_BG_PURPLE = '\033[45m'
COLOR_BG_CYAN = '\033[46m'
POSITION_SAVE = '\033[s'
POSITION_RESET = '\033[u'
MOVE_UP = '\033[%dA' # sub in number of lines
MOVE_DOWN = '\033[%dB' # sub in number of lines
MOVE_FORWARD = '\033[%dC' # sub in number of characters
MOVE_BACKWARD = '\033[%dD' # sub in number of characters
CLEAR = '\033[2J'
CLEAR_EOL = '\033[K'
CLEAR_REMAINDER = '\033[J'
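# Illustrative note (not part of the original source): the MOVE_* values are
# printf-style templates, so a distance is substituted in before writing, e.g.
#
#   prompt.move(MOVE_UP % 3)        # cursor up three lines
#   prompt.move(MOVE_FORWARD % 10)  # cursor right ten characters
#
# where "prompt" is assumed to be a Prompt instance created elsewhere.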
TAG_READ = 'read'
TAG_WRITE = 'write'
# -- classes ------------------------------------------------------------------
class Prompt(object):
"""
Used to communicate between the application and the user. The Prompt class can be
subclassed to provide custom implementations of read and write to alter the input/output
sources. By default, stdin and stdout will be used.
"""
def __init__(self, input=sys.stdin, output=sys.stdout, normal_color=COLOR_WHITE,
enable_color=True, wrap_width=None, record_tags=False):
"""
Creates a new instance that will read and write to the given streams.
:param input: stream to read from; defaults to stdin
:type input: file
:param output: stream to write prompt statements to; defaults to stdout
:type output: file
:param normal_color: color of the text to write; this will be used in the color
function to reset the text after coloring it
:type normal_color: str (one of the COLOR_* variables in this module)
:param enable_color: determines if this prompt instance will output any modified
colors; if this is false the color() method will
always render the text as the normal_color
:type enable_color: bool
:param wrap_width: if specified, content written by this prompt will
automatically be wrapped to this width
:type wrap_width: int or None
:param record_tags: if true, the prompt will keep track of tags passed
to all write calls
:type record_tags: bool
"""
self.input = input
self.output = output
self.normal_color = normal_color
self.enable_color = enable_color
self.wrap_width = wrap_width
self.record_tags = record_tags
self.tags = []
# Initialize the screen with the normal color
if self.enable_color:
self.write(self.normal_color, new_line=False)
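    # Illustrative construction sketch (not part of the original source). It shows
    # one plausible way a caller might build a Prompt that wraps output to the
    # current terminal width and records read/write tags; the message text and
    # variable names are assumptions made only for this example.
    #
    #   p = Prompt(wrap_width=WIDTH_TERMINAL, record_tags=True)
    #   p.write('Starting import', tag='status')
    #   proceed = p.prompt_y_n('Continue?')
    #   # Recorded tag entries accumulate in p.tags; the exact entry format is
    #   # determined by _record_tag, defined later in this module.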
# -- general --------------------------------------------------------------
def read(self, prompt, tag=None, interruptable=True):
"""
        Reads user input. This will typically not be called directly; one of the prompt_* methods is usually used instead.
:param prompt: the prompt displayed to the user when the input is requested
:type prompt: string
:return: the input specified by the user
:rtype: string
"""
self._record_tag(TAG_READ, tag)
self.write(prompt, new_line=False)
try:
r = self.input.readline().rstrip() # rstrip removes the trailing \n
return r
except (EOFError, KeyboardInterrupt) as e:
if interruptable:
self.write('') # the ^C won't cause a line break but we probably want one
return ABORT
else:
raise e
def write(self, content, new_line=True, center=False, color=None, tag=None, skip_wrap=False):
"""
Writes content to the prompt's output stream.
:param content: content to display to the user
:type content: string
:param skip_wrap: if true, auto-wrapping won't be applied; defaults to false
:type skip_wrap: bool
"""
self._record_tag(TAG_WRITE, tag)
content = str(content)
if not skip_wrap:
content = self.wrap(content)
if center: content = self.center(content)
if color is not None: content = self.color(content, color)
if new_line: content += '\n'
self.output.write(content)
def color(self, text, color):
"""
Colors the given text with the given color, resetting the output back to whatever
color is defined in this instance's normal_color. Nothing is output to the screen;
the formatted string is returned.
:param text: text to color
:type text: str
:param color: coding for the color (see the COLOR_* variables in this module)
:type color: str
:return: new string with the proper color escape sequences in place
:rtype: str
"""
# Skip the wrapping if color is disabled at the instance level
if not self.enable_color or color is None:
return text
return '%s%s%s' % (color, text, self.normal_color)
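    # Illustrative usage sketch (not part of the original source): color() only
    # builds the escape-wrapped string, so it is typically paired with write().
    # The message text is an assumption made for the example.
    #
    #   p = Prompt()
    #   p.write(p.color('WARNING: disk nearly full', COLOR_LIGHT_RED))
    #   # or equivalently, let write() apply the color:
    #   p.write('WARNING: disk nearly full', color=COLOR_LIGHT_RED)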
def center(self, text, width=None):
"""
Centers the given text. Nothing is output to the screen; the formatted string
is returned. The width in which to center is the first non-None value
in the following order:
* Provided width parameter
* Instance-level wrap_width value
* Terminal width
:param text: text to center
:type text: str
:param width: width to center the text between
:type width: int
:return: string with spaces padding the left to center it
:rtype: str
"""
if width is None:
if self.wrap_width is None or self.wrap_width is WIDTH_TERMINAL:
width = self.terminal_size()[0]
else:
width = self.wrap_width
if len(text) >= width:
return text
else:
spacer = ' ' * ((width - len(text)) // 2)
return spacer + text
def wrap(self, content, wrap_width=None, remaining_line_indent=0):
"""
If the wrap_width is specified, this call will introduce new line
characters to maintain that width.
:param content: text to wrap
:type content: str
:param wrap_width: number of characters to wrap to
:type wrap_width: int
:param remaining_line_indent: number of characters to indent any new
lines generated from this call
:type remaining_line_indent: int
:return: wrapped version of the content string
:rtype: str
"""
# If it's not overridden, use the instance-configured wrap width
if wrap_width is None:
wrap_width = self.wrap_width
# If the instance isn't configured with a wrap width, we're done
if wrap_width is None:
return content
# If the instance is configured to dynamically calculate it based on
# the terminal width, figure that value out now
if wrap_width is WIDTH_TERMINAL:
wrap_width = self.terminal_size()[0]
# Actual splitting algorithm
        def _rightmost_space_index(text):
            for i in range(len(text) - 1, -1, -1):
                if text[i] == ' ':
                    return i
            return None
lines = [] # running track of split apart lines; assemble at the end
content = copy.copy(content)
first_pass = True
while True:
# If there's nothing left, we're done
            if len(content) == 0:
break
# Strip off any leading whitespace to left justify the new line;
            # don't strip on the first pass in case the user intentionally indented.
            # After stripping off any accidental whitespace, add in the indent
# for non-first lines.
if not first_pass:
content = content.lstrip()
content = (' ' * remaining_line_indent) + content
else:
first_pass = False
# Ideal situation is the text fills the width
end_index = wrap_width
chunk = content[:end_index]
# If this is the last chunk left, we're done
if end_index >= len(content):
lines.append(chunk)
break
# If the next character is a space, we've broken at a good point
if end_index < len(content) and content[end_index] == ' ':
lines.append(chunk)
content = content[end_index:]
continue
# This is the ugly case. Backtrack to the right-most space and make
# that the new chunk.
# I'd like to use rpartition here, but for outside reasons I have
# to stay 2.4 compliant and that's a 2.5 method. Damn.
last_space_index = _rightmost_space_index(chunk)
# If we found a space we can backtrack to and split there, use that
# as the chunk. If not, just leave the split as is.
if last_space_index is not None:
# In the case of a remaining line indent, we need to make sure
# the right-most space isn't just the indent, otherwise we're
# going to loop indefinitely.
if remaining_line_indent is not None and last_space_index > remaining_line_indent:
end_index = last_space_index
chunk = content[:end_index]
lines.append(chunk)
content = content[end_index:]
assembled = '\n'.join(lines)
return assembled
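    # Illustrative usage sketch (not part of the original source), showing how the
    # width and indent parameters interact; the sample text is an assumption.
    #
    #   p = Prompt()
    #   text = 'name: a fairly long description that will not fit on a single line'
    #   wrapped = p.wrap(text, wrap_width=30, remaining_line_indent=6)
    #   # Every line after the first starts with six spaces, lines stay within
    #   # roughly 30 characters, and breaks prefer the right-most space.
    #   p.write(wrapped, skip_wrap=True)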
def move(self, direction):
"""
Writes the given move cursor character to the screen without a new
line character. Values for direction should be one of the MOVE_*
variables.
:param direction: move character to write
:type direction: str
"""
self.write(direction, new_line=False)
def clear(self, clear_character=CLEAR):
"""
Writes one of the clear characters to the screen. If none is given,
the entire screen is cleared. One of the CLEAR_* variables can be
used to scope the cleared space.
:param clear_character: character code to write; should be one of
the CLEAR_* variables
:type clear_character: str
"""
self.write(clear_character, new_line=False)
def save_position(self):
"""
Saves the current location of the cursor. The cursor can be moved back
to this position by using the reset_position call.
"""
self.write(POSITION_SAVE, new_line=False)
def reset_position(self):
"""
Moves the cursor back to the location of the cursor at the last point
save_position was called.
"""
self.write(POSITION_RESET, new_line=False)
@classmethod
def terminal_size(cls):
"""
Returns the width and height of the terminal.
:return: tuple of width and height values
:rtype: (int, int)
"""
ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0))
h, w, hp, wp = struct.unpack('HHHH', ioctl)
return w, h
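    # Illustrative note (not part of the original source): terminal_size() queries
    # the terminal driver with the TIOCGWINSZ ioctl on file descriptor 0, so it
    # only works when stdin is attached to a real terminal. A caller might use it
    # to center a banner across the full screen width, for example:
    #
    #   width, height = Prompt.terminal_size()
    #   p = Prompt()
    #   p.write(p.center('= Status Report =', width=width))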
# -- prompts --------------------------------------------------------------
def prompt_file(self, question, allow_directory=False, allow_empty=False, interruptable=True):
"""
Prompts the user for the full path to a file, reprompting if the file does not
exist. If allow_empty is specified, the validation will only be performed if the
user enters a value.
"""
        f = self.prompt(question, allow_empty=allow_empty, interruptable=interruptable)
        if f is ABORT:
            return f
        if (f is None or f.strip() == '') and allow_empty:
return f
elif os.path.exists(f) and (allow_directory or os.path.isfile(f)):
return f
else:
self.write(_('Cannot find file, please enter a valid path'))
self.write('')
return self.prompt_file(question, allow_directory=allow_directory, allow_empty=allow_empty, interruptable=interruptable)
def prompt_values(self, question, values, interruptable=True):
"""
Prompts the user for the answer to a question where only an enumerated set of values
should be accepted.
:param values: list of acceptable answers to the question
:type values: list
:return: will be one of the entries in the values parameter
:rtype: string
"""
a = None
while a not in values:
a = self.prompt(question, interruptable=interruptable)
return a
def prompt_y_n(self, question, interruptable=True):
"""
Prompts the user for the answer to a yes/no question, assuming the value 'y' for yes and
'n' for no. If neither is entered, the user will be re-prompted until one of the two is
indicated.
:return: True if 'y' was specified, False otherwise
:rtype: boolean
"""
a = ''
while a != 'y' and a != 'n' and a is not ABORT:
a = self.prompt(question, interruptable=interruptable)
if a is ABORT:
return a
return a.lower() == 'y'
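    # Illustrative usage sketch (not part of the original source): because prompts
    # are interruptable, callers should check for ABORT before treating the result
    # as a boolean. The question text and called function are assumptions.
    #
    #   answer = p.prompt_y_n('Delete all records?')
    #   if answer is ABORT:
    #       p.write('Aborted')
    #   elif answer:
    #       delete_all_records()  # hypothetical application-side function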
def prompt_range(self, question, high_number, low_number=1, interruptable=True):
"""
        Prompts the user to enter a number within the given range. If the input is invalid, the
        user will be re-prompted until a valid number is provided.
"""
while True:
a = self.prompt_number(question, interruptable=interruptable)
if a > high_number or a < low_number:
self.write(_('Please enter a number between %d and %d') % (low_number, high_number))
continue
return a
def prompt_number(self, question, allow_negatives=False, allow_zero=False, default_value=None, interruptable=True):
"""
Prompts the user for a numerical input. If the given value does not represent a number,
the user will be re-prompted until a valid number is provided.
:return: number entered by the user that conforms to the parameters in this call
:rtype: int
"""
while True:
a = self.prompt(question, allow_empty=default_value is not None, interruptable=interruptable)
if a is ABORT:
return a
if (a is None or a == '') and default_value is not None:
return default_value
try:
i = int(a)
except ValueError:
self.write(_('Please enter a number'))
continue
if not allow_negatives and i < 0:
self.write(_('Please enter a number greater than zero'))
continue
if not allow_zero and i == 0:
self.write(_('Please enter a non-zero value'))
continue
return i
def prompt_default(self, question, default_value, interruptable=True):
"""
Prompts the user for an answer to the given question. If the user does not enter a value,
the default will be returned.
:param default_value: if the user does not enter a value, this value is returned
:type default_value: string
"""
answer = self.prompt(question, allow_empty=True, interruptable=interruptable)
if answer is None or answer == '':
return default_value
else:
return answer
def prompt_multiselect_menu(self, question, menu_values, interruptable=True):
"""
Displays a list of items, allowing the user to select 1 or more items before continuing.
The items selected by the user are returned.
:return: list of indices of the items the user selected, empty list if none are selected;
ABORT is returned if the user selects to abort the menu
:rtype: list or ABORT
"""
selected_indices = []
q = _('Enter value (1-%s) to toggle selection, \'c\' to confirm selections, or \'?\' for more commands: ') % len(menu_values)
while True:
self.write(question)
# Print the current state of the list
for index, value in enumerate(menu_values):
if index in selected_indices:
is_selected = 'x'
else:
is_selected = '-'
self.write(' %s %-2d: %s' % (is_selected, index + 1, value))
selection = self.prompt(q, interruptable=interruptable)
self.write('')
if selection is ABORT:
return ABORT
elif selection == '?':
                self.write(_(' <num> : toggles selection, valid values between 1 and %s') % len(menu_values))
self.write(_(' x-y : toggle the selection of a range of items (example: "2-5" toggles items 2 through 5)'))
self.write(_(' a : select all items'))
self.write(_(' n : select no items'))
self.write(_(' c : confirm the currently selected items'))
self.write(_(' b : abort the item selection'))
self.write(_(' l : clears the screen and redraws the menu'))
self.write('')
elif selection == 'c':
return selected_indices
elif selection == 'a':
selected_indices = list(range(0, len(menu_values)))
elif selection == 'n':
selected_indices = []
elif selection == 'b':
return ABORT
elif selection == 'l':
self.clear()
elif self._is_range(selection, len(menu_values)):
lower, upper = self._range(selection)
for i in range(lower, upper + 1):
if i in selected_indices:
selected_indices.remove(i)
else:
selected_indices.append(i)
elif selection.isdigit() and int(selection) < (len(menu_values) + 1):
value_index = int(selection) - 1
if value_index in selected_indices:
selected_indices.remove(value_index)
else:
selected_indices.append(value_index)
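    # Illustrative call (hypothetical menu values; not part of the original
    # API docs): if the user toggles items 1 and 3 and then confirms with
    # 'c', the zero-based indices of the selected items are returned:
    #     prompt.prompt_multiselect_menu('Pick packages:', ['a', 'b', 'c'])
    #     # -> [0, 2]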
def prompt_multiselect_sectioned_menu(self, question, section_items, section_post_text=None, interruptable=True):
"""
Displays a multiselect menu for the user where the items are broken up by section,
however the numbering is consecutive to provide unique indices for the user to use
for selection. Entries from one or more section may be toggled; the section
headers are merely used for display purposes.
Each key in section_items is displayed as the section header. Each item in the
list at that key will be rendered as belonging to that section.
The returned value will be a dict that maps each section header (i.e. key in section_items)
and the value is a list of indices that were selected from the original list passed in
section_items under that key. If no items were selected under a given section, an empty
list is the value in the return for each section key.
For example, given the input data::
{ 'Section 1' : ['Item 1.1', 'Item 1.2'],
'Section 2' : ['Item 2.1'],}
The following is rendered for the user::
Section 1
- 1 : Item 1.1
- 2 : Item 1.2
Section 2
- 3 : Item 2.1
If the user entered 1, 2, and 3, thus toggling them as selected, the following would be returned::
{ 'Section 1' : [0, 1],
'Section 2' : [0],}
However, if only 2 was toggled, the return would be::
{ 'Section 1' : [1],
'Section 2' : [],}
If the user chooses the "abort" option, None is returned.
:param question: displayed to the user before displaying the menu
:type question: str
:param section_items: data to be rendered; each key must be a string and each value must
be a list of strings
:type section_items: dict {str : list[str]}
:param section_post_text: if specified, this string will be displayed on its own line between
each section
:type section_post_text: str
:return: selected indices for each list specified in each section; ABORT
if the user elected to abort the selection
:rtype: dict {str : list[int]} or ABORT
"""
selected_index_map = {}
for key in section_items:
selected_index_map[key] = []
total_item_count = reduce(lambda count, key: count + len(section_items[key]), list(section_items.keys()), 0)
q = _('Enter value (1-%s) to toggle selection, \'c\' to confirm selections, or \'?\' for more commands: ') % total_item_count
while True:
self.write(question)
# Print current state of the list, keeping a running tuple that maps the index
# displayed to/used by the user to the section key and index that item was found in
mapper = []
counter = 1
for key in section_items:
# Write the section header
self.write(' %s' % key)
# Render the list, using an incrementing toggle number that transcends any one section
for index, item in enumerate(section_items[key]):
if index in selected_index_map[key]:
is_selected = 'x'
else:
is_selected = '-'
self.write(' %s %-2d: %s' % (is_selected, counter, item))
mapper.append((key, index))
counter += 1
# If the caller wants something between sections, display it now
if section_post_text is not None:
self.write(section_post_text)
selection = self.prompt(q, interruptable=interruptable)
self.write('')
if selection is ABORT:
return ABORT
elif selection == '?':
                self.write(_(' <num> : toggles selection, valid values between 1 and %s') % total_item_count)
self.write(_(' x-y : toggle the selection of a range of items (example: "2-5" toggles items 2 through 5)'))
self.write(_(' a : select all items'))
self.write(_(' n : select no items'))
self.write(_(' c : confirm the currently selected items'))
self.write(_(' b : abort the item selection'))
self.write(_(' l : clears the screen and redraws the menu'))
self.write('')
elif selection == 'c':
return selected_index_map
elif selection == 'a':
# Recreate the selected index map, adding in indices for each item
selected_index_map = {}
for key in section_items:
selected_index_map[key] = list(range(0, len(section_items[key])))
elif selection == 'n':
selected_index_map = {}
for key in section_items:
selected_index_map[key] = []
elif selection == 'b':
return ABORT
elif selection == 'l':
os.system('clear')
elif self._is_range(selection, total_item_count):
lower, upper = self._range(selection)
for i in range(lower, upper + 1):
section_key = mapper[i][0]
section_index = mapper[i][1]
if section_index in selected_index_map[section_key]:
selected_index_map[section_key].remove(section_index)
else:
selected_index_map[section_key].append(section_index)
elif selection.isdigit() and int(selection) < (total_item_count + 1):
value_index = int(selection) - 1
section_key = mapper[value_index][0]
section_index = mapper[value_index][1]
if section_index in selected_index_map[section_key]:
selected_index_map[section_key].remove(section_index)
else:
selected_index_map[section_key].append(section_index)
def prompt_menu(self, question, menu_values, interruptable=True):
"""
Displays a list of items, allowing the user to select a single item in the
list. The index of the selected item is returned. If interruptable is
set to true and the user exits (through ctrl+c), the ABORT constant
is returned.
:param question: displayed to the user prior to rendering the list
:type question: str
:param menu_values: list of items to display in the menu; the returned value
will be one of the items in this list
:type menu_values: list of str
:return: index of the selected item; ABORT if the user elected to abort
:rtype: int or ABORT
"""
self.write(question)
for index, value in enumerate(menu_values):
self.write(' %-2d - %s' % (index + 1, value))
q = _('Enter value (1-%d) or \'b\' to abort: ') % len(menu_values)
while True:
selection = self.prompt(q, interruptable=interruptable)
if selection is ABORT or selection == 'b':
return ABORT
elif selection.isdigit() and int(selection) < (len(menu_values) + 1):
return int(selection) - 1 # to remove the +1 for display purposes
def prompt_password(self, question, verify_question=None, unmatch_msg=None, interruptable=True):
"""
Prompts the user for a password. If a verify question is specified, the
user will be prompted to match the previously entered password (suitable
for things such as changing a password). If it is not specified, the first
value entered will be returned.
The user entered text will not be echoed to the screen.
:return: entered password
:rtype: str
"""
while True:
try:
password_1 = self._get_password(question)
except KeyboardInterrupt:
if interruptable:
return ABORT
raise
if verify_question is None:
return password_1
try:
password_2 = self._get_password(verify_question)
except KeyboardInterrupt:
if interruptable:
return ABORT
raise
if password_1 != password_2:
self.write(unmatch_msg)
self.write('')
else:
return password_1
def _get_password(self, question):
"""
Gets a password from the user interactively, supporting degraded
behavior when called in python 2.4. The degraded behavior is explained
in-line below.
:param question: displayed to the user when prompting for input
:type question: str
:return: password that the user entered
:rtype: basestring
"""
try:
return getpass.getpass(question, stream=self.output)
# In python 2.4, getpass.getpass does not have the "stream" parameter
# and thus raises a TypeError for the above call. We will handle that
# by simply not passing an argument for it, thus not allowing python
# 2.4 users to take advantage of the self.output abstraction.
except TypeError:
return getpass.getpass(question)
def prompt(self, question, allow_empty=False, interruptable=True):
"""
Prompts the user for an answer to the given question, re-prompting if the answer is
blank.
:param question: displayed to the user when prompting for input
:type question: str
:param allow_empty: if True, a blank line will be accepted as input
:type allow_empty: bool
:param interruptable: if True, keyboard interrupts will be caught and None will
be returned; if False, keyboard interrupts will raise as
normal
:type interruptable: bool
:return: answer to the given question or the ABORT constant in this
module if it was interrupted
"""
answer = None
while answer is None or answer.strip() == '':
answer = self.read(question, interruptable=interruptable)
if allow_empty: break
if answer is ABORT: break
return answer
# -- utility --------------------------------------------------------------
def get_tags(self):
"""
Returns all tags for both read and write calls. Unlike read_tags and
write_tags, the return value is a list of tuples. The first entry in
the tuple will be one of [TAG_READ, TAG_WRITE] to indicate what
triggered the tag. The second value in the tuple is the tag itself.
:return: list of tag tuples: (tag_type, tag_value); empty list if
record_tags was set to false
:rtype: list
"""
return self.tags
def get_read_tags(self):
"""
Returns the values for all tags that were passed to read calls.
If record_tags is enabled on this instance and a tag was not
specified, an empty string will be added in its place.
:return: list of tag values; empty list if record_tags was set to false
:rtype: list
"""
r = [t[1] for t in self.tags if t[0] == TAG_READ]
return r
def get_write_tags(self):
"""
Returns the values for all tags that were passed to write calls.
If record_tags is enabled on this instance and a tag was not
specified, an empty string will be added in its place.
:return: list of tag values; empty list if record_tags was set to false
:rtype: list
"""
w = [t[1] for t in self.tags if t[0] == TAG_WRITE]
return w
# -- private --------------------------------------------------------------
def _is_range(self, input, selectable_item_count):
"""
:return: True if the input represents a range in a multiselect menu,
False otherwise
:rtype: bool
"""
parsed = input.split('-')
if len(parsed) != 2:
return False
lower = parsed[0].strip()
upper = parsed[1].strip()
return lower.isdigit() and int(lower) > 0 and \
upper.isdigit() and int(upper) <= selectable_item_count and \
int(lower) < int(upper)
def _range(self, input):
"""
If an input is determined to be a range by _is_range, this call will
return the lower and upper indices of the range (the entered values
        will be subtracted by 1 to accommodate the UI view).
:return: tuple of (lower boundary, upper boundary)
:rtype: (int, int)
"""
parsed = input.split('-')
return int(parsed[0].strip()) - 1, int(parsed[1].strip()) - 1
def _record_tag(self, io, tag):
"""
        Stores the given tag in the prompt if it is configured to track them.
If tag is None, nothing is recorded.
:param io: flag indicating if it's recording a read or a write;
should be one of the TAG_* constants
:type io: str
:param tag: value passed into the write call
:type tag: object
"""
if not self.record_tags or tag is None:
return
# Store in a tuple with the io direction
t = (io, tag or '')
self.tags.append(t)
class Recorder(object):
"""
Suitable for passing to the Prompt constructor as the output, an instance
of this class will store every line written to it in an internal list.
"""
def __init__(self):
self.lines = []
def write(self, line):
self.lines.append(line)
class Script(object):
"""
Suitable for passing to the Prompt constructor as the input, an instance
of this class will return each line set within on each call to read.
"""
# If this is present in the list of lines, a KeyboardInterrupt will be raised
INTERRUPT = object()
def __init__(self, lines):
self.lines = lines
def readline(self, size=None):
value = self.lines.pop(0)
if value is Script.INTERRUPT:
raise KeyboardInterrupt()
return value
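# Illustrative usage sketch (not part of the original module; the constructor
# parameter names used to wire in Script/Recorder are assumptions): Script and
# Recorder make a Prompt scriptable for tests, as their docstrings describe.
#
#     script = Script(['y', '3'])
#     recorder = Recorder()
#     prompt = Prompt(input=script, output=recorder)
#     confirmed = prompt.prompt_y_n('Continue? ')   # consumes 'y' -> True
#     count = prompt.prompt_number('How many? ')    # consumes '3' -> 3
#     # recorder.lines now holds everything the prompt wrote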
| gpl-2.0 | -922,248,598,433,704,800 | 35.806112 | 133 | 0.575539 | false |
wasiahmad/Paraphrase-Identification-Task | source_code_in_theano/lstm.py | 1 | 5735 | import numpy as np
import theano
import theano.tensor as T
from theano.gradient import grad_clip
import time
import operator
# Theano implementation of a single-layer LSTM
class LSTM:
def __init__(self, word_dim, hidden_dim=128, bptt_truncate=-1):
# Assign instance variables
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.bptt_truncate = bptt_truncate
# Initialize the network parameters
E = np.random.uniform(-np.sqrt(1. / word_dim), np.sqrt(1. / word_dim), (hidden_dim, word_dim))
U = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (4, hidden_dim, hidden_dim))
W = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (4, hidden_dim, hidden_dim))
V = np.random.uniform(-np.sqrt(1. / hidden_dim), np.sqrt(1. / hidden_dim), (word_dim, hidden_dim))
b = np.zeros((4, hidden_dim))
c = np.zeros(word_dim)
# Theano: Created shared variables
self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
self.b = theano.shared(name='b', value=b.astype(theano.config.floatX))
self.c = theano.shared(name='c', value=c.astype(theano.config.floatX))
# SGD / rmsprop: Initialize parameters
self.mE = theano.shared(name='mE', value=np.zeros(E.shape).astype(theano.config.floatX))
self.mU = theano.shared(name='mU', value=np.zeros(U.shape).astype(theano.config.floatX))
self.mV = theano.shared(name='mV', value=np.zeros(V.shape).astype(theano.config.floatX))
self.mW = theano.shared(name='mW', value=np.zeros(W.shape).astype(theano.config.floatX))
self.mb = theano.shared(name='mb', value=np.zeros(b.shape).astype(theano.config.floatX))
self.mc = theano.shared(name='mc', value=np.zeros(c.shape).astype(theano.config.floatX))
# We store the Theano graph here
self.theano = {}
self.__theano_build__()
def __theano_build__(self):
E, V, U, W, b, c = self.E, self.V, self.U, self.W, self.b, self.c
x = T.ivector('x')
y = T.ivector('y')
def forward_prop_step(x_t, h_t_prev, c_t_prev):
# Word embedding layer
x_e = E[:, x_t]
# LSTM Layer
i_t = T.nnet.hard_sigmoid(U[0].dot(x_e) + W[0].dot(h_t_prev) + b[0])
f_t = T.nnet.hard_sigmoid(U[1].dot(x_e) + W[1].dot(h_t_prev) + b[1])
o_t = T.nnet.hard_sigmoid(U[2].dot(x_e) + W[2].dot(h_t_prev) + b[2])
g_t = T.tanh(U[3].dot(x_e) + W[3].dot(h_t_prev) + b[3])
c_t = c_t_prev * f_t + g_t * i_t
h_t = T.tanh(c_t) * o_t
# Final output calculation
output_t = T.nnet.softmax(V.dot(h_t) + c)[0]
return [output_t, h_t, c_t]
[output, hidden_state, cell_state], updates = theano.scan(
forward_prop_step,
sequences=x,
truncate_gradient=self.bptt_truncate,
outputs_info=[None,
dict(initial=T.zeros(self.hidden_dim)),
dict(initial=T.zeros(self.hidden_dim))])
prediction = T.argmax(output, axis=1)
output_error = T.sum(T.nnet.categorical_crossentropy(output, y))
# Total cost, we can add regularization here
cost = output_error
# Gradients
dE = T.grad(cost, E)
dU = T.grad(cost, U)
dW = T.grad(cost, W)
db = T.grad(cost, b)
dV = T.grad(cost, V)
dc = T.grad(cost, c)
# Assign functions
self.predict = theano.function([x], output)
self.predict_class = theano.function([x], prediction)
self.ce_error = theano.function([x, y], cost)
self.bptt = theano.function([x, y], [dE, dU, dW, db, dV, dc])
self.cell_states = theano.function([x], cell_state)
self.hidden_states = theano.function([x], hidden_state)
# SGD parameters
learning_rate = T.scalar('learning_rate')
decay = T.scalar('decay')
# rmsprop cache updates
mE = decay * self.mE + (1 - decay) * dE ** 2
mU = decay * self.mU + (1 - decay) * dU ** 2
mW = decay * self.mW + (1 - decay) * dW ** 2
mV = decay * self.mV + (1 - decay) * dV ** 2
mb = decay * self.mb + (1 - decay) * db ** 2
mc = decay * self.mc + (1 - decay) * dc ** 2
self.sgd_step = theano.function(
[x, y, learning_rate, theano.Param(decay, default=0.9)],
[],
updates=[(E, E - learning_rate * dE / T.sqrt(mE + 1e-6)),
(U, U - learning_rate * dU / T.sqrt(mU + 1e-6)),
(W, W - learning_rate * dW / T.sqrt(mW + 1e-6)),
(V, V - learning_rate * dV / T.sqrt(mV + 1e-6)),
(b, b - learning_rate * db / T.sqrt(mb + 1e-6)),
(c, c - learning_rate * dc / T.sqrt(mc + 1e-6)),
(self.mE, mE),
(self.mU, mU),
(self.mW, mW),
(self.mV, mV),
(self.mb, mb),
(self.mc, mc)
])
def calculate_total_loss(self, X, Y):
return np.sum([self.ce_error(x, y) for x, y in zip(X, Y)])
def calculate_loss(self, X, Y):
# Divide calculate_loss by the number of words
num_words = np.sum([len(y) for y in Y])
return self.calculate_total_loss(X, Y) / float(num_words)
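# Illustrative usage sketch (hypothetical data; not part of the original file):
# X_train and y_train would be lists of word-index sequences, with each y
# shifted one step ahead of its x for next-word prediction.
#
#     model = LSTM(word_dim=8000, hidden_dim=128)
#     for epoch in range(10):
#         for x, y in zip(X_train, y_train):
#             model.sgd_step(x, y, 0.001, 0.9)  # learning rate, rmsprop decay
#         print(model.calculate_loss(X_train, y_train))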
| mit | -3,497,113,918,323,928,600 | 42.120301 | 111 | 0.539669 | false |
biln/airflow | airflow/jobs.py | 1 | 43658 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime, timedelta
import getpass
import logging
import socket
import subprocess
import multiprocessing
import math
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
from airflow.settings import Stats
DagRun = models.DagRun
Base = models.Base
ID_LEN = models.ID_LEN
Stats = settings.Stats
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
        externally. This allows monitoring, at the system level, of what
        is actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
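# Illustrative subclass sketch (hypothetical; not part of Airflow): concrete
# jobs override _execute() and call heartbeat() periodically so the job row
# stays fresh and remains externally killable.
#
#     class NoopJob(BaseJob):
#         __mapper_args__ = {'polymorphic_identity': 'NoopJob'}
#
#         def _execute(self):
#             for _ in range(3):
#                 self.heartbeat()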
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and see if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
            .filter(or_(SlaMiss.email_sent == False, SlaMiss.notification_sent == False))
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
@provide_session
def schedule_dag(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
DagRun = models.DagRun
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
if len(active_runs) >= dag.max_active_runs:
return
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
session.commit()
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX+'%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.logger.debug("Next run date based on tasks {}"
.format(next_run_date))
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.logger.debug("Dag start date: {}. Next run date: {}"
.format(dag.start_date, next_run_date))
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= datetime.now():
next_run = dag.create_dagrun(
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False
)
return next_run
def process_dag(self, dag, queue):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
# obtain db lock
db_dag = session.query(DagModel).filter_by(
dag_id=dag.dag_id
).with_for_update().one()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (datetime.now() - last_scheduler_run).total_seconds()
if secs_since_last < self.heartrate:
# release db lock
session.commit()
session.close()
return None
# Release the db lock
# the assumption here is that process_dag will take less
# time than self.heartrate otherwise we might unlock too
        # quickly, and this should be moved below, but that would increase
# the time the record is locked and is blocking for other calls.
db_dag.last_scheduler_run = datetime.now()
session.commit()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
# do not consider runs that are executed in the future
if run.execution_date > datetime.now():
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
            # this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.is_runnable(flag_upstream_failed=True):
self.logger.debug('Queuing task: {}'.format(ti))
queue.put((ti.key, pickle_id))
session.close()
@provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
self.logger.info(
"Prioritizing {} queued jobs".format(len(queued_tis)))
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if ti.dag_id not in dagbag.dags:
self.logger.info(
"DAG no longer in dagbag, deleting {}".format(ti))
session.delete(ti)
session.commit()
elif not dagbag.dags[ti.dag_id].has_task(ti.task_id):
self.logger.info(
"Task no longer exists, deleting {}".format(ti))
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
dag_blacklist = set(dagbag.paused_dags())
for pool, tis in list(d.items()):
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
open_slots = pools[pool].open_slots(session=session)
queue_size = len(tis)
self.logger.info("Pool {pool} has {open_slots} slots, {queue_size} "
"task instances in queue".format(**locals()))
if open_slots <= 0:
continue
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis:
if open_slots <= 0:
continue
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
self.logger.error("Queued task {} seems gone".format(ti))
session.delete(ti)
session.commit()
continue
if not task:
continue
ti.task = task
# picklin'
dag = dagbag.dags[ti.dag_id]
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor,
executors.SequentialExecutor):
self.logger.info("Pickling DAG {}".format(dag))
pickle_id = dag.pickle(session).id
if dag.dag_id in dag_blacklist:
continue
if dag.concurrency_reached:
dag_blacklist.add(dag.dag_id)
continue
if ti.are_dependencies_met():
executor.queue_task_instance(ti, pickle_id=pickle_id)
open_slots -= 1
else:
session.delete(ti)
session.commit()
continue
ti.task = task
session.commit()
def _split(self, items, size):
"""
This function splits a list of items into chunks of int size.
_split([1,2,3,4,5,6], 3) becomes [[1,2,3],[4,5,6]]
"""
size = max(1, size)
return [items[i:i + size] for i in range(0, len(items), size)]
def _do_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and schedules and processes them
"""
for dag in dags:
self.logger.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag:
continue
try:
self.schedule_dag(dag)
self.process_dag(dag, tis_out)
self.manage_slas(dag)
except Exception as e:
self.logger.exception(e)
@provide_session
def _reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
        This function checks a DagRun for any tasks that have a
        scheduled state but are not known by the executor. If it finds
        any, it resets their state to None so they will get picked up
        again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = dag_run.get_task_instances(state=State.SCHEDULED, session=session)
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
ti.state = State.NONE
self.logger.debug("Rescheduling orphaned task {}".format(ti))
session.commit()
def _execute(self):
session = settings.Session()
TI = models.TaskInstance
pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
self.logger.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = self.executor = dagbag.executor
executor.start()
# grab orphaned tasks and make sure to reset their state
active_runs = DagRun.find(
state=State.RUNNING,
external_trigger=False,
session=session
)
for dr in active_runs:
self._reset_state_for_orphaned_tasks(dr, session=session)
self.runs = 0
while not self.num_runs or self.num_runs > self.runs:
try:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
self.logger.exception(e)
self.runs += 1
try:
if self.runs % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except Exception as e:
self.logger.error("Failed at reloading the dagbag. {}".format(e))
Stats.incr('dag_refresh_error', 1, 1)
sleep(5)
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids]
else:
dags = [
dag for dag in dagbag.dags.values()
if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
dags = [x for x in dags if x.dag_id not in paused_dag_ids]
# dags = filter(lambda x: x.dag_id not in paused_dag_ids, dags)
self.logger.debug("Total Cores: {} Max Threads: {} DAGs:{}".
format(multiprocessing.cpu_count(),
self.max_threads,
len(dags)))
dags = self._split(dags, math.ceil(len(dags) / self.max_threads))
tis_q = multiprocessing.Queue()
jobs = [multiprocessing.Process(target=self._do_dags,
args=(dagbag, dags[i], tis_q))
for i in range(len(dags))]
self.logger.info("Starting {} scheduler jobs".format(len(jobs)))
for j in jobs:
j.start()
while any(j.is_alive() for j in jobs):
while not tis_q.empty():
ti_key, pickle_id = tis_q.get()
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
if ti.state == State.SCHEDULED:
session.commit()
self.logger.debug("Task {} was picked up by another scheduler"
.format(ti))
continue
elif ti.state is State.NONE:
ti.state = State.SCHEDULED
self.executor.queue_task_instance(ti, pickle_id=pickle_id)
session.merge(ti)
session.commit()
for j in jobs:
j.join()
self.logger.info("Done queuing tasks, calling the executor's "
"heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
self.logger.info("Loop took: {} seconds".format(duration_sec))
Stats.timing("scheduler_loop", duration_sec * 1000)
try:
self.import_errors(dagbag)
except Exception as e:
self.logger.exception(e)
try:
dagbag.kill_zombies()
except Exception as e:
self.logger.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
self.logger.exception(e)
self.logger.error("Tachycardia!")
except Exception as deep_e:
self.logger.exception(deep_e)
raise
finally:
settings.Session.remove()
executor.end()
session.close()
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
    triggers a set of task instance runs, in the right order, and lasts for
    as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
ignore_first_depends_on_past=False,
pool=None,
*args, **kwargs):
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
executor_fails = Counter()
# Build a list of all instances to run
tasks_to_run = {}
failed = set()
succeeded = set()
started = set()
skipped = set()
not_ready = set()
deadlocked = set()
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in self.dag.date_range(start_date, end_date=end_date):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
session.merge(ti)
session.commit()
# Triggering what is ready to get triggered
while tasks_to_run and not deadlocked:
not_ready.clear()
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db(session=session, lock_for_update=True)
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if key not in started:
if ti.state == State.SUCCESS:
succeeded.add(key)
tasks_to_run.pop(key)
session.commit()
continue
elif ti.state == State.SKIPPED:
skipped.add(key)
tasks_to_run.pop(key)
session.commit()
continue
# Is the task runnable? -- then run it
if ti.is_queueable(
include_queued=True,
ignore_depends_on_past=ignore_depends_on_past,
flag_upstream_failed=True):
self.logger.debug('Sending {} to executor'.format(ti))
if ti.state == State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool)
started.add(key)
# Mark the task as not ready to run
elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
not_ready.add(key)
session.commit()
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run, then the backfill is deadlocked
if not_ready and not_ready == set(tasks_to_run):
deadlocked.update(tasks_to_run.values())
tasks_to_run.clear()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
# executor reports failure
if state == State.FAILED:
# task reports running
if ti.state == State.RUNNING:
msg = (
'Executor reports that task instance {} failed '
'although the task says it is running.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.error("Skipping {} ".format(key))
skipped.add(key)
tasks_to_run.pop(key)
# anything else is a failure
else:
self.logger.error("Task instance {} failed".format(key))
failed.add(key)
tasks_to_run.pop(key)
# executor reports success
elif state == State.SUCCESS:
# task reports success
if ti.state == State.SUCCESS:
self.logger.info(
'Task instance {} succeeded'.format(key))
succeeded.add(key)
tasks_to_run.pop(key)
# task reports failure
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(key))
failed.add(key)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.info("Task instance {} skipped".format(key))
skipped.add(key)
tasks_to_run.pop(key)
# this probably won't ever be triggered
elif ti in not_ready:
self.logger.info(
"{} wasn't expected to run, but it did".format(ti))
# executor reports success but task does not - this is weird
elif ti.state not in (
State.SCHEDULED,
State.QUEUED,
State.UP_FOR_RETRY):
self.logger.error(
"The airflow run command failed "
"at reporting an error. This should not occur "
"in normal circumstances. Task state is '{}',"
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
# if the executor fails 3 or more times, stop trying to
# run the task
executor_fails[key] += 1
if executor_fails[key] >= 3:
msg = (
'The airflow run command failed to report an '
'error for task {} three or more times. The '
'task is being marked as failed. This is very '
'unusual and probably means that an error is '
'taking place before the task even '
'starts.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
msg = ' | '.join([
"[backfill progress]",
"waiting: {0}",
"succeeded: {1}",
"kicked_off: {2}",
"failed: {3}",
"skipped: {4}",
"deadlocked: {5}"
]).format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(skipped),
len(deadlocked))
self.logger.info(msg)
executor.end()
session.close()
err = ''
if failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(failed))
if deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met() != t.are_dependencies_met(
ignore_depends_on_past=True)
for t in deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks were unable to run:\n{}\n'.format(deadlocked)
if err:
raise AirflowException(err)
self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
ignore_depends_on_past=False,
force=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.ignore_depends_on_past = ignore_depends_on_past
self.force = force
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
        # terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
# Keeps track of the fact that the task instance has been observed
# as running at least once
self.was_running = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
ignore_depends_on_past=self.ignore_depends_on_past,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
job_id=self.id,
pool=self.pool,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# task is already terminating, let it breathe
return
# Suicide pill
TI = models.TaskInstance
ti = self.task_instance
state = session.query(TI.state).filter(
TI.dag_id==ti.dag_id, TI.task_id==ti.task_id,
TI.execution_date==ti.execution_date).scalar()
if state == State.RUNNING:
self.was_running = True
elif self.was_running and hasattr(self, 'process'):
logging.warning(
"State of this instance has been externally set to "
"{self.task_instance.state}. "
"Taking the poison pill. So long.".format(**locals()))
self.process.terminate()
self.terminating = True
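# Illustrative usage sketch (hypothetical invocations; the airflow CLI normally
# constructs these jobs): run a scheduler bounded to a single loop, or backfill
# a DAG object over a date range.
#
#     SchedulerJob(dag_id='example_dag', subdir='/path/to/dags', num_runs=1).run()
#     BackfillJob(dag, start_date=datetime(2016, 1, 1),
#                 end_date=datetime(2016, 1, 7)).run()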
| apache-2.0 | 8,467,749,364,340,650,000 | 36.996519 | 110 | 0.518507 | false |
nisavid/bedframe | bedframe/auth/_provisions.py | 1 | 13350 | """Security provisions."""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import abc as _abc
from functools import reduce as _reduce
from itertools import chain as _chain, combinations as _combinations
from operator import or_ as _or
from spruce.collections \
import frozenusetset as _frozenusetset, usetset as _usetset
from spruce.lang import namedflagset_classes as _namedflagset_classes
def provisionsets_combinations(provisionsets, choose0=True):
"""
The authentication security provision sets that are yielded by
combinations of given sets.
:param provisionsets:
A set of security provision sets.
:type provisionsets: ~[:class:`ProvisionSetABC`]
:param bool choose0:
Whether to include the empty choice. If true, then the empty set of
security provisions is always included in the result. Otherwise, it
is included only if *provisionsets* contains it.
:rtype: :class:`ProvisionSetABC`
"""
if choose0:
yield iter(provisionsets).next().__class__()
for combination \
in _chain(*(_combinations(provisionsets, nchoices)
for nchoices in range(1, len(provisionsets) + 1))):
yield _reduce(_or, combination)
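# Illustrative example (hypothetical scenario; the SECPROV_* flags are
# registered later in this module):
#
#     sets = [FrozenProvisionSet(SECPROV_CLIENT_AUTH),
#             FrozenProvisionSet(SECPROV_SERVER_NONCE)]
#     list(provisionsets_combinations(sets))
#     # -> the empty provision set, each input set, and their union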
_ProvisionSet_doc_body = \
"""
An :class:`!ProvisionSet` object is a set of security properties
    that could be provided by an authentication mechanism.
Example usage::
auth_provisions = \\
ProvisionSet(SECPROV_SERVER_NONCE
& SECPROV_SERVER_NONCE_USE_COUNT)
if SECPROV_SERVER_NONCE not in auth_provisions:
print 'vulnerability to a simple replay attack'
if SECPROV_SERVER_NONCE_USE_COUNT not in auth_provisions:
print 'vulnerability to a simple online replay attack'
if SECPROV_CLIENT_NONCE not in auth_provisions:
print 'vulnerability to a chosen plaintext attack'
if SECPROV_REQUEST_ENTITY_INTEGRITY not in auth_provisions:
print 'vulnerability to a mangled request entity'
.. seealso::
:class:`spruce.lang.NamedFlagSet \
<spruce.lang._datatypes._misc.NamedFlagSet>`
"""
_ProvisionSet_doc = \
"""Authentication security provisions.
""" \
+ _ProvisionSet_doc_body
_FrozenProvisionSet_doc = \
"""Immutable authentication security provisions.
""" \
+ _ProvisionSet_doc_body
ProvisionSetABC, ProvisionSet, FrozenProvisionSet = \
_namedflagset_classes('ProvisionSet', doc=_ProvisionSet_doc,
frozendoc=_ProvisionSet_doc)
class ProvisionSetSetABC(object):
__metaclass__ = _abc.ABCMeta
def __repr__(self):
if not self:
items_repr = ''
elif self == self._universe():
items_repr = '*'
else:
ubi_provisionsets, nonubi_provisionsets = \
self._condensed_repr_provisionsets()
if ubi_provisionsets:
ubi_provisionsets = sorted(ubi_provisionsets)
ubi_provisionsets_repr = \
' | '.join(repr(provisions)
for provisions in ubi_provisionsets)
if nonubi_provisionsets == '*':
items_repr = '{}*'.format(ubi_provisionsets_repr)
elif nonubi_provisionsets in ((), (FrozenProvisionSet(),)):
items_repr = ubi_provisionsets_repr
else:
nonubi_provisionsets = sorted(nonubi_provisionsets)
items_repr = \
'(({}) | {{p}} for p in {{{}}})'\
.format(ubi_provisionsets_repr,
', '.join(repr(provisions)
for provisions
in nonubi_provisionsets))
else:
items_repr = repr(self._items)
return '{}({})'.format(self.__class__.__name__, items_repr)
def __str__(self):
if not self:
return '{}'
elif self == self._universe():
return '*'
else:
ubi_provisionsets, nonubi_provisionsets = \
self._condensed_repr_provisionsets()
if ubi_provisionsets:
ubi_provisionsets = sorted(ubi_provisionsets)
ubi_provisionsets_str = \
', '.join(str(provisions)
for provisions in ubi_provisionsets)
if nonubi_provisionsets == '*':
items_str = 'any with ' + ubi_provisionsets_str
elif nonubi_provisionsets == (FrozenProvisionSet(),):
items_str = ubi_provisionsets_str
else:
nonubi_provisionsets = sorted(nonubi_provisionsets)
nonubi_provisionsets_str = \
' or '.join(str(provisions)
for provisions in nonubi_provisionsets)
if ubi_provisionsets:
if nonubi_provisionsets:
if len(ubi_provisionsets) > 1:
ubi_provisionsets_str = \
'({})'.format(ubi_provisionsets_str)
if len(nonubi_provisionsets) > 1:
nonubi_provisionsets_str = \
'any of ({})'\
.format(nonubi_provisionsets_str)
items_str = \
'{} with {}'.format(ubi_provisionsets_str,
nonubi_provisionsets_str)
else:
items_str = ubi_provisionsets_str
elif nonubi_provisionsets:
items_str = nonubi_provisionsets_str
else:
items_str = ''
return '{{{}}}'.format(items_str)
elif nonubi_provisionsets:
return '{{{}}}'.format(' or '.join(str(provisions)
for provisions
in nonubi_provisionsets))
else:
return str(self._items)
def all_contain(self, provisions):
return all(provisions <= self_provisions for self_provisions in self)
def any_contain(self, provisions):
return any(provisions <= self_provisions for self_provisions in self)
def _condensed_repr_provisionsets(self):
if not self._items:
return set(), set()
ubi_provisionsets = set()
ubi_mask = 0
items = [ProvisionSet(item) for item in self._items]
for provisions in reversed(sorted(ProvisionSet.valid_flags(),
key=int)):
if all(provisions <= item for item in items):
ubi_provisionsets.add(provisions)
ubi_mask |= int(provisions)
for item in items:
item &= ~int(provisions)
nonubi_provisionsets = set()
for item in set(provisionsets_combinations
(ProvisionSet.valid_flags())):
item_ = FrozenProvisionSet(item & ~ubi_mask)
if item_ in items:
nonubi_provisionsets.add(item_)
if len(nonubi_provisionsets) == 1 \
and not iter(nonubi_provisionsets).next():
nonubi_provisionsets = ()
elif nonubi_provisionsets \
== set(set(provisionsets_combinations
(ProvisionSet.valid_flags()))
- ubi_provisionsets):
nonubi_provisionsets = '*'
return ubi_provisionsets, nonubi_provisionsets
@classmethod
@_abc.abstractmethod
def _item_class(cls):
pass
@classmethod
def _universe(cls):
if cls._universe_ is None:
cls._universe_ = \
cls(provisionsets_combinations
(ProvisionSet.valid_flags()))\
._items
return cls._universe_
_universe_ = None
class ProvisionSetSet(ProvisionSetSetABC, _usetset):
"""A set of provision sets.
.. seealso::
:class:`spruce.collections.usetset \
<spruce.collections._sets._universalizable.usetset>`
"""
@classmethod
def _item_class(cls):
return FrozenProvisionSet
class FrozenProvisionSetSet(ProvisionSetSetABC, _frozenusetset):
"""An immutable set of provision sets.
.. seealso::
:class:`spruce.collections.frozenusetset \
<spruce.collections._sets._universalizable.frozenusetset>`
"""
@classmethod
def _item_class(cls):
return FrozenProvisionSet
SECPROV_CLIENT_AUTH = ProvisionSet.register_flag('SECPROV_CLIENT_AUTH',
'client authentication')
"""Authentication verifies the identity of the client."""
SECPROV_SERVER_AUTH = ProvisionSet.register_flag('SECPROV_SERVER_AUTH',
'server authentication')
"""Authentication verifies the identity of the server."""
SECPROV_CLIENT_ENCRYPTED_SECRET = \
ProvisionSet.register_flag('SECPROV_CLIENT_ENCRYPTED_SECRET',
'encrypted client secret')
"""Client authentication uses an encrypted secret.
The client never sends its authentication secret in cleartext or in an
insecure coding. It either sends the secret in an encrypted form or it
sends some derived certificate of its identity.
"""
SECPROV_CLIENT_NEVER_SENDS_SECRET = \
ProvisionSet.register_flag('SECPROV_CLIENT_NEVER_SENDS_SECRET',
'client never sends secret',
implied=SECPROV_CLIENT_ENCRYPTED_SECRET)
"""The client never sends its secret in any form.
The client sends a verifiable claim of its authentication secret rather
than sending the secret itself in any form.
.. note::
This implies :const:`SECPROV_CLIENT_ENCRYPTED_SECRET`.
"""
SECPROV_SERVER_NONCE = ProvisionSet.register_flag('SECPROV_SERVER_NONCE',
'server-side nonce')
"""Authentication uses a server-side nonce.
The server challenges the client with a nonce value and requires it in
the client's response.
When serving a response to a request with a particular server-side
nonce, the server verifies that it is valid for the request. This flag
does not identify how such validity is determined.
"""
SECPROV_SERVER_NONCE_PER_RESOURCE = \
ProvisionSet.register_flag('SECPROV_SERVER_NONCE_PER_RESOURCE',
'server-side nonce per resource',
implied=SECPROV_SERVER_NONCE)
"""Authentication uses a unique server-side nonce for each resource.
.. note::
This implies :const:`SECPROV_SERVER_NONCE`.
"""
SECPROV_SERVER_NONCE_PER_REQUEST = \
ProvisionSet\
.register_flag('SECPROV_SERVER_NONCE_PER_REQUEST',
'server-side nonce per request',
implied=(SECPROV_SERVER_NONCE
| SECPROV_SERVER_NONCE_PER_RESOURCE))
"""Authentication uses a unique server-side nonce for each request.
The server does not allow a previously used server-side nonce to be
reused, even if the client's authentication token is otherwise valid.
In other words, each server-side nonce expires after its first use.
As a consequence, every request involves a challenge-response handshake.
.. note::
This implies :const:`SECPROV_SERVER_NONCE` and
:const:`SECPROV_SERVER_NONCE_PER_RESOURCE`.
"""
SECPROV_SERVER_NONCE_USE_COUNT = \
ProvisionSet.register_flag('SECPROV_SERVER_NONCE_USE_COUNT',
'server-side nonce use count',
implied=SECPROV_SERVER_NONCE)
"""Authentication verifies the server-side nonce use count.
The server maintains a record of how many times each nonce has been
used and requires each subsequent use to identify itself with an
incremented count.
When serving a response to a request with a particular nonce use count,
the server includes the same count. The client should verify that the
counts match.
.. note::
This implies :const:`SECPROV_SERVER_NONCE`.
"""
SECPROV_CLIENT_NONCE = ProvisionSet.register_flag('SECPROV_CLIENT_NONCE',
'client-side nonce')
"""Authentication uses a client-side nonce.
The client challenges the server with a nonce value and requires it in
the server's response.
When serving a response to a request with a particular client-side
nonce, the server includes the same nonce. The client should verify
that the nonce is valid.
"""
SECPROV_REQUEST_ENTITY_INTEGRITY = \
ProvisionSet.register_flag('SECPROV_REQUEST_ENTITY_INTEGRITY',
'request entity integrity')
"""Authentication verifies the integrity of each request entity.
The client provides an authentication token that includes information
that the server uses to verify the integrity of any entity that is
included in the request.
"""
| lgpl-3.0 | 6,038,507,939,569,562,000 | 34.505319 | 77 | 0.585094 | false |
jfelectron/python-driver | cassandra/cqlengine/models.py | 1 | 33196 | # Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import six
import warnings
from cassandra.cqlengine import CQLEngineException, ValidationError
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine import query
from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist
from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned
from cassandra.metadata import protect_name
from cassandra.util import OrderedDict
log = logging.getLogger(__name__)
class ModelException(CQLEngineException):
pass
class ModelDefinitionException(ModelException):
pass
class PolymorphicModelException(ModelException):
pass
class UndefinedKeyspaceWarning(Warning):
pass
DEFAULT_KEYSPACE = None
class hybrid_classmethod(object):
"""
Allows a method to behave as both a class method and
normal instance method depending on how it's called
"""
def __init__(self, clsmethod, instmethod):
self.clsmethod = clsmethod
self.instmethod = instmethod
def __get__(self, instance, owner):
if instance is None:
return self.clsmethod.__get__(owner, owner)
else:
return self.instmethod.__get__(instance, owner)
def __call__(self, *args, **kwargs):
"""
Just a hint to IDEs that it's ok to call this
"""
raise NotImplementedError
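# Usage sketch: hybrid_classmethod is what lets ``batch`` (defined near the end of
# BaseModel below) work both as ``Model.batch(b)`` and ``instance.batch(b)``:
#     batch = hybrid_classmethod(_class_batch, _inst_batch)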
class QuerySetDescriptor(object):
"""
    returns a fresh queryset for the model it's
    declared on, every time it's accessed
"""
def __get__(self, obj, model):
""" :rtype: ModelQuerySet """
if model.__abstract__:
raise CQLEngineException('cannot execute queries against abstract models')
queryset = model.__queryset__(model)
# if this is a concrete polymorphic model, and the discriminator
# key is an indexed column, add a filter clause to only return
# logical rows of the proper type
if model._is_polymorphic and not model._is_polymorphic_base:
name, column = model._discriminator_column_name, model._discriminator_column
if column.partition_key or column.index:
# look for existing poly types
return queryset.filter(**{name: model.__discriminator_value__})
return queryset
def __call__(self, *args, **kwargs):
"""
Just a hint to IDEs that it's ok to call this
:rtype: ModelQuerySet
"""
raise NotImplementedError
class TransactionDescriptor(object):
"""
returns a query set descriptor
"""
def __get__(self, instance, model):
if instance:
def transaction_setter(*prepared_transaction, **unprepared_transactions):
if len(prepared_transaction) > 0:
transactions = prepared_transaction[0]
else:
transactions = instance.objects.iff(**unprepared_transactions)._transaction
instance._transaction = transactions
return instance
return transaction_setter
qs = model.__queryset__(model)
def transaction_setter(**unprepared_transactions):
transactions = model.objects.iff(**unprepared_transactions)._transaction
qs._transaction = transactions
return qs
return transaction_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TTLDescriptor(object):
"""
returns a query set descriptor
"""
def __get__(self, instance, model):
if instance:
# instance = copy.deepcopy(instance)
# instance method
def ttl_setter(ts):
instance._ttl = ts
return instance
return ttl_setter
qs = model.__queryset__(model)
def ttl_setter(ts):
qs._ttl = ts
return qs
return ttl_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TimestampDescriptor(object):
"""
returns a query set descriptor with a timestamp specified
"""
def __get__(self, instance, model):
if instance:
# instance method
def timestamp_setter(ts):
instance._timestamp = ts
return instance
return timestamp_setter
return model.objects.timestamp
def __call__(self, *args, **kwargs):
raise NotImplementedError
class IfNotExistsDescriptor(object):
"""
    return a query set descriptor with an if_not_exists flag specified
"""
def __get__(self, instance, model):
if instance:
# instance method
def ifnotexists_setter(ife):
instance._if_not_exists = ife
return instance
return ifnotexists_setter
return model.objects.if_not_exists
def __call__(self, *args, **kwargs):
raise NotImplementedError
class ConsistencyDescriptor(object):
"""
    returns a query set descriptor if called on the class, or the instance if it was an instance call
"""
def __get__(self, instance, model):
if instance:
# instance = copy.deepcopy(instance)
def consistency_setter(consistency):
instance.__consistency__ = consistency
return instance
return consistency_setter
qs = model.__queryset__(model)
def consistency_setter(consistency):
qs._consistency = consistency
return qs
return consistency_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class ColumnQueryEvaluator(query.AbstractQueryableColumn):
"""
Wraps a column and allows it to be used in comparator
expressions, returning query operators
ie:
Model.column == 5
"""
def __init__(self, column):
self.column = column
def __unicode__(self):
return self.column.db_field_name
def _get_column(self):
""" :rtype: ColumnQueryEvaluator """
return self.column
class ColumnDescriptor(object):
"""
Handles the reading and writing of column values to and from
a model instance's value manager, as well as creating
comparator queries
"""
def __init__(self, column):
"""
:param column:
:type column: columns.Column
:return:
"""
self.column = column
self.query_evaluator = ColumnQueryEvaluator(self.column)
def __get__(self, instance, owner):
"""
Returns either the value or column, depending
on if an instance is provided or not
:param instance: the model instance
:type instance: Model
"""
try:
return instance._values[self.column.column_name].getval()
except AttributeError:
return self.query_evaluator
def __set__(self, instance, value):
"""
Sets the value on an instance, raises an exception with classes
TODO: use None instance to create update statements
"""
if instance:
return instance._values[self.column.column_name].setval(value)
else:
raise AttributeError('cannot reassign column values')
def __delete__(self, instance):
"""
Sets the column value to None, if possible
"""
if instance:
if self.column.can_delete:
instance._values[self.column.column_name].delval()
else:
raise AttributeError('cannot delete {0} columns'.format(self.column.column_name))
class BaseModel(object):
"""
The base model class, don't inherit from this, inherit from Model, defined below
"""
class DoesNotExist(_DoesNotExist):
pass
class MultipleObjectsReturned(_MultipleObjectsReturned):
pass
objects = QuerySetDescriptor()
ttl = TTLDescriptor()
consistency = ConsistencyDescriptor()
iff = TransactionDescriptor()
# custom timestamps, see USING TIMESTAMP X
timestamp = TimestampDescriptor()
if_not_exists = IfNotExistsDescriptor()
# _len is lazily created by __len__
__table_name__ = None
__keyspace__ = None
__discriminator_value__ = None
__options__ = None
# the queryset class used for this class
__queryset__ = query.ModelQuerySet
__dmlquery__ = query.DMLQuery
__consistency__ = None # can be set per query
_timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP)
_if_not_exists = False # optional if_not_exists flag to check existence before insertion
_table_name = None # used internally to cache a derived table name
def __init__(self, **values):
self._values = {}
self._ttl = self.__default_ttl__
self._timestamp = None
self._transaction = None
for name, column in self._columns.items():
value = values.get(name, None)
if value is not None or isinstance(column, columns.BaseContainerColumn):
value = column.to_python(value)
value_mngr = column.value_manager(self, column, value)
if name in values:
value_mngr.explicit = True
self._values[name] = value_mngr
# a flag set by the deserializer to indicate
# that update should be used when persisting changes
self._is_persisted = False
self._batch = None
self._timeout = connection.NOT_SET
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__,
', '.join('{0}={1!r}'.format(k, getattr(self, k))
for k in self._defined_columns.keys()
if k != self._discriminator_column_name))
def __str__(self):
"""
Pretty printing of models by their primary key
"""
return '{0} <{1}>'.format(self.__class__.__name__,
', '.join('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys()))
@classmethod
def _discover_polymorphic_submodels(cls):
if not cls._is_polymorphic_base:
raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes')
def _discover(klass):
if not klass._is_polymorphic_base and klass.__discriminator_value__ is not None:
cls._discriminator_map[klass.__discriminator_value__] = klass
for subklass in klass.__subclasses__():
_discover(subklass)
_discover(cls)
@classmethod
def _get_model_by_discriminator_value(cls, key):
if not cls._is_polymorphic_base:
raise ModelException('_get_model_by_discriminator_value can only be called on polymorphic base classes')
return cls._discriminator_map.get(key)
@classmethod
def _construct_instance(cls, values):
"""
method used to construct instances from query results
this is where polymorphic deserialization occurs
"""
# we're going to take the values, which is from the DB as a dict
# and translate that into our local fields
# the db_map is a db_field -> model field map
items = values.items()
field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items])
if cls._is_polymorphic:
disc_key = field_dict.get(cls._discriminator_column_name)
if disc_key is None:
raise PolymorphicModelException('discriminator value was not found in values')
poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
poly_base._discover_polymorphic_submodels()
klass = poly_base._get_model_by_discriminator_value(disc_key)
if klass is None:
raise PolymorphicModelException(
'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__)
)
if not issubclass(klass, cls):
raise PolymorphicModelException(
'{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)
)
field_dict = dict((k, v) for k, v in field_dict.items() if k in klass._columns.keys())
else:
klass = cls
instance = klass(**field_dict)
instance._is_persisted = True
return instance
def _can_update(self):
"""
Called by the save function to check if this should be
persisted with update or insert
:return:
"""
if not self._is_persisted:
return False
return all([not self._values[k].changed for k in self._primary_keys])
@classmethod
def _get_keyspace(cls):
"""
Returns the manual keyspace, if set, otherwise the default keyspace
"""
return cls.__keyspace__ or DEFAULT_KEYSPACE
@classmethod
def _get_column(cls, name):
"""
Returns the column matching the given name, raising a key error if
it doesn't exist
:param name: the name of the column to return
:rtype: Column
"""
return cls._columns[name]
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
# check attribute keys
keys = set(self._columns.keys())
other_keys = set(other._columns.keys())
if keys != other_keys:
return False
# check that all of the attributes match
for key in other_keys:
if getattr(self, key, None) != getattr(other, key, None):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def column_family_name(cls, include_keyspace=True):
"""
Returns the column family name if it's been defined
otherwise, it creates it from the module and class name
"""
cf_name = protect_name(cls._raw_column_family_name())
if include_keyspace:
return '{0}.{1}'.format(protect_name(cls._get_keyspace()), cf_name)
return cf_name
@classmethod
def _raw_column_family_name(cls):
if not cls._table_name:
if cls.__table_name__:
cls._table_name = cls.__table_name__.lower()
else:
if cls._is_polymorphic and not cls._is_polymorphic_base:
cls._table_name = cls._polymorphic_base._raw_column_family_name()
else:
camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2).lower()), s)
cf_name = ccase(cls.__name__)
# trim to less than 48 characters or cassandra will complain
cf_name = cf_name[-48:]
cf_name = cf_name.lower()
cf_name = re.sub(r'^_+', '', cf_name)
cls._table_name = cf_name
return cls._table_name
def validate(self):
"""
Cleans and validates the field values
"""
for name, col in self._columns.items():
v = getattr(self, name)
if v is None and not self._values[name].explicit and col.has_default:
v = col.get_default()
val = col.validate(v)
setattr(self, name, val)
# Let an instance be used like a dict of its columns keys/values
def __iter__(self):
""" Iterate over column ids. """
for column_id in self._columns.keys():
yield column_id
def __getitem__(self, key):
""" Returns column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return getattr(self, key)
def __setitem__(self, key, val):
""" Sets a column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return setattr(self, key, val)
def __len__(self):
"""
Returns the number of columns defined on that model.
"""
try:
return self._len
        except AttributeError:
self._len = len(self._columns.keys())
return self._len
def keys(self):
""" Returns a list of column IDs. """
return [k for k in self]
def values(self):
""" Returns list of column values. """
return [self[k] for k in self]
def items(self):
""" Returns a list of column ID/value tuples. """
return [(k, self[k]) for k in self]
def _as_dict(self):
""" Returns a map of column names to cleaned values """
values = self._dynamic_columns or {}
for name, col in self._columns.items():
values[name] = col.to_database(getattr(self, name, None))
return values
@classmethod
def create(cls, **kwargs):
"""
Create an instance of this model in the database.
Takes the model column values as keyword arguments.
Returns the instance.
"""
extra_columns = set(kwargs.keys()) - set(cls._columns.keys())
if extra_columns:
raise ValidationError("Incorrect columns passed: {0}".format(extra_columns))
return cls.objects.create(**kwargs)
@classmethod
def all(cls):
"""
Returns a queryset representing all stored objects
This is a pass-through to the model objects().all()
"""
return cls.objects.all()
@classmethod
def filter(cls, *args, **kwargs):
"""
Returns a queryset based on filter parameters.
This is a pass-through to the model objects().:method:`~cqlengine.queries.filter`.
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def get(cls, *args, **kwargs):
"""
Returns a single object based on the passed filter constraints.
This is a pass-through to the model objects().:method:`~cqlengine.queries.get`.
"""
return cls.objects.get(*args, **kwargs)
def timeout(self, timeout):
"""
Sets a timeout for use in :meth:`~.save`, :meth:`~.update`, and :meth:`~.delete`
operations
"""
assert self._batch is None, 'Setting both timeout and batch is not supported'
self._timeout = timeout
return self
def save(self):
"""
Saves an object to the database.
.. code-block:: python
#create a person instance
person = Person(first_name='Kimberly', last_name='Eggleston')
#saves it to Cassandra
person.save()
"""
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot save polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
if_not_exists=self._if_not_exists,
transaction=self._transaction,
timeout=self._timeout).save()
# reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def update(self, **values):
"""
Performs an update on the model instance. You can pass in values to set on the model
for updating, or you can call without values to execute an update against any modified
fields. If no fields on the model have been modified since loading, no query will be
performed. Model validation is performed normally.
It is possible to do a blind update, that is, to update a field without having first selected the object out of the database.
See :ref:`Blind Updates <blind_updates>`
"""
for k, v in values.items():
col = self._columns.get(k)
            # check for nonexistent columns
if col is None:
raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k))
# check for primary key update attempts
if col.is_primary_key:
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__))
setattr(self, k, v)
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot update polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
transaction=self._transaction,
timeout=self._timeout).update()
# reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def delete(self):
"""
Deletes the object from the database
"""
self.__dmlquery__(self.__class__, self,
batch=self._batch,
timestamp=self._timestamp,
consistency=self.__consistency__,
timeout=self._timeout).delete()
def get_changed_columns(self):
"""
Returns a list of the columns that have been updated since instantiation or save
"""
return [k for k, v in self._values.items() if v.changed]
@classmethod
def _class_batch(cls, batch):
return cls.objects.batch(batch)
def _inst_batch(self, batch):
assert self._timeout is connection.NOT_SET, 'Setting both timeout and batch is not supported'
self._batch = batch
return self
batch = hybrid_classmethod(_class_batch, _inst_batch)
class ModelMetaClass(type):
def __new__(cls, name, bases, attrs):
# move column definitions into columns dict
# and set default column names
column_dict = OrderedDict()
primary_keys = OrderedDict()
pk_name = None
# get inherited properties
inherited_columns = OrderedDict()
for base in bases:
for k, v in getattr(base, '_defined_columns', {}).items():
inherited_columns.setdefault(k, v)
# short circuit __abstract__ inheritance
is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)
# short circuit __discriminator_value__ inheritance
attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')
options = attrs.get('__options__') or {}
attrs['__default_ttl__'] = options.get('default_time_to_live')
def _transform_column(col_name, col_obj):
column_dict[col_name] = col_obj
if col_obj.primary_key:
primary_keys[col_name] = col_obj
col_obj.set_column_name(col_name)
# set properties
attrs[col_name] = ColumnDescriptor(col_obj)
column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
column_definitions = sorted(column_definitions, key=lambda x: x[1].position)
is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions])
column_definitions = [x for x in inherited_columns.items()] + column_definitions
discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
is_polymorphic = len(discriminator_columns) > 0
if len(discriminator_columns) > 1:
raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))
if attrs['__discriminator_value__'] and not is_polymorphic:
raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True')
discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None)
if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)):
raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns')
# find polymorphic base class
polymorphic_base = None
if is_polymorphic and not is_polymorphic_base:
def _get_polymorphic_base(bases):
for base in bases:
if getattr(base, '_is_polymorphic_base', False):
return base
klass = _get_polymorphic_base(base.__bases__)
if klass:
return klass
polymorphic_base = _get_polymorphic_base(bases)
defined_columns = OrderedDict(column_definitions)
# check for primary key
if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
raise ModelDefinitionException("At least 1 primary key is required.")
counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)]
data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)]
if counter_columns and data_columns:
raise ModelDefinitionException('counter models may not have data columns')
has_partition_keys = any(v.partition_key for (k, v) in column_definitions)
# transform column definitions
for k, v in column_definitions:
# don't allow a column with the same name as a built-in attribute or method
if k in BaseModel.__dict__:
raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k))
# counter column primary keys are not allowed
if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)):
raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys')
# this will mark the first primary key column as a partition
# key, if one hasn't been set already
if not has_partition_keys and v.primary_key:
v.partition_key = True
has_partition_keys = True
_transform_column(k, v)
partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key)
clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key)
# setup partition key shortcut
if len(partition_keys) == 0:
if not is_abstract:
raise ModelException("at least one partition key must be defined")
if len(partition_keys) == 1:
pk_name = [x for x in partition_keys.keys()][0]
attrs['pk'] = attrs[pk_name]
else:
# composite partition key case, get/set a tuple of values
_get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys())
_set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
attrs['pk'] = property(_get, _set)
# some validation
col_names = set()
for v in column_dict.values():
# check for duplicate column names
if v.db_field_name in col_names:
raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name))
if v.clustering_order and not (v.primary_key and not v.partition_key):
raise ModelException("clustering_order may be specified only for clustering primary keys")
if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'):
raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name))
col_names.add(v.db_field_name)
# create db_name -> model name map for loading
db_map = {}
for field_name, col in column_dict.items():
db_map[col.db_field_name] = field_name
# add management members to the class
attrs['_columns'] = column_dict
attrs['_primary_keys'] = primary_keys
attrs['_defined_columns'] = defined_columns
# maps the database field to the models key
attrs['_db_map'] = db_map
attrs['_pk_name'] = pk_name
attrs['_dynamic_columns'] = {}
attrs['_partition_keys'] = partition_keys
attrs['_clustering_keys'] = clustering_keys
attrs['_has_counter'] = len(counter_columns) > 0
# add polymorphic management attributes
attrs['_is_polymorphic_base'] = is_polymorphic_base
attrs['_is_polymorphic'] = is_polymorphic
attrs['_polymorphic_base'] = polymorphic_base
attrs['_discriminator_column'] = discriminator_column
attrs['_discriminator_column_name'] = discriminator_column_name
attrs['_discriminator_map'] = {} if is_polymorphic_base else None
# setup class exceptions
DoesNotExistBase = None
for base in bases:
DoesNotExistBase = getattr(base, 'DoesNotExist', None)
if DoesNotExistBase is not None:
break
DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})
MultipleObjectsReturnedBase = None
for base in bases:
MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
if MultipleObjectsReturnedBase is not None:
break
        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {})
# create the class and add a QuerySet to it
klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)
udts = []
for col in column_dict.values():
columns.resolve_udts(col, udts)
for user_type in set(udts):
user_type.register_for_keyspace(klass._get_keyspace())
return klass
@six.add_metaclass(ModelMetaClass)
class Model(BaseModel):
__abstract__ = True
"""
*Optional.* Indicates that this model is only intended to be used as a base class for other models.
You can't create tables for abstract models, but checks around schema validity are skipped during class construction.
"""
__table_name__ = None
"""
    *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be the name of the model, with its module name as its prefix. Manually defined table names are not inherited.
"""
__keyspace__ = None
"""
Sets the name of the keyspace used by this model.
"""
__options__ = None
"""
*Optional* Table options applied with this model
    (e.g. compaction, default ttl, cache settings, etc.)
"""
__discriminator_value__ = None
"""
*Optional* Specifies a value for the discriminator column when using model inheritance.
"""
| apache-2.0 | 7,909,401,571,655,610,000 | 34.165254 | 209 | 0.596066 | false |
felipead/cassandra-logger | integration-tests/conftest.py | 1 | 1525 | # Standard py.test configuration file. Fixtures defined here will be available in all modules.
from cassandra.cluster import Cluster
import pytest
from log_entry import LogEntryStore
# This variable tells py.test which files and folders to ignore
collect_ignore = ["env"]
@pytest.fixture(scope="module")
def fixture_keyspace():
return "logger_test"
@pytest.fixture(scope="module")
def log_keyspace():
return "logger"
@pytest.fixture(scope="module")
def log_table():
return "log"
@pytest.fixture(scope="module")
def log_trigger_name():
return "com.felipead.cassandra.logger.LogTrigger"
# noinspection PyShadowingNames
@pytest.fixture(scope="module")
def log_table_identifier(log_keyspace, log_table):
return "%s.%s" % (log_keyspace, log_table)
@pytest.fixture(scope="module")
def cluster():
return Cluster(["127.0.0.1"])
# noinspection PyShadowingNames
@pytest.fixture(scope="module")
def session(cluster):
return cluster.connect()
# noinspection PyShadowingNames
@pytest.fixture(scope="module")
def log_entry_store(session, log_table_identifier):
return LogEntryStore(session, log_table_identifier)
# noinspection PyShadowingNames
@pytest.fixture(scope="module")
def create_fixture_keyspace(session, fixture_keyspace):
session.execute("DROP KEYSPACE IF EXISTS %s" % fixture_keyspace)
session.execute(
"""
CREATE KEYSPACE %s
WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':1};
"""
% fixture_keyspace
)
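# Usage sketch (hypothetical test module): tests simply request these fixtures by name,
# e.g. to exercise the log trigger against the fixture keyspace:
#     def test_insert_is_logged(session, log_entry_store, create_fixture_keyspace):
#         ...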
| apache-2.0 | 2,637,115,291,656,845,300 | 22.461538 | 94 | 0.719344 | false |
dhinakg/BitSTAR | plugins/cacheutils.py | 1 | 2177 | # Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Get cache usage info'''
import glob
from api import command, message, plugin, bot
def onInit(plugin_in):
'''List commands for plugin.'''
desc_base = 'Count the number of cached items '
desc_cachecount = desc_base + 'for a command'
desc_caches = desc_base + 'per command'
desc_totalcache = desc_base + 'stored'
cachecount_command = command.Command(plugin_in, 'cachecount', shortdesc=desc_cachecount, devcommand=True)
caches_command = command.Command(plugin_in, 'caches', shortdesc=desc_caches, devcommand=True)
totalcache_command = command.Command(plugin_in, 'totalcache', shortdesc=desc_totalcache, devcommand=True)
return plugin.Plugin(plugin_in, 'cacheutils', [cachecount_command, caches_command, totalcache_command])
async def onCommand(message_in):
'''Run plugin commands.'''
if message_in.command == 'cachecount':
if message_in.body == '':
return message.Message(body='No plugin specified')
return message.Message(body='```{}```'.format(len(glob.glob('cache/{}_*'.format(message_in.body.strip())))))
if message_in.command == 'caches':
cache_str = ''
for cmd in bot.Bot.commands:
cmd_cache_size = len(glob.glob('cache/{}_*'.format(cmd.name)))
if cmd_cache_size > 0:
cache_str += '{} - {}\n'.format(cmd.name, cmd_cache_size)
return message.Message(body='```{}```'.format(cache_str))
if message_in.command == 'totalcache':
return message.Message(body='```{}```'.format(len(glob.glob('cache/*'))))
| apache-2.0 | -4,483,572,499,845,762,600 | 45.319149 | 116 | 0.66927 | false |
supermarkion/Life-Backup | Python/4Sum.py | 1 | 1307 | '''
Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all
unique quadruplets in the array which gives the sum of target.
Link: https://leetcode.com/problems/4sum/#/description
Example:
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
Solution:
Here I use a dict to store the indices of each two-sum value, then use a set to drop
duplicate results, and convert the answers into lists when finished.
Source: None
'''
import collections
import itertools
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums, dic = sorted(nums), collections.defaultdict(list)
for (i, a), (j, b) in itertools.combinations(enumerate(nums), 2):
dic[a + b].append([i, j])
res = set(tuple(sorted(nums[i] for i in (head + tail))) for ab in dic \
if target - ab in dic for head in dic[ab] for tail in dic[target - ab] \
if len(set(head + tail)) == 4)
return list(map(list, res))
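# Example run (taken from the problem statement above; ordering of quadruplets may differ):
#     Solution().fourSum([1, 0, -1, 0, -2, 2], 0)
#     # -> [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]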
| mit | 1,927,763,487,455,768,300 | 28.704545 | 117 | 0.556236 | false |
OSSOS/MOP | src/ossos/core/ossos/vospace.py | 1 | 1041 | """This module abstracts all vospace activities. Including a switch to using username/password pairs."""
from getpass import getpass
from requests.auth import HTTPBasicAuth
from vos.vos import Client, Connection
import sys
import types
import netrc
import logging
logging.getLogger('vos').setLevel(logging.ERROR)
VOSPACE_SERVER = "www.canfar.phys.uvic.ca"
class Wrapper(Client):
def __getattribute__(self, item):
func = object.__getattribute__(self, item)
if isinstance(func, types.MethodType):
def my_func(*args, **kwargs):
sys.stderr.write("{} -> {} {}\n".format(item, args[1:], kwargs))
result = func(*args[1:], **kwargs)
sys.stderr.write("{} <- {}\n".format(item, result))
return result
meth = types.MethodType(my_func, self, Client)
elif isinstance(func, Connection):
sys.stderr.write("*-"*40+"\n")
meth = func
else:
meth = func
return meth
client = Client()
| gpl-3.0 | 5,671,532,576,452,423,000 | 28.742857 | 105 | 0.607109 | false |
malinoff/pytest | _pytest/hookspec.py | 1 | 11977 | """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
from _pytest._pluggy import HookspecMarker
hookspec = HookspecMarker("pytest")
# -------------------------------------------------------------------------
# Initialization hooks called for every plugin
# -------------------------------------------------------------------------
@hookspec(historic=True)
def pytest_addhooks(pluginmanager):
"""called at plugin registration time to allow adding new hooks via a call to
pluginmanager.add_hookspecs(module_or_class, prefix)."""
@hookspec(historic=True)
def pytest_namespace():
"""return dict of name->object to be made globally available in
the pytest namespace. This hook is called at plugin registration
time.
"""
@hookspec(historic=True)
def pytest_plugin_registered(plugin, manager):
""" a new pytest plugin got registered. """
@hookspec(historic=True)
def pytest_addoption(parser):
"""register argparse-style options and ini-style config values,
called once at the beginning of a test run.
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
:arg parser: To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.Parser.addini>`.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
- :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
retrieve the value of a command line option.
- :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
a value read from an ini-style file.
The config object is passed around on many internal objects via the ``.config``
attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
via (deprecated) ``pytest.config``.
"""
@hookspec(historic=True)
def pytest_configure(config):
""" called after command line options have been parsed
and all plugins and initial conftest files been loaded.
This hook is called for every plugin.
"""
# -------------------------------------------------------------------------
# Bootstrapping hooks called for plugins registered early enough:
# internal and 3rd party plugins as well as directly
# discoverable conftest.py local plugins.
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_cmdline_parse(pluginmanager, args):
"""return initialized config object, parsing the specified args. """
def pytest_cmdline_preparse(config, args):
"""(deprecated) modify command line arguments before option parsing. """
@hookspec(firstresult=True)
def pytest_cmdline_main(config):
""" called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop. """
def pytest_load_initial_conftests(early_config, parser, args):
""" implements the loading of initial conftest files ahead
of command line option parsing. """
# -------------------------------------------------------------------------
# collection hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_collection(session):
""" perform the collection protocol for the given session. """
def pytest_collection_modifyitems(session, config, items):
""" called after collection has been performed, may filter or re-order
the items in-place."""
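# Sketch of an implementation (the "slow" marker name is illustrative):
#     def pytest_collection_modifyitems(session, config, items):
#         items.sort(key=lambda item: item.get_marker("slow") is not None)  # slow tests last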
def pytest_collection_finish(session):
""" called after collection has been performed and modified. """
@hookspec(firstresult=True)
def pytest_ignore_collect(path, config):
""" return True to prevent considering this path for collection.
This hook is consulted for all files and directories prior to calling
more specific hooks.
"""
@hookspec(firstresult=True)
def pytest_collect_directory(path, parent):
""" called before traversing a directory for collection files. """
def pytest_collect_file(path, parent):
""" return collection Node or None for the given path. Any new node
needs to have the specified ``parent`` as a parent."""
# logging hooks for collection
def pytest_collectstart(collector):
""" collector starts collecting. """
def pytest_itemcollected(item):
""" we just collected a test item. """
def pytest_collectreport(report):
""" collector finished collecting. """
def pytest_deselected(items):
""" called for test items deselected by keyword. """
@hookspec(firstresult=True)
def pytest_make_collect_report(collector):
""" perform ``collector.collect()`` and return a CollectReport. """
# -------------------------------------------------------------------------
# Python test function related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_pycollect_makemodule(path, parent):
""" return a Module collector or None for the given path.
This hook will be called for each matching test module path.
The pytest_collect_file hook needs to be used if you want to
create test modules for files that do not match as a test module.
"""
@hookspec(firstresult=True)
def pytest_pycollect_makeitem(collector, name, obj):
""" return custom item/collector for a python object in a module, or None. """
@hookspec(firstresult=True)
def pytest_pyfunc_call(pyfuncitem):
""" call underlying test function. """
def pytest_generate_tests(metafunc):
""" generate (multiple) parametrized calls to a test function."""
@hookspec(firstresult=True)
def pytest_make_parametrize_id(config, val):
"""Return a user-friendly string representation of the given ``val`` that will be used
by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
"""
# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_runtestloop(session):
""" called for performing the main runtest loop
(after collection finished). """
def pytest_itemstart(item, node):
""" (deprecated, use pytest_runtest_logstart). """
@hookspec(firstresult=True)
def pytest_runtest_protocol(item, nextitem):
""" implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
"""
def pytest_runtest_logstart(nodeid, location):
""" signal the start of running a single test item. """
def pytest_runtest_setup(item):
""" called before ``pytest_runtest_call(item)``. """
def pytest_runtest_call(item):
""" called to execute the test ``item``. """
def pytest_runtest_teardown(item, nextitem):
""" called after ``pytest_runtest_call``.
:arg nextitem: the scheduled-to-be-next test item (None if no further
test item is scheduled). This argument can be used to
perform exact teardowns, i.e. calling just enough finalizers
so that nextitem only needs to call setup-functions.
"""
@hookspec(firstresult=True)
def pytest_runtest_makereport(item, call):
""" return a :py:class:`_pytest.runner.TestReport` object
for the given :py:class:`pytest.Item` and
:py:class:`_pytest.runner.CallInfo`.
"""
def pytest_runtest_logreport(report):
""" process a test setup/call/teardown report relating to
the respective phase of executing a test. """
# -------------------------------------------------------------------------
# Fixture related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_fixture_setup(fixturedef, request):
""" performs fixture setup execution. """
@hookspec(firstresult=True)
def pytest_fixture_finalize(fixturedef, finalizer):
""" performs fixture finalization. """
def pytest_fixture_post_finalizer(fixturedef):
""" called after fixture teardown, but before the cache is cleared so
the fixture result cache ``fixturedef.cached_result`` can
still be accessed."""
# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
def pytest_sessionstart(session):
""" before session.main() is called. """
def pytest_sessionfinish(session, exitstatus):
""" whole test run finishes. """
def pytest_unconfigure(config):
""" called before test process is exited. """
# -------------------------------------------------------------------------
# hooks for customising the assert methods
# -------------------------------------------------------------------------
def pytest_assertrepr_compare(config, op, left, right):
"""return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
of strings. The strings will be joined by newlines but any newlines
*in* a string will be escaped. Note that all but the first line will
    be indented slightly, the intention is for the first line to be a summary.
"""
# -------------------------------------------------------------------------
# hooks for influencing reporting (invoked from _pytest_terminal)
# -------------------------------------------------------------------------
def pytest_report_header(config, startdir):
""" return a string to be displayed as header info for terminal reporting."""
@hookspec(firstresult=True)
def pytest_report_teststatus(report):
""" return result-category, shortletter and verbose word for reporting."""
def pytest_terminal_summary(terminalreporter, exitstatus):
""" add additional section in terminal summary reporting. """
@hookspec(historic=True)
def pytest_logwarning(message, code, nodeid, fslocation):
""" process a warning specified by a message, a code string,
a nodeid and fslocation (both of which may be None
    if the warning is not tied to a particular node/location)."""
# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_doctest_prepare_content(content):
""" return processed content for a given doctest"""
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
def pytest_internalerror(excrepr, excinfo):
""" called for internal errors. """
def pytest_keyboard_interrupt(excinfo):
""" called for keyboard interrupt. """
def pytest_exception_interact(node, call, report):
"""called when an exception was raised which can potentially be
interactively handled.
This hook is only called if an exception was raised
that is not an internal exception like ``skip.Exception``.
"""
def pytest_enter_pdb(config):
""" called upon pdb.set_trace(), can be used by plugins to take special
action just before the python debugger enters in interactive mode.
:arg config: pytest config object
:type config: _pytest.config.Config
"""
| mit | -2,296,602,434,055,089,700 | 36.663522 | 90 | 0.617266 | false |
skinny121/MCEdit-TallWorlds | pymclevel/leveldbpocket.py | 1 | 40558 | import itertools
import time
from math import floor
from level import FakeChunk, MCLevel
import logging
from materials import pocketMaterials
import os
from mclevelbase import ChunkNotPresent, ChunkMalformed
import nbt
import numpy
import struct
from infiniteworld import ChunkedLevelMixin, SessionLockLost, AnvilChunkData
from level import LightedChunk
from contextlib import contextmanager
from pymclevel import entity, BoundingBox, Entity, TileEntity
logger = logging.getLogger(__name__)
leveldb_available = True
leveldb_mcpe = None
try:
import leveldb_mcpe
leveldb_mcpe.Options()
except Exception as e:
leveldb_available = False
logger.info("Error while trying to import leveldb_mcpe, starting without PE support ({0})".format(e))
leveldb_mcpe = None
def loadNBTCompoundList(data, littleEndian=True):
"""
Loads a list of NBT Compound tags from a bunch of data.
Uses sep to determine where the next Compound tag starts.
:param data: str, the NBT to load from
:param littleEndian: bool. Determines endianness
:return: list of TAG_Compounds
"""
if type(data) is unicode:
data = str(data)
def load(_data):
sep = "\x00\x00\x00\x00\n"
sep_data = _data.split(sep)
compounds = []
for d in sep_data:
if len(d) != 0:
if not d.startswith("\n"):
d = "\n" + d
tag = (nbt.load(buf=(d + '\x00\x00\x00\x00')))
compounds.append(tag)
return compounds
if littleEndian:
with nbt.littleEndianNBT():
return load(data)
else:
return load(data)
def TagProperty(tagName, tagType, default_or_func=None):
"""
Copied from infiniteworld.py. Custom property object to handle NBT-tag properties.
:param tagName: str, Name of the NBT-tag
:param tagType: int, (nbt.TAG_TYPE) Type of the NBT-tag
:param default_or_func: function or default value. If function, function should return the default.
:return: property
"""
def getter(self):
if tagName not in self.root_tag:
if hasattr(default_or_func, "__call__"):
default = default_or_func(self)
else:
default = default_or_func
self.root_tag[tagName] = tagType(default)
return self.root_tag[tagName].value
def setter(self, val):
self.root_tag[tagName] = tagType(value=val)
return property(getter, setter)
class PocketLeveldbDatabase(object):
"""
Not to be confused with leveldb_mcpe.DB
A PocketLeveldbDatabase is an interface around leveldb_mcpe.DB, providing various functions
to load/write chunk data, and access the level.dat file.
The leveldb_mcpe.DB object handles the actual leveldb database.
To access the actual database, world_db() should be called.
"""
holdDatabaseOpen = True
_world_db = None
@contextmanager
def world_db(self):
"""
Opens a leveldb and keeps it open until editing finished.
:yield: DB
"""
if PocketLeveldbDatabase.holdDatabaseOpen:
if self._world_db is None:
self._world_db = leveldb_mcpe.DB(self.options, os.path.join(str(self.path), 'db'))
yield self._world_db
pass
else:
db = leveldb_mcpe.DB(self.options, os.path.join(str(self.path), 'db'))
yield db
del db
def __init__(self, path, create=False):
"""
:param path: string, path to file
:return: None
"""
self.path = path
if not os.path.exists(path):
file(path, 'w').close()
self.options = leveldb_mcpe.Options()
self.writeOptions = leveldb_mcpe.WriteOptions()
self.readOptions = leveldb_mcpe.ReadOptions()
if create:
self.options.create_if_missing = True # The database will be created once needed first.
return
needsRepair = False
try:
with self.world_db() as db:
it = db.NewIterator(self.readOptions)
it.SeekToFirst()
if not db.Get(self.readOptions, it.key()) == it.value():
needsRepair = True
it.status()
del it
except RuntimeError as err:
logger.error("Error while opening world database from %s (%s)"%(path, err))
needsRepair = True
if needsRepair:
logger.info("Trying to repair world %s"%path)
try:
leveldb_mcpe.RepairWrapper(os.path.join(path, 'db'))
except RuntimeError as err:
logger.error("Error while repairing world %s %s"%(path, err))
def close(self):
"""
Should be called before deleting this instance of the level.
Not calling this method may result in corrupted worlds
:return: None
"""
if PocketLeveldbDatabase.holdDatabaseOpen:
if self._world_db is not None:
del self._world_db
self._world_db = None
def _readChunk(self, cx, cz, readOptions=None):
"""
:param cx, cz: int Coordinates of the chunk
:param readOptions: ReadOptions
:return: None
"""
key = struct.pack('<i', cx) + struct.pack('<i', cz)
with self.world_db() as db:
rop = self.readOptions if readOptions is None else readOptions
# Only way to see if value exists is by failing db.Get()
try:
terrain = db.Get(rop, key + "0")
except RuntimeError:
return None
try:
tile_entities = db.Get(rop, key + "1")
except RuntimeError:
tile_entities = None
try:
entities = db.Get(rop, key + "2")
except RuntimeError:
entities = None
if len(terrain) != 83200:
raise ChunkMalformed(str(len(terrain)))
logger.debug("CHUNK LOAD %s %s"%(cx, cz))
return terrain, tile_entities, entities
def saveChunk(self, chunk, batch=None, writeOptions=None):
"""
:param chunk: PocketLeveldbChunk
:param batch: WriteBatch
:param writeOptions: WriteOptions
:return: None
"""
cx, cz = chunk.chunkPosition
data = chunk.savedData()
key = struct.pack('<i', cx) + struct.pack('<i', cz)
if batch is None:
with self.world_db() as db:
wop = self.writeOptions if writeOptions is None else writeOptions
db.Put(wop, key + "0", data[0])
if data[1] is not None:
db.Put(wop, key + "1", data[1])
if data[2] is not None:
db.Put(wop, key + "2", data[2])
else:
batch.Put(key + "0", data[0])
if data[1] is not None:
batch.Put(key + "1", data[1])
if data[2] is not None:
batch.Put(key + "2", data[2])
def loadChunk(self, cx, cz, world):
"""
:param cx, cz: int Coordinates of the chunk
:param world: PocketLeveldbWorld
:return: PocketLeveldbChunk
"""
data = self._readChunk(cx, cz)
if data is None:
raise ChunkNotPresent((cx, cz, self))
chunk = PocketLeveldbChunk(cx, cz, world, data)
return chunk
_allChunks = None
def deleteChunk(self, cx, cz, batch=None, writeOptions=None):
if batch is None:
with self.world_db() as db:
key = struct.pack('<i', cx) + struct.pack('<i', cz) + "0"
wop = self.writeOptions if writeOptions is None else writeOptions
db.Delete(wop, key)
else:
key = struct.pack('<i', cx) + struct.pack('<i', cz) + "0"
batch.Delete(key)
logger.debug("DELETED CHUNK %s %s"%(cx, cz))
def getAllChunks(self, readOptions=None):
"""
Returns a list of all chunks that have terrain data in the database.
Chunks with only Entities or TileEntities are ignored.
:param readOptions: ReadOptions
:return: list
"""
with self.world_db() as db:
allChunks = []
rop = self.readOptions if readOptions is None else readOptions
it = db.NewIterator(rop)
it.SeekToFirst()
while it.Valid():
key = it.key()
if len(key) != 9: # Bad. Hardcode since nether has length 13. Someone go fix nether.
it.Next()
continue
raw_x = key[0:4]
raw_z = key[4:8]
t = key[8]
if t == "0":
cx, cz = struct.unpack('<i', raw_x), struct.unpack('<i', raw_z)
allChunks.append((cx[0], cz[0]))
it.Next()
it.status() # All this does is cause an exception if something went wrong. Might be unneeded?
del it
return allChunks
def getAllPlayerData(self, readOptions=None):
"""
Returns the raw NBT data of all players in the database.
Every player is stored as player_<player-id>. The single-player player is stored as ~local_player
:param readOptions:
        :return: dict mapping player-id -> player NBT data as str
"""
with self.world_db() as db:
allPlayers = {}
rop = self.readOptions if readOptions is None else readOptions
it = db.NewIterator(rop)
it.SeekToFirst()
while it.Valid():
key = it.key()
if key == "~local_player": # Singleplayer
allPlayers[key] = it.value()
elif key.startswith('player_'): # Multiplayer player
allPlayers[key] = it.value()
it.Next()
it.status()
del it
return allPlayers
def savePlayer(self, player, playerData, batch=None, writeOptions=None):
if writeOptions is None:
writeOptions = self.writeOptions
if batch is None:
with self.world_db() as db:
db.Put(writeOptions, player, playerData)
else:
batch.Put(player, playerData)
class InvalidPocketLevelDBWorldException(Exception):
pass
class PocketLeveldbWorld(ChunkedLevelMixin, MCLevel):
Height = 128
Width = 0
Length = 0
isInfinite = True
materials = pocketMaterials
noTileTicks = True
_bounds = None
oldPlayerFolderFormat = False
_allChunks = None # An array of cx, cz pairs.
_loadedChunks = {} # A dictionary of actual PocketLeveldbChunk objects mapped by (cx, cz)
_playerData = None
playerTagCache = {}
_playerList = None
entityClass = entity.PocketEntity
@property
def LevelName(self):
if "LevelName" not in self.root_tag:
with open(os.path.join(self.worldFile.path, "levelname.txt"), 'r') as f:
name = f.read()
if name is None:
name = os.path.basename(self.worldFile.path)
self.root_tag["LevelName"] = name
return self.root_tag["LevelName"].value
@LevelName.setter
def LevelName(self, name):
self.root_tag["LevelName"] = nbt.TAG_String(value=name)
with open(os.path.join(self.worldFile.path, "levelname.txt"), 'w') as f:
f.write(name)
@property
def allChunks(self):
"""
:return: list with all chunks in the world.
"""
if self._allChunks is None:
self._allChunks = self.worldFile.getAllChunks()
return self._allChunks
@property
def players(self):
if self._playerList is None:
self._playerList = []
for key in self.playerData.keys():
self._playerList.append(key)
return self._playerList
@property
def playerData(self):
if self._playerData is None:
self._playerData = self.worldFile.getAllPlayerData()
return self._playerData
@staticmethod
def getPlayerPath(player, dim=0):
"""
player.py loads players from files, but PE caches them differently. This is necessary to make it work.
:param player: str
:param dim: int
:return: str
"""
if dim == 0:
return player
def __init__(self, filename=None, create=False, random_seed=None, last_played=None, readonly=False):
"""
:param filename: path to the root dir of the level
:return:
"""
if not os.path.isdir(filename):
filename = os.path.dirname(filename)
self.filename = filename
self.worldFile = PocketLeveldbDatabase(filename, create=create)
self.readonly = readonly
self.loadLevelDat(create, random_seed, last_played)
def _createLevelDat(self, random_seed, last_played):
"""
Creates a new level.dat root_tag, and puts it in self.root_tag.
To write it to the disk, self.save() should be called.
:param random_seed: long
:param last_played: long
:return: None
"""
with nbt.littleEndianNBT():
root_tag = nbt.TAG_Compound()
root_tag["SpawnX"] = nbt.TAG_Int(0)
root_tag["SpawnY"] = nbt.TAG_Int(2)
root_tag["SpawnZ"] = nbt.TAG_Int(0)
if last_played is None:
last_played = long(time.time() * 100)
if random_seed is None:
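                # Draw a random unsigned 64-bit value and shift it into the signed int64 range.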
random_seed = long(numpy.random.random() * 0xffffffffffffffffL) - 0x8000000000000000L
self.root_tag = root_tag
self.LastPlayed = long(last_played)
self.RandomSeed = long(random_seed)
self.SizeOnDisk = 0
self.Time = 1
self.LevelName = os.path.basename(self.worldFile.path)
def loadLevelDat(self, create=False, random_seed=None, last_played=None):
"""
Loads the level.dat from the worldfolder.
:param create: bool. If it's True, a fresh level.dat will be created instead.
:param random_seed: long
:param last_played: long
:return: None
"""
def _loadLevelDat(filename):
root_tag_buf = open(filename, 'rb').read()
magic, length, root_tag_buf = root_tag_buf[:4], root_tag_buf[4:8], root_tag_buf[8:]
if struct.Struct('<i').unpack(magic)[0] < 3:
logger.info("Found an old level.dat file. Aborting world load")
raise InvalidPocketLevelDBWorldException() # Maybe try convert/load old PE world?
if len(root_tag_buf) != struct.Struct('<i').unpack(length)[0]:
raise nbt.NBTFormatError()
self.root_tag = nbt.load(buf=root_tag_buf)
if create:
self._createLevelDat(random_seed, last_played)
return
try:
with nbt.littleEndianNBT():
_loadLevelDat(os.path.join(self.worldFile.path, "level.dat"))
return
except (nbt.NBTFormatError, IOError) as err:
logger.info("Failed to load level.dat, trying to load level.dat_old ({0})".format(err))
try:
with nbt.littleEndianNBT():
_loadLevelDat(os.path.join(self.worldFile.path, "level.dat_old"))
return
except (nbt.NBTFormatError, IOError) as err:
logger.info("Failed to load level.dat_old, creating new level.dat ({0})".format(err))
self._createLevelDat(random_seed, last_played)
# --- NBT Tag variables ---
SizeOnDisk = TagProperty('SizeOnDisk', nbt.TAG_Int, 0)
RandomSeed = TagProperty('RandomSeed', nbt.TAG_Long, 0)
# TODO PE worlds have a different day length, this has to be changed to that.
Time = TagProperty('Time', nbt.TAG_Long, 0)
LastPlayed = TagProperty('LastPlayed', nbt.TAG_Long, lambda self: long(time.time() * 1000))
GeneratorName = TagProperty('Generator', nbt.TAG_String, 'Infinite')
GameType = TagProperty('GameType', nbt.TAG_Int, 0)
def defaultDisplayName(self):
return os.path.basename(os.path.dirname(self.filename))
def __str__(self):
"""
How to represent this level
:return: str
"""
return "PocketLeveldbWorld(\"%s\")" % os.path.basename(os.path.dirname(self.worldFile.path))
def getChunk(self, cx, cz):
"""
Used to obtain a chunk from the database.
:param cx, cz: cx, cz coordinates of the chunk
:return: PocketLeveldbChunk
"""
c = self._loadedChunks.get((cx, cz))
if c is None:
c = self.worldFile.loadChunk(cx, cz, self)
self._loadedChunks[(cx, cz)] = c
return c
def unload(self):
"""
Unload all chunks and close all open file-handlers.
"""
self._loadedChunks.clear()
self._allChunks = None
self.worldFile.close()
def close(self):
"""
Unload all chunks and close all open file-handlers. Discard any unsaved data.
"""
self.unload()
try:
pass # Setup a way to close a work-folder?
except SessionLockLost:
pass
def deleteChunk(self, cx, cz, batch=None):
"""
Deletes a chunk at given cx, cz. Deletes using the batch if batch is given, uses world_db() otherwise.
:param cx, cz Coordinates of the chunk
:param batch WriteBatch
:return: None
"""
self.worldFile.deleteChunk(cx, cz, batch=batch)
if self._loadedChunks is not None and (cx, cz) in self._loadedChunks: # Unnecessary check?
del self._loadedChunks[(cx, cz)]
self.allChunks.remove((cx, cz))
def deleteChunksInBox(self, box):
"""
Deletes all chunks in a given box.
:param box pymclevel.box.BoundingBox
:return: None
"""
logger.info(u"Deleting {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz),
((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
i = 0
ret = []
batch = leveldb_mcpe.WriteBatch()
for cx, cz in itertools.product(xrange(box.mincx, box.maxcx), xrange(box.mincz, box.maxcz)):
i += 1
if self.containsChunk(cx, cz):
self.deleteChunk(cx, cz, batch=batch)
ret.append((cx, cz))
assert not self.containsChunk(cx, cz), "Just deleted {0} but it didn't take".format((cx, cz))
if i % 100 == 0:
logger.info(u"Chunk {0}...".format(i))
with self.worldFile.world_db() as db:
wop = self.worldFile.writeOptions
db.Write(wop, batch)
del batch
return ret
@property
def bounds(self):
"""
Returns a boundingbox containing the entire level
:return: pymclevel.box.BoundingBox
"""
if self._bounds is None:
self._bounds = self._getWorldBounds()
return self._bounds
@property
def size(self):
return self.bounds.size
def _getWorldBounds(self):
if len(self.allChunks) == 0:
return BoundingBox((0, 0, 0), (0, 0, 0))
allChunks = numpy.array(list(self.allChunks))
min_cx = (allChunks[:, 0]).min()
max_cx = (allChunks[:, 0]).max()
min_cz = (allChunks[:, 1]).min()
max_cz = (allChunks[:, 1]).max()
origin = (min_cx << 4, 0, min_cz << 4)
size = ((max_cx - min_cx + 1) << 4, self.Height, (max_cz - min_cz + 1) << 4)
return BoundingBox(origin, size)
@classmethod
def _isLevel(cls, filename):
"""
        Determines whether or not the path in filename contains a Pocket Edition 0.9.0 or later world.
        :param filename: string with path to level root directory.
"""
clp = ("db", "level.dat")
if not os.path.isdir(filename):
f = os.path.basename(filename)
if f not in clp:
return False
filename = os.path.dirname(filename)
return all([os.path.exists(os.path.join(filename, fl)) for fl in clp])
def saveInPlaceGen(self):
"""
Save all chunks to the database, and write the root_tag back to level.dat.
"""
self.saving = True
batch = leveldb_mcpe.WriteBatch()
dirtyChunkCount = 0
for c in self.chunksNeedingLighting:
self.getChunk(*c).genFastLights()
for chunk in self._loadedChunks.itervalues():
if chunk.dirty:
dirtyChunkCount += 1
self.worldFile.saveChunk(chunk, batch=batch)
chunk.dirty = False
yield
with nbt.littleEndianNBT():
for p in self.players:
playerData = self.playerTagCache[p]
if playerData is not None:
playerData = playerData.save(compressed=False) # It will get compressed in the DB itself
self.worldFile.savePlayer(p, playerData, batch=batch)
with self.worldFile.world_db() as db:
wop = self.worldFile.writeOptions
db.Write(wop, batch)
self.saving = False
logger.info(u"Saved {0} chunks to the database".format(dirtyChunkCount))
path = os.path.join(self.worldFile.path, 'level.dat')
with nbt.littleEndianNBT():
rootTagData = self.root_tag.save(compressed=False)
rootTagData = struct.Struct('<i').pack(4) + struct.Struct('<i').pack(len(rootTagData)) + rootTagData
with open(path, 'w') as f:
f.write(rootTagData)
def containsChunk(self, cx, cz):
"""
Determines if the chunk exist in this world.
:param cx, cz: int, Coordinates of the chunk
:return: bool (if chunk exists)
"""
return (cx, cz) in self.allChunks
def createChunk(self, cx, cz):
"""
Creates an empty chunk at given cx, cz coordinates, and stores it in self._loadedChunks
:param cx, cz: int, Coordinates of the chunk
:return:
"""
if self.containsChunk(cx, cz):
raise ValueError("{0}:Chunk {1} already present!".format(self, (cx, cz)))
if self.allChunks is not None:
self.allChunks.append((cx, cz))
self._loadedChunks[(cx, cz)] = PocketLeveldbChunk(cx, cz, self, create=True)
self._bounds = None
def saveGeneratedChunk(self, cx, cz, tempChunkBytes):
"""
Chunks get generated using Anvil generation. This is a (slow) way of importing anvil chunk bytes
and converting them to MCPE chunk data. Could definitely use some improvements, but at least it works.
:param cx, cx: Coordinates of the chunk
:param tempChunkBytes: str. Raw MCRegion chunk data.
:return:
"""
loaded_data = nbt.load(buf=tempChunkBytes)
class fake:
def __init__(self):
self.Height = 128
tempChunk = AnvilChunkData(fake(), (0, 0), loaded_data)
if not self.containsChunk(cx, cz):
self.createChunk(cx, cz)
chunk = self.getChunk(cx, cz)
chunk.Blocks = numpy.array(tempChunk.Blocks, dtype='uint16')
chunk.Data = numpy.array(tempChunk.Data, dtype='uint8')
chunk.SkyLight = numpy.array(tempChunk.SkyLight, dtype='uint8')
chunk.BlockLight = numpy.array(tempChunk.BlockLight, dtype='uint8')
chunk.dirty = True
self.worldFile.saveChunk(chunk)
else:
logger.info("Tried to import generated chunk at %s, %s but the chunk already existed."%(cx, cz))
@property
def chunksNeedingLighting(self):
"""
Generator containing all chunks that need lighting.
:yield: int (cx, cz) Coordinates of the chunk
"""
for chunk in self._loadedChunks.itervalues():
if chunk.needsLighting:
yield chunk.chunkPosition
# -- Entity Stuff --
# A lot of this code got copy-pasted from MCInfDevOldLevel
# Slight modifications to make it work with MCPE
def getTileEntitiesInBox(self, box):
"""
Returns the Tile Entities in given box.
:param box: pymclevel.box.BoundingBox
:return: list of nbt.TAG_Compound
"""
tileEntites = []
for chunk, slices, point in self.getChunkSlices(box):
tileEntites += chunk.getTileEntitiesInBox(box)
return tileEntites
def getEntitiesInBox(self, box):
"""
Returns the Entities in given box.
:param box: pymclevel.box.BoundingBox
:return: list of nbt.TAG_Compound
"""
entities = []
for chunk, slices, point in self.getChunkSlices(box):
entities += chunk.getEntitiesInBox(box)
return entities
def getTileTicksInBox(self, box):
"""
        Always returns an empty list, as MCPE has no TileTicks.
:param box: pymclevel.box.BoundingBox
:return: list
"""
return []
def addEntity(self, entityTag):
"""
Adds an entity to the level.
:param entityTag: nbt.TAG_Compound containing the entity's data.
:return:
"""
assert isinstance(entityTag, nbt.TAG_Compound)
x, y, z = map(lambda p: int(floor(p)), Entity.pos(entityTag))
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return
chunk.addEntity(entityTag)
chunk.dirty = True
def addTileEntity(self, tileEntityTag):
"""
        Adds a tile entity to the level.
:param tileEntityTag: nbt.TAG_Compound containing the Tile entity's data.
:return:
"""
assert isinstance(tileEntityTag, nbt.TAG_Compound)
if 'x' not in tileEntityTag:
return
x, y, z = TileEntity.pos(tileEntityTag)
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return
chunk.addTileEntity(tileEntityTag)
chunk.dirty = True
def addTileTick(self, tickTag):
"""
MCPE doesn't have Tile Ticks, so this can't be added.
:param tickTag: nbt.TAG_Compound
:return: None
"""
return
def tileEntityAt(self, x, y, z):
"""
        Retrieves a tile entity at given x, y, z coordinates
:param x: int
:param y: int
:param z: int
:return: nbt.TAG_Compound or None
"""
chunk = self.getChunk(x >> 4, z >> 4)
return chunk.tileEntityAt(x, y, z)
def removeEntitiesInBox(self, box):
"""
Removes all entities in given box
:param box: pymclevel.box.BoundingBox
:return: int, count of entities removed
"""
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeEntitiesInBox(box)
logger.info("Removed {0} entities".format(count))
return count
def removeTileEntitiesInBox(self, box):
"""
Removes all tile entities in given box
:param box: pymclevel.box.BoundingBox
:return: int, count of tile entities removed
"""
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeTileEntitiesInBox(box)
logger.info("Removed {0} tile entities".format(count))
return count
def removeTileTicksInBox(self, box):
"""
MCPE doesn't have TileTicks, so this does nothing.
:param box: pymclevel.box.BoundingBox
:return: int, count of TileTicks removed.
"""
return 0
# -- Player and spawn stuff
def playerSpawnPosition(self, player=None):
"""
        Returns the default spawn position for the world. If player is given, the player's spawn is returned instead.
:param player: nbt.TAG_Compound, root tag of the player.
:return: tuple int (x, y, z), coordinates of the spawn.
"""
dataTag = self.root_tag
if player is None:
playerSpawnTag = dataTag
else:
playerSpawnTag = self.getPlayerTag(player)
return [playerSpawnTag.get(i, dataTag[i]).value for i in ("SpawnX", "SpawnY", "SpawnZ")]
def setPlayerSpawnPosition(self, pos, player=None):
"""
        Sets the world's spawn point to pos. If player is given, sets that player's spawn point instead.
:param pos: tuple int (x, y, z)
:param player: nbt.TAG_Compound, root tag of the player
:return: None
"""
if player is None:
playerSpawnTag = self.root_tag
else:
playerSpawnTag = self.getPlayerTag(player)
for name, val in zip(("SpawnX", "SpawnY", "SpawnZ"), pos):
playerSpawnTag[name] = nbt.TAG_Int(val)
def getPlayerTag(self, player='Player'):
"""
Obtains a player from the world.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: nbt.TAG_Compound, root tag of the player.
"""
if player == '[No players]': # Apparently this is being called somewhere?
return None
if player == 'Player':
player = '~local_player'
_player = self.playerTagCache.get(player)
if _player is not None:
return _player
playerData = self.playerData[player]
with nbt.littleEndianNBT():
_player = nbt.load(buf=playerData)
self.playerTagCache[player] = _player
return _player
def getPlayerDimension(self, player="Player"):
"""
Always returns 0, as MCPE only has the overworld dimension.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: int
"""
return 0
def setPlayerPosition(self, (x, y, z), player="Player"):
"""
        Sets the player's position to x, y, z
:param (x, y, z): tuple of the coordinates of the player
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return:
"""
posList = nbt.TAG_List([nbt.TAG_Double(p) for p in (x, y - 1.75, z)])
playerTag = self.getPlayerTag(player)
playerTag["Pos"] = posList
def getPlayerPosition(self, player="Player"):
"""
        Gets the player's position
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: tuple int (x, y, z): Coordinates of the player.
"""
playerTag = self.getPlayerTag(player)
posList = playerTag["Pos"]
x, y, z = map(lambda c: c.value, posList)
return x, y + 1.75, z
def setPlayerOrientation(self, yp, player="Player"):
"""
        Sets the player's orientation.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:param yp: int tuple (yaw, pitch)
:return: None
"""
self.getPlayerTag(player)["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])
def getPlayerOrientation(self, player="Player"):
"""
        Gets the player's orientation.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: tuple int (yaw, pitch)
"""
yp = map(lambda x: x.value, self.getPlayerTag(player)["Rotation"])
y, p = yp
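        # Nudge pitch away from exactly 0 or 180 degrees, presumably to avoid edge cases downstream.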
if p == 0:
p = 0.000000001
if p == 180.0:
p -= 0.000000001
yp = y, p
return numpy.array(yp)
    @staticmethod  # Editor keeps putting this in. Probably unnecessary
def setPlayerAbilities(gametype, player="Player"):
"""
        This method just overrides the standard one, as MCPE does not appear to have abilities.
        :param gametype: int of the gamemode the player gets set to.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
"""
pass
def setPlayerGameType(self, gametype, player="Player"):
"""
Sets the game type for player
:param gametype: int (0=survival, 1=creative, 2=adventure, 3=spectator)
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: None
"""
# This annoyingly works differently between single- and multi-player.
if player == "Player":
self.GameType = gametype
self.setPlayerAbilities(gametype, player)
else:
playerTag = self.getPlayerTag(player)
playerTag['playerGameType'] = nbt.TAG_Int(gametype)
self.setPlayerAbilities(gametype, player)
def getPlayerGameType(self, player="Player"):
"""
Obtains the players gametype.
:param player: string of the name of the player. "Player" for SSP player, player_<client-id> for SMP player.
:return: int (0=survival, 1=creative, 2=adventure, 3=spectator)
"""
if player == "Player":
return self.GameType
else:
playerTag = self.getPlayerTag(player)
return playerTag["playerGameType"].value
def markDirtyChunk(self, cx, cz):
self.getChunk(cx, cz).chunkChanged()
def markDirtyBox(self, box):
for cx, cz in box.chunkPositions:
self.markDirtyChunk(cx, cz)
class PocketLeveldbChunk(LightedChunk):
HeightMap = FakeChunk.HeightMap
# _Entities = _TileEntities = nbt.TAG_List()
_Entities = nbt.TAG_List()
_TileEntities = nbt.TAG_List()
dirty = False
def __init__(self, cx, cz, world, data=None, create=False):
"""
:param cx, cz int, int Coordinates of the chunk
:param data List of 3 strings. (83200 bytes of terrain data, tile-entity data, entity data)
        :param world PocketLeveldbWorld, instance of the world the chunk belongs to
"""
self.chunkPosition = (cx, cz)
self.world = world
if create:
self.Blocks = numpy.zeros(32768, 'uint16')
self.Data = numpy.zeros(16384, 'uint8')
self.SkyLight = numpy.zeros(16384, 'uint8')
self.BlockLight = numpy.zeros(16384, 'uint8')
self.DirtyColumns = numpy.zeros(256, 'uint8')
self.GrassColors = numpy.zeros(1024, 'uint8')
self.TileEntities = nbt.TAG_List()
self.Entities = nbt.TAG_List()
else:
terrain = numpy.fromstring(data[0], dtype='uint8')
if data[1] is not None:
TileEntities = loadNBTCompoundList(data[1])
self.TileEntities = nbt.TAG_List(TileEntities, list_type=nbt.TAG_COMPOUND)
if data[2] is not None:
Entities = loadNBTCompoundList(data[2])
# PE saves entities with their int ID instead of string name. We swap them to make it work in mcedit.
# Whenever we save an entity, we need to make sure to swap back.
invertEntities = {v: k for k, v in entity.PocketEntity.entityList.items()}
for ent in Entities:
                    # Get the string id, or build one
v = ent["id"].value
id = invertEntities.get(v, "Entity %s"%v)
# Add the built one to the entities
if id not in entity.PocketEntity.entityList.keys():
logger.warning("Found unknown entity '%s'"%v)
entity.PocketEntity.entityList[id] = v
ent["id"] = nbt.TAG_String(id)
self.Entities = nbt.TAG_List(Entities, list_type=nbt.TAG_COMPOUND)
self.Blocks, terrain = terrain[:32768], terrain[32768:]
self.Data, terrain = terrain[:16384], terrain[16384:]
self.SkyLight, terrain = terrain[:16384], terrain[16384:]
self.BlockLight, terrain = terrain[:16384], terrain[16384:]
self.DirtyColumns, terrain = terrain[:256], terrain[256:]
# Unused at the moment. Might need a special editor? Maybe hooked up to biomes?
self.GrassColors = terrain[:1024]
self._unpackChunkData()
self.shapeChunkData()
def _unpackChunkData(self):
"""
Unpacks the terrain data to match mcedit's formatting.
"""
for key in ('SkyLight', 'BlockLight', 'Data'):
dataArray = getattr(self, key)
dataArray.shape = (16, 16, 64)
s = dataArray.shape
unpackedData = numpy.zeros((s[0], s[1], s[2] * 2), dtype='uint8')
unpackedData[:, :, ::2] = dataArray
unpackedData[:, :, ::2] &= 0xf
unpackedData[:, :, 1::2] = dataArray
unpackedData[:, :, 1::2] >>= 4
setattr(self, key, unpackedData)
def shapeChunkData(self):
"""
Determines the shape of the terrain data.
:return:
"""
chunkSize = 16
self.Blocks.shape = (chunkSize, chunkSize, self.world.Height)
self.SkyLight.shape = (chunkSize, chunkSize, self.world.Height)
self.BlockLight.shape = (chunkSize, chunkSize, self.world.Height)
self.Data.shape = (chunkSize, chunkSize, self.world.Height)
self.DirtyColumns.shape = chunkSize, chunkSize
def savedData(self):
"""
Returns the data of the chunk to save to the database.
:return: str of 83200 bytes of chunk data.
"""
def packData(dataArray):
"""
Repacks the terrain data to Mojang's leveldb library's format.
"""
assert dataArray.shape[2] == self.world.Height
data = numpy.array(dataArray).reshape(16, 16, self.world.Height / 2, 2)
data[..., 1] <<= 4
data[..., 1] |= data[..., 0]
return numpy.array(data[:, :, :, 1])
if self.dirty:
# elements of DirtyColumns are bitfields. Each bit corresponds to a
# 16-block segment of the column. We set all of the bits because
# we only track modifications at the chunk level.
self.DirtyColumns[:] = 255
with nbt.littleEndianNBT():
entityData = ""
tileEntityData = ""
for ent in self.TileEntities:
tileEntityData += ent.save(compressed=False)
for ent in self.Entities:
v = ent["id"].value
# ent["id"] = nbt.TAG_Int(entity.PocketEntity.entityList[v])
id = entity.PocketEntity.getNumId(v)
# print id
if id >= 1000:
print id
print type(ent)
print ent
ent['id'] = nbt.TAG_Int(id)
entityData += ent.save(compressed=False)
# We have to re-invert after saving otherwise the next save will fail.
ent["id"] = nbt.TAG_String(v)
terrain = ''.join([self.Blocks.tostring(),
packData(self.Data).tostring(),
packData(self.SkyLight).tostring(),
packData(self.BlockLight).tostring(),
self.DirtyColumns.tostring(),
self.GrassColors.tostring(),
])
return terrain, tileEntityData, entityData
# -- Entities and TileEntities
@property
def Entities(self):
return self._Entities
@Entities.setter
def Entities(self, Entities):
"""
:param Entities: list
:return:
"""
self._Entities = Entities
@property
def TileEntities(self):
return self._TileEntities
@TileEntities.setter
def TileEntities(self, TileEntities):
"""
:param TileEntities: list
:return:
"""
self._TileEntities = TileEntities
| isc | -1,070,274,285,691,429,800 | 34.546012 | 117 | 0.571675 | false |
tkolhar/robottelo | tests/foreman/smoke/test_api_smoke.py | 1 | 39496 | # -*- encoding: utf-8 -*-
"""Smoke tests for the ``API`` end-to-end scenario."""
import random
from fauxfactory import gen_string
from nailgun import client, entities
from robottelo import helpers, manifests
from robottelo.api.utils import (
enable_rhrepo_and_fetchid,
promote,
upload_manifest,
)
from robottelo.config import settings
from robottelo.constants import (
DEFAULT_LOC,
DEFAULT_ORG,
DEFAULT_SUBSCRIPTION_NAME,
FAKE_0_PUPPET_REPO,
GOOGLE_CHROME_REPO,
PRDS,
REPOS,
REPOSET,
)
from robottelo.decorators import (
bz_bug_is_open,
skip_if_bug_open,
skip_if_not_set,
)
from robottelo.helpers import get_nailgun_config
from robottelo.vm import VirtualMachine
from robottelo.test import TestCase
from six.moves import http_client
# (too many public methods) pylint: disable=R0904
API_PATHS = {
# flake8:noqa (line-too-long) pylint:disable=C0301
u'activation_keys': (
u'/katello/api/activation_keys',
u'/katello/api/activation_keys',
u'/katello/api/activation_keys/:id',
u'/katello/api/activation_keys/:id',
u'/katello/api/activation_keys/:id',
u'/katello/api/activation_keys/:id/add_subscriptions',
u'/katello/api/activation_keys/:id/content_override',
u'/katello/api/activation_keys/:id/copy',
u'/katello/api/activation_keys/:id/host_collections/available',
u'/katello/api/activation_keys/:id/product_content',
u'/katello/api/activation_keys/:id/releases',
u'/katello/api/activation_keys/:id/remove_subscriptions',
),
u'api': (),
u'architectures': (
u'/api/architectures',
u'/api/architectures',
u'/api/architectures/:id',
u'/api/architectures/:id',
u'/api/architectures/:id',
),
u'arf_reports': (
u'/api/arf/:cname/:policy_id/:date',
),
u'audits': (
u'/api/audits',
u'/api/audits/:id',
),
u'auth_source_ldaps': (
u'/api/auth_source_ldaps',
u'/api/auth_source_ldaps',
u'/api/auth_source_ldaps/:id',
u'/api/auth_source_ldaps/:id',
u'/api/auth_source_ldaps/:id',
),
u'autosign': (
u'/api/smart_proxies/smart_proxy_id/autosign',
),
u'base': (),
u'bookmarks': (
u'/api/bookmarks',
u'/api/bookmarks',
u'/api/bookmarks/:id',
u'/api/bookmarks/:id',
u'/api/bookmarks/:id',
),
u'candlepin_proxies': (
u'/katello/api/systems/:id/enabled_repos',
),
u'capsule_content': (
u'/katello/api/capsules/:id/content/available_lifecycle_environments',
u'/katello/api/capsules/:id/content/lifecycle_environments',
u'/katello/api/capsules/:id/content/lifecycle_environments',
u'/katello/api/capsules/:id/content/lifecycle_environments/:environment_id',
u'/katello/api/capsules/:id/content/sync',
),
u'capsules': (
u'/katello/api/capsules',
u'/katello/api/capsules/:id',
),
u'common_parameters': (
u'/api/common_parameters',
u'/api/common_parameters',
u'/api/common_parameters/:id',
u'/api/common_parameters/:id',
u'/api/common_parameters/:id',
),
u'compute_attributes': (
u'/api/compute_resources/:compute_resource_id/compute_profiles/:compute_profile_id/compute_attributes',
u'/api/compute_resources/:compute_resource_id/compute_profiles/:compute_profile_id/compute_attributes/:id',
),
u'compute_profiles': (
u'/api/compute_profiles',
u'/api/compute_profiles',
u'/api/compute_profiles/:id',
u'/api/compute_profiles/:id',
u'/api/compute_profiles/:id',
),
u'compute_resources': (
u'/api/compute_resources',
u'/api/compute_resources',
u'/api/compute_resources/:id',
u'/api/compute_resources/:id',
u'/api/compute_resources/:id',
u'/api/compute_resources/:id/associate',
u'/api/compute_resources/:id/available_clusters',
u'/api/compute_resources/:id/available_clusters/:cluster_id/available_resource_pools',
u'/api/compute_resources/:id/available_folders',
u'/api/compute_resources/:id/available_images',
u'/api/compute_resources/:id/available_networks',
u'/api/compute_resources/:id/available_storage_domains',
),
u'config_groups': (
u'/api/config_groups',
u'/api/config_groups',
u'/api/config_groups/:id',
u'/api/config_groups/:id',
u'/api/config_groups/:id',
),
u'config_templates': (
u'/api/config_templates',
u'/api/config_templates',
u'/api/config_templates/build_pxe_default',
u'/api/config_templates/:id',
u'/api/config_templates/:id',
u'/api/config_templates/:id',
),
u'content_uploads': (
u'/katello/api/repositories/:repository_id/content_uploads',
u'/katello/api/repositories/:repository_id/content_uploads/:id',
u'/katello/api/repositories/:repository_id/content_uploads/:id',
),
u'content_view_filter_rules': (
u'/katello/api/content_view_filters/:content_view_filter_id/rules',
u'/katello/api/content_view_filters/:content_view_filter_id/rules',
u'/katello/api/content_view_filters/:content_view_filter_id/rules/:id',
u'/katello/api/content_view_filters/:content_view_filter_id/rules/:id',
u'/katello/api/content_view_filters/:content_view_filter_id/rules/:id',
),
u'content_view_filters': (
u'/katello/api/content_views/:content_view_id/filters',
u'/katello/api/content_views/:content_view_id/filters',
u'/katello/api/content_views/:content_view_id/filters/:id',
u'/katello/api/content_views/:content_view_id/filters/:id',
u'/katello/api/content_views/:content_view_id/filters/:id',
u'/katello/api/content_views/:content_view_id/filters/:id/available_errata',
u'/katello/api/content_views/:content_view_id/filters/:id/available_package_groups',
),
u'content_view_puppet_modules': (
u'/katello/api/content_views/:content_view_id/content_view_puppet_modules',
u'/katello/api/content_views/:content_view_id/content_view_puppet_modules',
u'/katello/api/content_views/:content_view_id/content_view_puppet_modules/:id',
u'/katello/api/content_views/:content_view_id/content_view_puppet_modules/:id',
u'/katello/api/content_views/:content_view_id/content_view_puppet_modules/:id',
),
u'content_views': (
u'/katello/api/content_views/:id',
u'/katello/api/content_views/:id',
u'/katello/api/content_views/:id',
u'/katello/api/content_views/:id/available_puppet_module_names',
u'/katello/api/content_views/:id/available_puppet_modules',
u'/katello/api/content_views/:id/copy',
u'/katello/api/content_views/:id/environments/:environment_id',
u'/katello/api/content_views/:id/history',
u'/katello/api/content_views/:id/publish',
u'/katello/api/content_views/:id/remove',
u'/katello/api/organizations/:organization_id/content_views',
u'/katello/api/organizations/:organization_id/content_views',
),
u'containers': (
u'/docker/api/v2/containers',
u'/docker/api/v2/containers',
u'/docker/api/v2/containers/:id',
u'/docker/api/v2/containers/:id',
u'/docker/api/v2/containers/:id/logs',
u'/docker/api/v2/containers/:id/power',
),
    u'content_reports': (
u'/katello/api/content_reports/status_trend',
u'/katello/api/content_reports/system_status',
u'/katello/api/content_reports/system_trend',
),
u'content_view_versions': (
u'/katello/api/content_view_versions',
u'/katello/api/content_view_versions/:id',
u'/katello/api/content_view_versions/:id',
u'/katello/api/content_view_versions/:id/promote',
u'/katello/api/content_view_versions/incremental_update',
),
u'dashboard': (
u'/api/dashboard',
),
u'discovered_hosts': (
u'/api/v2/discovered_hosts',
u'/api/v2/discovered_hosts',
u'/api/v2/discovered_hosts/auto_provision_all',
u'/api/v2/discovered_hosts/facts',
u'/api/v2/discovered_hosts/:id',
u'/api/v2/discovered_hosts/:id',
u'/api/v2/discovered_hosts/:id',
u'/api/v2/discovered_hosts/:id/auto_provision',
u'/api/v2/discovered_hosts/:id/reboot',
u'/api/v2/discovered_hosts/:id/refresh_facts',
),
u'discovery_rules': (
u'/api/v2/discovery_rules',
u'/api/v2/discovery_rules',
u'/api/v2/discovery_rules/:id',
u'/api/v2/discovery_rules/:id',
u'/api/v2/discovery_rules/:id',
),
u'disks': (
u'/bootdisk/api',
u'/bootdisk/api/generic',
u'/bootdisk/api/hosts/:host_id',
),
u'distributions': (
u'/katello/api/repositories/:repository_id/distributions',
u'/katello/api/repositories/:repository_id/distributions/:id',
),
u'docker_images': (
u'/katello/api/docker_images',
u'/katello/api/docker_images/:id',
),
u'docker_tags': (
u'/katello/api/docker_tags',
u'/katello/api/docker_tags/:id',
),
u'domains': (
u'/api/domains',
u'/api/domains',
u'/api/domains/:id',
u'/api/domains/:id',
u'/api/domains/:id',
),
u'environments': (
u'/api/environments',
u'/api/environments',
u'/api/environments/:id',
u'/api/environments/:id',
u'/api/environments/:id',
u'/api/smart_proxies/:id/import_puppetclasses',
),
u'errata': (
u'/katello/api/errata',
u'/katello/api/errata/compare',
u'/katello/api/errata/:id',
),
u'external_usergroups': (
u'/api/usergroups/:usergroup_id/external_usergroups',
u'/api/usergroups/:usergroup_id/external_usergroups',
u'/api/usergroups/:usergroup_id/external_usergroups/:id',
u'/api/usergroups/:usergroup_id/external_usergroups/:id',
u'/api/usergroups/:usergroup_id/external_usergroups/:id',
u'/api/usergroups/:usergroup_id/external_usergroups/:id/refresh',
),
u'fact_values': (
u'/api/fact_values',
),
u'filters': (
u'/api/filters',
u'/api/filters',
u'/api/filters/:id',
u'/api/filters/:id',
u'/api/filters/:id',
),
u'foreman_tasks': (
u'/foreman_tasks/api/tasks',
u'/foreman_tasks/api/tasks/:id',
u'/foreman_tasks/api/tasks/bulk_resume',
u'/foreman_tasks/api/tasks/bulk_search',
u'/foreman_tasks/api/tasks/summary',
),
u'gpg_keys': (
u'/katello/api/gpg_keys',
u'/katello/api/gpg_keys',
u'/katello/api/gpg_keys/:id',
u'/katello/api/gpg_keys/:id',
u'/katello/api/gpg_keys/:id',
u'/katello/api/gpg_keys/:id/content',
),
u'home': (
u'/api',
u'/api/status',
),
u'host_classes': (
u'/api/hosts/:host_id/puppetclass_ids',
u'/api/hosts/:host_id/puppetclass_ids',
u'/api/hosts/:host_id/puppetclass_ids/:id',
),
u'host_collections': (
u'/katello/api/host_collections',
u'/katello/api/host_collections',
u'/katello/api/host_collections/:id',
u'/katello/api/host_collections/:id',
u'/katello/api/host_collections/:id',
u'/katello/api/host_collections/:id/add_systems',
u'/katello/api/host_collections/:id/copy',
u'/katello/api/host_collections/:id/remove_systems',
u'/katello/api/host_collections/:id/systems',
),
u'hostgroup_classes': (
u'/api/hostgroups/:hostgroup_id/puppetclass_ids',
u'/api/hostgroups/:hostgroup_id/puppetclass_ids',
u'/api/hostgroups/:hostgroup_id/puppetclass_ids/:id',
),
u'hostgroups': (
u'/api/hostgroups',
u'/api/hostgroups',
u'/api/hostgroups/:id',
u'/api/hostgroups/:id',
u'/api/hostgroups/:id',
u'/api/hostgroups/:id/clone',
),
u'hosts': (
u'/api/hosts',
u'/api/hosts',
u'/api/hosts/facts',
u'/api/hosts/:id',
u'/api/hosts/:id',
u'/api/hosts/:id',
u'/api/hosts/:id/boot',
u'/api/hosts/:id/disassociate',
u'/api/hosts/:id/power',
u'/api/hosts/:id/puppetrun',
u'/api/hosts/:id/status',
),
u'images': (
u'/api/compute_resources/:compute_resource_id/images',
u'/api/compute_resources/:compute_resource_id/images',
u'/api/compute_resources/:compute_resource_id/images/:id',
u'/api/compute_resources/:compute_resource_id/images/:id',
u'/api/compute_resources/:compute_resource_id/images/:id',
),
u'interfaces': (
u'/api/hosts/:host_id/interfaces',
u'/api/hosts/:host_id/interfaces',
u'/api/hosts/:host_id/interfaces/:id',
u'/api/hosts/:host_id/interfaces/:id',
u'/api/hosts/:host_id/interfaces/:id',
),
u'lifecycle_environments': (
u'/katello/api/environments',
u'/katello/api/environments',
u'/katello/api/environments/:id',
u'/katello/api/environments/:id',
u'/katello/api/environments/:id',
u'/katello/api/organizations/:organization_id/environments/:id/repositories',
u'/katello/api/organizations/:organization_id/environments/paths',
),
u'locations': (
u'/api/locations',
u'/api/locations',
u'/api/locations/:id',
u'/api/locations/:id',
u'/api/locations/:id',
),
u'mail_notifications': (
u'/api/mail_notifications',
u'/api/mail_notifications/:id',
),
u'media': (
u'/api/media',
u'/api/media',
u'/api/media/:id',
u'/api/media/:id',
u'/api/media/:id',
),
u'models': (
u'/api/models',
u'/api/models',
u'/api/models/:id',
u'/api/models/:id',
u'/api/models/:id',
),
u'operatingsystems': (
u'/api/operatingsystems',
u'/api/operatingsystems',
u'/api/operatingsystems/:id',
u'/api/operatingsystems/:id',
u'/api/operatingsystems/:id',
u'/api/operatingsystems/:id/bootfiles',
),
u'organizations': (
u'/katello/api/organizations',
u'/katello/api/organizations',
u'/katello/api/organizations/:id',
u'/katello/api/organizations/:id',
u'/katello/api/organizations/:id',
u'/katello/api/organizations/:id/autoattach_subscriptions',
u'/katello/api/organizations/:id/redhat_provider',
u'/katello/api/organizations/:id/repo_discover',
u'/katello/api/organizations/:label/cancel_repo_discover',
u'/katello/api/organizations/:label/download_debug_certificate',
),
u'os_default_templates': (
u'/api/operatingsystems/:operatingsystem_id/os_default_templates',
u'/api/operatingsystems/:operatingsystem_id/os_default_templates',
u'/api/operatingsystems/:operatingsystem_id/os_default_templates/:id',
u'/api/operatingsystems/:operatingsystem_id/os_default_templates/:id',
u'/api/operatingsystems/:operatingsystem_id/os_default_templates/:id',
),
u'override_values': (
u'/api/smart_variables/:smart_variable_id/override_values',
u'/api/smart_variables/:smart_variable_id/override_values',
u'/api/smart_variables/:smart_variable_id/override_values/:id',
u'/api/smart_variables/:smart_variable_id/override_values/:id',
u'/api/smart_variables/:smart_variable_id/override_values/:id',
),
u'package_groups': (
u'/katello/api/package_groups',
u'/katello/api/package_groups/:id',
),
u'packages': (
u'/katello/api/packages',
u'/katello/api/packages/:id',
),
u'parameters': (
u'/api/hosts/:host_id/parameters',
u'/api/hosts/:host_id/parameters',
u'/api/hosts/:host_id/parameters',
u'/api/hosts/:host_id/parameters/:id',
u'/api/hosts/:host_id/parameters/:id',
u'/api/hosts/:host_id/parameters/:id',
),
u'permissions': (
u'/api/permissions',
u'/api/permissions/:id',
u'/api/permissions/resource_types',
),
u'ping': (
u'/katello/api/ping',
u'/katello/api/status',
),
u'plugins': (
u'/api/plugins',
),
u'products_bulk_actions': (
u'/katello/api/products/bulk/destroy',
u'/katello/api/products/bulk/sync_plan',
),
u'products': (
u'/katello/api/products',
u'/katello/api/products',
u'/katello/api/products/:id',
u'/katello/api/products/:id',
u'/katello/api/products/:id',
u'/katello/api/products/:id/sync',
),
u'ptables': (
u'/api/ptables',
u'/api/ptables',
u'/api/ptables/:id',
u'/api/ptables/:id',
u'/api/ptables/:id',
),
u'puppetclasses': (
u'/api/puppetclasses',
u'/api/puppetclasses',
u'/api/puppetclasses/:id',
u'/api/puppetclasses/:id',
u'/api/puppetclasses/:id',
),
u'puppet_modules': (
u'/katello/api/puppet_modules',
u'/katello/api/puppet_modules/:id',
),
u'realms': (
u'/api/realms',
u'/api/realms',
u'/api/realms/:id',
u'/api/realms/:id',
u'/api/realms/:id',
),
u'registries': (
u'/docker/api/v2/registries',
u'/docker/api/v2/registries',
u'/docker/api/v2/registries/:id',
u'/docker/api/v2/registries/:id',
u'/docker/api/v2/registries/:id',
),
u'reports': (
u'/api/hosts/:host_id/reports/last',
u'/api/reports',
u'/api/reports',
u'/api/reports/:id',
u'/api/reports/:id',
),
u'repositories_bulk_actions': (
u'/katello/api/repositories/bulk/destroy',
u'/katello/api/repositories/bulk/sync',
),
u'repositories': (
u'/katello/api/repositories',
u'/katello/api/repositories',
u'/katello/api/repositories/:id',
u'/katello/api/repositories/:id',
u'/katello/api/repositories/:id',
u'/katello/api/repositories/:id/gpg_key_content',
u'/katello/api/repositories/:id/import_uploads',
u'/katello/api/repositories/:id/sync',
u'/katello/api/repositories/:id/upload_content',
),
u'repository_sets': (
u'/katello/api/products/:product_id/repository_sets',
u'/katello/api/products/:product_id/repository_sets/:id',
u'/katello/api/products/:product_id/repository_sets/:id/available_repositories',
u'/katello/api/products/:product_id/repository_sets/:id/disable',
u'/katello/api/products/:product_id/repository_sets/:id/enable',
),
u'roles': (
u'/api/roles',
u'/api/roles',
u'/api/roles/:id',
u'/api/roles/:id',
u'/api/roles/:id',
),
u'root': (),
u'scaptimony_policies': (
u'/api/v2/compliance/policies/:id/content',
),
u'settings': (
u'/api/settings',
u'/api/settings/:id',
u'/api/settings/:id',
),
u'smart_class_parameters': (
u'/api/smart_class_parameters',
u'/api/smart_class_parameters/:id',
u'/api/smart_class_parameters/:id',
),
u'smart_proxies': (
u'/api/smart_proxies',
u'/api/smart_proxies',
u'/api/smart_proxies/:id',
u'/api/smart_proxies/:id',
u'/api/smart_proxies/:id',
u'/api/smart_proxies/:id/import_puppetclasses',
u'/api/smart_proxies/:id/refresh',
),
u'smart_variables': (
u'/api/smart_variables',
u'/api/smart_variables',
u'/api/smart_variables/:id',
u'/api/smart_variables/:id',
u'/api/smart_variables/:id',
),
u'statistics': (
u'/api/statistics',
),
u'subnet_disks': (
u'/bootdisk/api',
u'/bootdisk/api/subnets/:subnet_id',
),
u'subnets': (
u'/api/subnets',
u'/api/subnets',
u'/api/subnets/:id',
u'/api/subnets/:id',
u'/api/subnets/:id',
),
u'subscriptions': (
u'/katello/api/organizations/:organization_id/subscriptions',
u'/katello/api/organizations/:organization_id/subscriptions/delete_manifest',
u'/katello/api/organizations/:organization_id/subscriptions/:id',
u'/katello/api/organizations/:organization_id/subscriptions/manifest_history',
u'/katello/api/organizations/:organization_id/subscriptions/refresh_manifest',
u'/katello/api/organizations/:organization_id/subscriptions/upload',
u'/katello/api/subscriptions/:id',
u'/katello/api/subscriptions/:id',
u'/katello/api/systems/:system_id/subscriptions/available',
),
u'sync_plans': (
u'/katello/api/organizations/:organization_id/sync_plans',
u'/katello/api/organizations/:organization_id/sync_plans/:id',
u'/katello/api/organizations/:organization_id/sync_plans/:id',
u'/katello/api/organizations/:organization_id/sync_plans/:id',
u'/katello/api/organizations/:organization_id/sync_plans/:id/add_products',
u'/katello/api/organizations/:organization_id/sync_plans/:id/available_products',
u'/katello/api/organizations/:organization_id/sync_plans/:id/remove_products',
u'/katello/api/sync_plans',
),
u'sync': (
u'/katello/api/organizations/:organization_id/products/:product_id/sync',
),
u'system_errata': (
u'/katello/api/systems/:system_id/errata',
u'/katello/api/systems/:system_id/errata/:id',
u'/katello/api/systems/:system_id/errata/apply',
),
u'system_packages': (
u'/katello/api/systems/:system_id/packages/install',
u'/katello/api/systems/:system_id/packages/remove',
u'/katello/api/systems/:system_id/packages/upgrade_all',
),
u'systems_bulk_actions': (
u'/katello/api/systems/bulk/add_host_collections',
u'/katello/api/systems/bulk/applicable_errata',
u'/katello/api/systems/bulk/available_incremental_updates',
u'/katello/api/systems/bulk/destroy',
u'/katello/api/systems/bulk/environment_content_view',
u'/katello/api/systems/bulk/install_content',
u'/katello/api/systems/bulk/remove_content',
u'/katello/api/systems/bulk/remove_host_collections',
u'/katello/api/systems/bulk/update_content',
),
u'systems': (
u'/katello/api/environments/:environment_id/systems/report',
u'/katello/api/systems',
u'/katello/api/systems',
u'/katello/api/systems/:id',
u'/katello/api/systems/:id',
u'/katello/api/systems/:id',
u'/katello/api/systems/:id/available_host_collections',
u'/katello/api/systems/:id/events',
u'/katello/api/systems/:id/packages',
u'/katello/api/systems/:id/pools',
u'/katello/api/systems/:id/refresh_subscriptions',
u'/katello/api/systems/:id/releases',
),
u'tasks': (
u'/api/orchestration/:id/tasks',
),
u'template_combinations': (
u'/api/config_templates/:config_template_id/template_combinations',
u'/api/config_templates/:config_template_id/template_combinations',
u'/api/template_combinations/:id',
u'/api/template_combinations/:id',
),
u'template_kinds': (
u'/api/template_kinds',
),
u'uebercerts': (
u'/katello/api/organizations/:organization_id/uebercert',
),
u'usergroups': (
u'/api/usergroups',
u'/api/usergroups',
u'/api/usergroups/:id',
u'/api/usergroups/:id',
u'/api/usergroups/:id',
),
u'users': (
u'/api/users',
u'/api/users',
u'/api/users/:id',
u'/api/users/:id',
u'/api/users/:id',
),
}
class AvailableURLsTestCase(TestCase):
"""Tests for ``api/v2``."""
longMessage = True
def setUp(self):
"""Define commonly-used variables."""
self.path = '{0}/api/v2'.format(settings.server.get_url())
def test_positive_get_status_code(self):
"""@Test: GET ``api/v2`` and examine the response.
@Feature: API
@Assert: HTTP 200 is returned with an ``application/json`` content-type
"""
response = client.get(
self.path,
auth=settings.server.get_credentials(),
verify=False,
)
self.assertEqual(response.status_code, http_client.OK)
self.assertIn('application/json', response.headers['content-type'])
def test_positive_get_links(self):
"""@Test: GET ``api/v2`` and check the links returned.
@Feature: API
@Assert: The paths returned are equal to ``API_PATHS``.
"""
# Did the server give us any paths at all?
response = client.get(
self.path,
auth=settings.server.get_credentials(),
verify=False,
)
response.raise_for_status()
# See below for an explanation of this transformation.
api_paths = response.json()['links']
for group, path_pairs in api_paths.items():
api_paths[group] = path_pairs.values()
if bz_bug_is_open(1166875):
# The server returns incorrect paths.
api_paths['docker_images'].append(u'/katello/api/docker_images')
api_paths['docker_images'].remove(u'/katello/api/compare')
api_paths['docker_tags'].append(u'/katello/api/docker_tags')
api_paths['docker_tags'].remove(u'/katello/api/compare')
api_paths['errata'].append(u'/katello/api/errata')
api_paths['errata'].append(u'/katello/api/errata/compare')
api_paths['errata'].remove(u'/katello/api/compare')
self.assertEqual(
frozenset(api_paths.keys()),
frozenset(API_PATHS.keys())
)
for group in api_paths.keys():
self.assertItemsEqual(api_paths[group], API_PATHS[group], group)
# (line-too-long) pylint:disable=C0301
# response.json()['links'] is a dict like this:
#
# {u'content_views': {
# u'…': u'/katello/api/content_views/:id',
# u'…': u'/katello/api/content_views/:id/available_puppet_modules',
# u'…': u'/katello/api/organizations/:organization_id/content_views',
# u'…': u'/katello/api/organizations/:organization_id/content_views',
# }, …}
#
# We don't care about prose descriptions. It doesn't matter if those
# change. Transform it before running any assertions:
#
# {u'content_views': [
# u'/katello/api/content_views/:id',
# u'/katello/api/content_views/:id/available_puppet_modules',
# u'/katello/api/organizations/:organization_id/content_views',
# u'/katello/api/organizations/:organization_id/content_views',
# ], …}
class SmokeTestCase(TestCase):
"""End-to-end tests using the ``API`` path."""
def test_positive_find_default_org(self):
"""@Test: Check if 'Default Organization' is present
@Feature: Smoke Test
@Assert: 'Default Organization' is found
"""
results = entities.Organization().search(
query={'search': 'name="{0}"'.format(DEFAULT_ORG)}
)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, DEFAULT_ORG)
def test_positive_find_default_loc(self):
"""@Test: Check if 'Default Location' is present
@Feature: Smoke Test
@Assert: 'Default Location' is found
"""
results = entities.Location().search(
query={'search': 'name="{0}"'.format(DEFAULT_LOC)}
)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, DEFAULT_LOC)
def test_positive_find_admin_user(self):
"""@Test: Check if Admin User is present
@Feature: Smoke Test
@Assert: Admin User is found and has Admin role
"""
results = entities.User().search(query={'search': 'login=admin'})
self.assertEqual(len(results), 1)
self.assertEqual(results[0].login, 'admin')
def test_positive_ping(self):
"""@Test: Check if all services are running
@Feature: Smoke Test
@Assert: Overall and individual services status should be 'ok'.
"""
response = entities.Ping().search_json()
self.assertEqual(response['status'], u'ok') # overall status
# Check that all services are OK. ['services'] is in this format:
#
# {u'services': {
# u'candlepin': {u'duration_ms': u'40', u'status': u'ok'},
# u'candlepin_auth': {u'duration_ms': u'41', u'status': u'ok'},
# …
# }, u'status': u'ok'}
services = response['services']
self.assertTrue(
all([service['status'] == u'ok' for service in services.values()]),
u'Not all services seem to be up and running!'
)
def test_positive_smoke(self):
"""@Test: Check that basic content can be created
1. Create a new user with admin permissions
2. Using the new user from above:
1. Create a new organization
2. Create two new lifecycle environments
3. Create a custom product
4. Create a custom YUM repository
5. Create a custom PUPPET repository
6. Synchronize both custom repositories
7. Create a new content view
8. Associate both repositories to new content view
9. Publish content view
10. Promote content view to both lifecycles
11. Create a new libvirt compute resource
12. Create a new subnet
13. Create a new domain
14. Create a new hostgroup and associate previous entities to it
@Feature: Smoke Test
@Assert: All entities are created and associated.
"""
# prep work
#
# FIXME: Use a larger charset when authenticating users.
#
# It is possible to create a user with a wide range of characters. (see
# the "User" entity). However, Foreman supports only HTTP Basic
# authentication, and the requests lib enforces the latin1 charset in
# this auth mode. We then further restrict ourselves to the
# alphanumeric charset, because Foreman complains about incomplete
# multi-byte chars when latin1 chars are used.
login = gen_string('alphanumeric')
password = gen_string('alphanumeric')
# step 1: Create a new user with admin permissions
entities.User(admin=True, login=login, password=password).create()
# step 2.1: Create a new organization
server_config = get_nailgun_config()
server_config.auth = (login, password)
org = entities.Organization(server_config).create()
# step 2.2: Create 2 new lifecycle environments
le1 = entities.LifecycleEnvironment(
server_config,
organization=org
).create()
le2 = entities.LifecycleEnvironment(
server_config,
organization=org,
prior=le1,
).create()
# step 2.3: Create a custom product
prod = entities.Product(server_config, organization=org).create()
# step 2.4: Create custom YUM repository
repo1 = entities.Repository(
server_config,
product=prod,
content_type=u'yum',
url=GOOGLE_CHROME_REPO
).create()
# step 2.5: Create custom PUPPET repository
repo2 = entities.Repository(
server_config,
product=prod,
content_type=u'puppet',
url=FAKE_0_PUPPET_REPO
).create()
# step 2.6: Synchronize both repositories
for repo in [repo1, repo2]:
repo.sync()
# step 2.7: Create content view
content_view = entities.ContentView(
server_config,
organization=org
).create()
# step 2.8: Associate YUM repository to new content view
content_view.repository = [repo1]
content_view = content_view.update(['repository'])
# Fetch all available puppet modules
puppet_mods = content_view.available_puppet_modules()
        self.assertGreater(len(puppet_mods['results']), 0)
# Select a random puppet module from the results
puppet_module = random.choice(puppet_mods['results'])
# ... and associate it to the content view
puppet = entities.ContentViewPuppetModule(
author=puppet_module['author'],
name=puppet_module['name'],
content_view=content_view,
).create()
self.assertEqual(
puppet.name,
puppet_module['name'],
)
# step 2.9: Publish content view
content_view.publish()
# step 2.10: Promote content view to both lifecycles
content_view = content_view.read()
self.assertEqual(len(content_view.version), 1)
cv_version = content_view.version[0].read()
self.assertEqual(len(cv_version.environment), 1)
promote(cv_version, le1.id)
# Check that content view exists in 2 lifecycles
content_view = content_view.read()
self.assertEqual(len(content_view.version), 1)
cv_version = cv_version.read()
self.assertEqual(len(cv_version.environment), 2)
promote(cv_version, le2.id)
# Check that content view exists in 2 lifecycles
content_view = content_view.read()
self.assertEqual(len(content_view.version), 1)
cv_version = cv_version.read()
self.assertEqual(len(cv_version.environment), 3)
# BONUS: Create a content host and associate it with promoted
# content view and last lifecycle where it exists
content_host = entities.System(
server_config,
content_view=content_view,
environment=le2
).create()
# Check that content view matches what we passed
self.assertEqual(content_host.content_view.id, content_view.id)
# Check that lifecycle environment matches
self.assertEqual(content_host.environment.id, le2.id)
# step 2.11: Create a new libvirt compute resource
entities.LibvirtComputeResource(
server_config,
url=u'qemu+tcp://{0}:16509/system'.format(
settings.server.hostname),
).create()
# step 2.12: Create a new subnet
subnet = entities.Subnet(server_config).create()
# step 2.13: Create a new domain
domain = entities.Domain(server_config).create()
# step 2.14: Create a new hostgroup and associate previous entities to
# it
entities.HostGroup(
server_config,
domain=domain,
subnet=subnet
).create()
@skip_if_not_set('clients')
def test_positive_end_to_end(self):
"""@Test: Perform end to end smoke tests using RH repos.
1. Create new organization and environment
2. Clone and upload manifest
3. Sync a RedHat repository
4. Create content-view
        5. Add repository to content-view
6. Promote/publish content-view
7. Create an activation-key
8. Add product to activation-key
9. Create new virtualmachine
10. Pull rpm from Foreman server and install on client
11. Register client with foreman server using activation-key
12. Install rpm on client
@Feature: Smoke test
@Assert: All tests should succeed and Content should be successfully
fetched by client
"""
activation_key_name = gen_string('alpha')
# step 1.1: Create a new organization
org = entities.Organization().create()
# step 1.2: Create new lifecycle environments
lifecycle_env = entities.LifecycleEnvironment(
organization=org
).create()
# step 2: Upload manifest
with manifests.clone() as manifest:
upload_manifest(org.id, manifest.content)
# step 3.1: Enable RH repo and fetch repository_id
repository = entities.Repository(id=enable_rhrepo_and_fetchid(
basearch='x86_64',
org_id=org.id,
product=PRDS['rhel'],
repo=REPOS['rhva6']['name'],
reposet=REPOSET['rhva6'],
releasever='6Server',
))
# step 3.2: sync repository
repository.sync()
# step 4: Create content view
content_view = entities.ContentView(organization=org).create()
# step 5: Associate repository to new content view
content_view.repository = [repository]
content_view = content_view.update(['repository'])
# step 6.1: Publish content view
content_view.publish()
# step 6.2: Promote content view to lifecycle_env
content_view = content_view.read()
self.assertEqual(len(content_view.version), 1)
promote(content_view.version[0], lifecycle_env.id)
# step 7: Create activation key
activation_key = entities.ActivationKey(
name=activation_key_name,
environment=lifecycle_env,
organization=org,
content_view=content_view,
).create()
# step 7.1: Walk through the list of subscriptions.
# Find the "Red Hat Employee Subscription" and attach it to the
# recently-created activation key.
for sub in entities.Subscription(organization=org).search():
if sub.read_json()['product_name'] == DEFAULT_SUBSCRIPTION_NAME:
# 'quantity' must be 1, not subscription['quantity']. Greater
# values produce this error: "RuntimeError: Error: Only pools
# with multi-entitlement product subscriptions can be added to
# the activation key with a quantity greater than one."
activation_key.add_subscriptions(data={
'quantity': 1,
'subscription_id': sub.id,
})
break
# step 7.2: Enable product content
activation_key.content_override(data={'content_override': {
u'content_label': u'rhel-6-server-rhev-agent-rpms',
u'value': u'1',
}})
# Create VM
package_name = 'python-kitchen'
with VirtualMachine(distro='rhel66') as vm:
vm.install_katello_ca()
result = vm.register_contenthost(activation_key_name, org.label)
self.assertEqual(result.return_code, 0)
# Install contents from sat6 server
result = vm.run('yum install -y {0}'.format(package_name))
self.assertEqual(result.return_code, 0)
# Verify if package is installed by query it
result = vm.run('rpm -q {0}'.format(package_name))
self.assertEqual(result.return_code, 0)
| gpl-3.0 | -1,579,052,074,403,348,200 | 35.523589 | 115 | 0.593942 | false |
liberation/sesql | sesql/daemon/cmdline.py | 1 | 3231 | # -*- coding: utf-8 -*-
# Copyright (c) Pilot Systems and Libération, 2010-2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""Command line parser, should be converted to argparse ?"""
import sys, getopt
class CmdLine(object):
"The command line parser"
def __init__(self, argv):
self.argv = argv
self.all = []
self.longs = []
self.shorts = ""
self.convert = {}
self.values = {}
self.add_opt("help", "h",
help_msg = "display this help")
self.add_opt("version", "v",
help_msg = "display version number and exits")
def __getitem__(self, item):
return self.values[item]
def __setitem__(self, item, value):
self.values[item] = value
def has_key(self, key):
return self.values.has_key(key)
def items(self):
return self.values.items()
def add_opt(self, long, short = "", value = None, help_msg = ""):
"Adds an option to the list of known ones"
self.all.append((long, short, value, help_msg))
self.values[long] = value
self.convert["--" + long] = long
if(short):
self.convert["-" + short] = long
self.shorts = self.shorts + short
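        # getopt convention: an option that takes a value needs a trailing '=' on the
        # long form and a trailing ':' on the short form.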
if(not(value is None)):
self.longs.append(long + "=")
if(short):
self.shorts = self.shorts + ":"
else:
self.longs.append(long)
def parse_opt(self):
"Parse the command line"
try:
optlist, args = getopt.getopt(self.argv[1:], self.shorts, self.longs)
except getopt.GetoptError, s:
print self.argv[0] + ":", s, ". Try --help."
sys.exit(2)
self.args = args
for opt, val in optlist:
            # Map the raw option string (e.g. '--help' or '-h') to its canonical long name
while(self.convert.has_key(opt)):
opt = self.convert[opt]
if(val):
self.values[opt] = val
else:
self.values[opt] = True
def show_help(self, extra = ""):
print "Syntax: %s %s [<options>]" % (self.argv[0], extra)
print "Options:"
longest = max([ len(l) for l in self.convert.keys() ])
for long, short, value, help_msg in self.all:
default = value and "(default: %s)" % value or ""
name = "--" + long
name += " " * (longest - len(name))
if short:
name += ", -" + short
else:
name += " "
print " %s: %s %s" % (name, help_msg, default)
| gpl-2.0 | 5,738,396,317,340,861,000 | 32.298969 | 81 | 0.542105 | false |
wukan1986/kquant_data | kquant_data/wind/tdays.py | 1 | 1026 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
w.tdays("2017-02-02", "2017-03-02", "")
w.tdays("2017-02-02", "2017-03-02", "Days=Weekdays")
w.tdays("2017-02-02", "2017-03-02", "Days=Alldays")
w.tdays("2017-02-02", "2017-03-02", "TradingCalendar=SHFE")
"""
import pandas as pd
from .utils import asDateTime
def download_tdays(w, startdate, enddate, option=''):
"""
    Download trading-day data.
:param w:
:param startdate:
:param enddate:
:param option:
:return:
"""
w.asDateTime = asDateTime
w_tdays_data = w.tdays(startdate, enddate, option)
df = pd.DataFrame(w_tdays_data.Data, )
df = df.T
df.columns = ['date']
df['date'] = pd.to_datetime(df['date'])
return df
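# A minimal usage sketch (assumes `w` is a started WindPy client, as in the
# module docstring above; the file name is arbitrary):
#
#     df = download_tdays(w, "2017-02-02", "2017-03-02")
#     write_tdays("tdays.csv", df)
#     df = read_tdays("tdays.csv")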
def read_tdays(path):
try:
df = pd.read_csv(path, parse_dates=True)
except:
return None
df['date'] = pd.to_datetime(df['date'])
df.index = df['date']
return df
def write_tdays(path, df):
df.to_csv(path, date_format='%Y-%m-%d', encoding='utf-8', index=False)
| bsd-2-clause | -6,473,286,853,818,110,000 | 21.488889 | 74 | 0.593874 | false |
jumpinjackie/fdo-swig | Lang/Python/UnitTest/Src/ClientServicesTest.py | 1 | 2448 | #
# Copyright (C) 2004-2007 Autodesk, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser
# General Public License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import traceback
import string
import os.path
import os
from FDO import *
import unittest
class ClientServicesTest(unittest.TestCase):
"""
Unit test for the ClientServices classes. The Provider Registry and
the FdoIConnections are tested.
"""
def testClientServices(self):
"""FeatureAccessManager accessor functions should return the correct type"""
manager = FdoFeatureAccessManager.GetConnectionManager()
registry = FdoFeatureAccessManager.GetProviderRegistry()
providerCollection = registry.GetProviders()
# Verify the instance classnames
self.assert_(manager.__class__.__name__ == "IConnectionManager")
self.assert_(registry.__class__.__name__ == "IProviderRegistry")
self.assert_(providerCollection.__class__.__name__ == "FdoProviderCollection")
def testConnectionCreation(self):
"""Check that FdoIConnections can be created correctly"""
manager = FdoFeatureAccessManager.GetConnectionManager()
registry = FdoFeatureAccessManager.GetProviderRegistry()
providerCollection = registry.GetProviders()
# Iterate through each provider; instantiate the provider
for index in range(providerCollection.Count):
provider = providerCollection.GetItem(index)
name = provider.Name
self.assert_( provider.__class__.__name__ == 'FdoProvider')
# Unable to load the SDF provider for some reason.
if name == "OSGeo.SDF.3.9":
connection = manager.CreateConnection(name)
self.assert_(connection.__class__.__name__ == 'FdoIConnection')
# Check if the library exists in the path
path = provider.LibraryPath
if "SDFProvider.dll" not in path and "libSDFProvider.so" not in path:
self.fail("Invalid provider.GetLibraryPath(). Path was: " + path)
| lgpl-2.1 | -477,665,189,705,039,550 | 35.537313 | 80 | 0.743056 | false |
DDMAL/Gamera | gamera/plugins/segmentation.py | 1 | 8124 | #
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom, and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gamera.plugin import PluginFunction, PluginModule
from gamera.args import ImageType, Args, ImageList, FloatVector, Float, Int
from gamera.enums import ONEBIT
from gamera import util
import _segmentation
class Segmenter(PluginFunction):
self_type = ImageType([ONEBIT])
return_type = ImageList("ccs")
doc_examples = [(ONEBIT,)]
class cc_analysis(Segmenter):
"""
Performs connected component analysis on the image.
This algorithm assumes 8-connected components, meaning any two
pixels are considered "connected" if they are adjacent in any
direction, including diagonally.
The original image will have all of its pixels "labeled" with a
number representing each connected component. This is so the
connected components can share data with their source image and
makes things much more efficient.
Returns a list of ccs found in the image. Since all the CC's
share the same data with the original image, changing the CC's
will affect the original. If you do not want this behavior, use
the image_copy_ function on each of the CCs::
ccs = [x.image_copy() for x in ccs]
.. _image_copy: utility.html#image-copy
"""
pass
class cc_and_cluster(Segmenter):
"""
Performs connected component analysis using cc_analysis_ and then
clusters the CC's according to their similarity.
TODO: We need some more detailed documentation here.
"""
pure_python = True
args = Args([Float('ratio', default=1.0), Int('distance', default=2)])
return_type = ImageList("ccs")
def __call__(image, ratio=1.0, distance=2):
from gamera import cluster
cc = image.cc_analysis()
return cluster.cluster(cc, ratio, distance)
__call__ = staticmethod(__call__)
doc_examples = [(ONEBIT,)]
class splitx(Segmenter):
"""
Splits an image vertically.
The split point is determined automatically by finding a valley in
the projections near *center*.
This function is overloaded to work both with a single value
and a list of splitting point candidates as input.
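    For example (with a hypothetical onebit image ``image``)::
      pieces = image.splitx(0.5)           # split near the middle column
      pieces = image.splitx([0.25, 0.75])  # supply several candidate points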
"""
args = Args([FloatVector("center", default=[0.5])])
doc_examples = [(ONEBIT,)]
def __call__(self, center=0.5):
if not util.is_sequence(center):
return _segmentation.splitx(self, [center])
else:
return _segmentation.splitx(self, center)
__call__ = staticmethod(__call__)
author = "Michael Droettboom, Karl MacMillan and Christoph Dalitz"
class splitx_max(Segmenter):
"""Splits an image vertically.
The split point is determined automatically by finding a peak in
the projections near *center*.
This function is overloaded to work both with a single value and a
    list of splitting point candidates as input.
"""
args = Args([FloatVector("center", default=[0.5])])
def __call__(self, center=0.5):
if not util.is_sequence(center):
return _segmentation.splitx_max(self, [center])
else:
return _segmentation.splitx_max(self, center)
__call__ = staticmethod(__call__)
author = "Michael Droettboom, Karl MacMillan and Christoph Dalitz"
class splity(Segmenter):
"""
Splits an image horizontally.
The split point is determined automatically by finding a valley in
the projections near *center*.
This function is overloaded to work both with a single value and a
    list of splitting point candidates as input.
"""
args = Args([FloatVector("center", default=[0.5])])
def __call__(self, center=[0.5]):
if not util.is_sequence(center):
return _segmentation.splity(self, [center])
else:
return _segmentation.splity(self, center)
__call__ = staticmethod(__call__)
author = "Michael Droettboom, Karl MacMillan and Christoph Dalitz"
class splitx_base(Segmenter):
pure_python = True
return_type = ImageList("splits")
class splitx_left(splitx_base):
"""
Splits an image vertically.
The split point is determined automatically by finding a valley in
the projections near the left of the image.
"""
_center = 0.25
def __call__(self):
return self.splitx(0.25)
__call__ = staticmethod(__call__)
class splitx_right(splitx_base):
"""
Splits an image vertically.
The split point is determined automatically by finding a valley in
the projections near the right of the image.
"""
_center = 0.75
def __call__(self):
return self.splitx(0.75)
__call__ = staticmethod(__call__)
class splity_base(Segmenter):
pure_python = True
return_type = ImageList("splits")
class splity_top(splity_base):
"""
Splits an image horizontally.
The split point is determined automatically by finding a valley in
the projections near the top of the image.
"""
_center = 0.25
def __call__(self):
return self.splity(0.25)
__call__ = staticmethod(__call__)
class splity_bottom(splity_base):
"""
Splits an image horizontally.
The split point is determined automatically by finding a valley in
the projections near the bottom of the image.
"""
_center = 0.75
def __call__(self):
return self.splity(0.75)
__call__ = staticmethod(__call__)
# connected-component filters
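#
# Each of the following helpers takes a list of connected components (ccs)
# and a size threshold.  CCs failing the test are erased from the source
# image with fill_white(); the CCs that survive are returned as a new list.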
def filter_wide(ccs, max_width):
tmp = []
for x in ccs:
if x.ncols > max_width:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_narrow(ccs, min_width):
tmp = []
for x in ccs:
if x.ncols < min_width:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_tall(ccs, max_height):
tmp = []
for x in ccs:
if x.nrows > max_height:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_short(ccs, min_height):
tmp = []
for x in ccs:
if x.nrows < min_height:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_small(ccs, min_size):
tmp = []
for x in ccs:
if x.nrows < min_size or x.ncols < min_size:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_large(ccs, max_size):
tmp = []
for x in ccs:
if x.nrows > max_size or x.ncols > max_size:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_black_area_small(ccs, min_size):
tmp = []
for x in ccs:
if x.black_area()[0] < min_size:
x.fill_white()
else:
tmp.append(x)
return tmp
def filter_black_area_large(ccs, max_size):
tmp = []
for x in ccs:
if x.black_area()[0] > max_size:
x.fill_white()
else:
tmp.append(x)
return tmp
class SegmentationModule(PluginModule):
category = "Segmentation"
cpp_headers = ["segmentation.hpp"]
functions = [cc_analysis, cc_and_cluster, splitx, splity,
splitx_left, splitx_right, splity_top, splity_bottom,
splitx_max]
author = "Michael Droettboom and Karl MacMillan"
url = "http://gamera.sourceforge.net/"
module = SegmentationModule()
del Segmenter
del splitx_base
del splity_base
| gpl-2.0 | -1,454,504,026,732,024,300 | 26.08 | 81 | 0.641064 | false |
williballenthin/synapse | synapse/tests/test_swarm_runtime.py | 1 | 8937 |
import time
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.telepath as s_telepath
import synapse.lib.service as s_service
import synapse.lib.userauth as s_userauth
import synapse.swarm.runtime as s_runtime
from synapse.tests.common import *
class SwarmRunBase(SynTest):
def getSwarmEnv(self):
tenv = TestEnv()
core0 = s_cortex.openurl('ram://')
core1 = s_cortex.openurl('ram://')
tenv.add('core0',core0,fini=True)
tenv.add('core1',core1,fini=True)
tufo0 = core0.formTufoByProp('foo:bar','baz',vvv='visi')
tufo1 = core0.formTufoByProp('foo:bar','faz',vvv='visi')
tufo2 = core1.formTufoByProp('foo:bar','lol',vvv='visi')
tufo3 = core1.formTufoByProp('foo:bar','hai',vvv='visi')
tufo4 = core0.formTufoByProp('zzz:woot',10,vvv='visi')
tufo5 = core1.formTufoByProp('zzz:woot',12,vvv='romp')
tenv.add('tufo0',tufo0)
tenv.add('tufo1',tufo1)
tenv.add('tufo2',tufo2)
tenv.add('tufo3',tufo3)
dmon = s_daemon.Daemon()
link = dmon.listen('tcp://127.0.0.1:0')
tenv.add('link',link)
tenv.add('dmon',dmon,fini=True)
port = link[1].get('port')
svcbus = s_service.SvcBus()
tenv.add('svcbus',svcbus,fini=True)
dmon.share('syn.svcbus',svcbus)
svcrmi = s_telepath.openurl('tcp://127.0.0.1/syn.svcbus', port=port)
tenv.add('svcrmi',svcrmi,fini=True)
s_service.runSynSvc('cortex',core0,svcrmi,tags=('hehe.haha',))
s_service.runSynSvc('cortex',core1,svcrmi,tags=('hehe.hoho',))
runt = s_runtime.Runtime(svcrmi)
tenv.add('runt',runt,fini=True)
return tenv
class SwarmRunTest(SwarmRunBase):
def test_swarm_runtime_eq(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('foo:bar="baz"')
data = answ.get('data')
self.assertEqual( data[0][0], tenv.tufo0[0] )
# FIXME check for other expected results info!
answ = tenv.runt.ask('foo:bar:vvv')
data = answ.get('data')
self.assertEqual( len(data), 4 )
answ = tenv.runt.ask('hehe.haha/foo:bar:vvv')
data = answ.get('data')
self.assertEqual( len(data), 2 )
answ = tenv.runt.ask('hehe.haha/foo:bar:vvv="visi"')
data = answ.get('data')
self.assertEqual( len(data), 2 )
tenv.fini()
def test_swarm_runtime_pivot(self):
tenv = self.getSwarmEnv()
data = tenv.runt.eval('foo:bar="baz" foo:bar:vvv->foo:bar:vvv')
self.assertEqual( len(data), 4 )
tenv.fini()
def test_swarm_runtime_opts(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('%foo')
self.assertEqual( answ['options'].get('foo'), 1 )
answ = tenv.runt.ask('opts(foo=10)')
self.assertEqual( answ['options'].get('foo'), 10 )
answ = tenv.runt.ask('%foo=10')
self.assertEqual( answ['options'].get('foo'), 10 )
answ = tenv.runt.ask('opts(foo="bar")')
self.assertEqual( answ['options'].get('foo'), 'bar' )
answ = tenv.runt.ask('%foo="bar"')
self.assertEqual( answ['options'].get('foo'), 'bar' )
tenv.fini()
def test_swarm_runtime_opts_uniq(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('%uniq foo:bar="baz" foo:bar="baz"')
self.assertEqual( len(answ['data']), 1 )
answ = tenv.runt.ask('%uniq=0 foo:bar="baz" foo:bar="baz"')
self.assertEqual( len(answ['data']), 2 )
tenv.fini()
def test_swarm_runtime_join(self):
tenv = self.getSwarmEnv()
answ = tenv.runt.ask('foo:bar="baz" join("foo:bar:vvv")')
data = answ.get('data')
self.assertEqual( len(data), 4 )
answ = tenv.runt.ask('foo:bar="baz" join("zzz:woot:vvv","foo:bar:vvv")')
data = answ.get('data')
self.assertEqual( len(data), 2 )
tenv.fini()
def test_swarm_runtime_gele(self):
env = self.getSwarmEnv()
answ = env.runt.ask('zzz:woot>=11')
data = answ.get('data')
self.assertEqual( len(data), 1 )
self.assertEqual( data[0][1].get('zzz:woot'), 12 )
answ = env.runt.ask('zzz:woot>10')
data = answ.get('data')
self.assertEqual( len(data), 1 )
self.assertEqual( data[0][1].get('zzz:woot'), 12 )
answ = env.runt.ask('zzz:woot>=10')
data = answ.get('data')
self.assertEqual( len(data), 2 )
answ = env.runt.ask('zzz:woot<=11')
data = answ.get('data')
self.assertEqual( len(data), 1 )
self.assertEqual( data[0][1].get('zzz:woot'), 10 )
answ = env.runt.ask('zzz:woot<12')
data = answ.get('data')
self.assertEqual( len(data), 1 )
self.assertEqual( data[0][1].get('zzz:woot'), 10 )
answ = env.runt.ask('zzz:woot<=13')
data = answ.get('data')
self.assertEqual( len(data), 2 )
answ = env.runt.ask('zzz:woot -zzz:woot<=11')
data = answ.get('data')
self.assertEqual( len(data), 1 )
env.fini()
def test_swarm_runtime_regex(self):
env = self.getSwarmEnv()
answ = env.runt.ask('foo:bar +foo:bar~="^l"')
data = answ.get('data')
self.assertEqual( data[0][1].get('foo:bar'), 'lol')
answ = env.runt.ask('foo:bar +foo:bar~="^Q"')
self.assertEqual( len(answ.get('data')), 0)
answ = env.runt.ask('foo:bar +foo:bar~="^Q"')
self.assertEqual( len(answ.get('data')), 0)
answ = env.runt.ask('foo:bar -foo:bar~="^[a-z]{3}$"')
self.assertEqual( len(answ.get('data')), 0)
env.fini()
def test_swarm_runtime_or(self):
env = self.getSwarmEnv()
answ = env.runt.ask('foo:bar +foo:bar="baz"|foo:bar="faz"')
tufos = answ.get('data')
foobars = [ t[1].get('foo:bar') for t in tufos ]
foobars.sort()
self.assertEqual( foobars, ['baz','faz'] )
env.fini()
def test_swarm_runtime_and(self):
with self.getSwarmEnv() as env:
answ = env.runt.ask('foo:bar -foo:bar="baz" & foo:bar:vvv="newp" ')
tufos = answ.get('data')
foobars = [ t[1].get('foo:bar') for t in tufos ]
foobars.sort()
self.assertEqual( foobars, ['baz','faz','hai','lol'] )
def test_swarm_runtime_clear(self):
env = self.getSwarmEnv()
answ = env.runt.ask('foo:bar clear()')
tufos = answ.get('data')
self.assertEqual( len(tufos), 0 )
env.fini()
def test_swarm_runtime_saveload(self):
env = self.getSwarmEnv()
answ = env.runt.ask('foo:bar="baz" save("woot") clear() load("woot")')
tufos = answ.get('data')
self.assertEqual( len(tufos), 1 )
self.assertEqual( tufos[0][1].get('foo:bar'), 'baz' )
env.fini()
def test_swarm_runtime_has(self):
env = self.getSwarmEnv()
# use the lift code for has()
answ = env.runt.ask('foo:bar')
tufos = answ.get('data')
self.assertEqual( len(tufos), 4 )
self.assertEqual( tufos[0][1].get('tufo:form'), 'foo:bar' )
# use the filter code for has()
answ = env.runt.ask('tufo:form +foo:bar')
tufos = answ.get('data')
self.assertEqual( len(tufos), 4 )
self.assertEqual( tufos[0][1].get('tufo:form'), 'foo:bar' )
env.fini()
def test_swarm_runtime_maxtime(self):
env = self.getSwarmEnv()
self.assertRaises(HitStormLimit, env.runt.eval, 'foo:bar', timeout=0)
env.fini()
def test_swarm_runtime_by(self):
env = self.getSwarmEnv()
# test out long form using range
#answ = env.runt.ask('by("range","zzz:woot",(10,13))')
#print('answ: %r' % (answ,))
#tufos = answ.get('data')
#self.eq( len(tufos), 2 )
answ = env.runt.ask('zzz:woot*range=(10,13)')
tufos = answ.get('data')
self.eq( len(tufos), 2 )
answ = env.runt.ask('zzz:woot*range=(10,12)')
tufos = answ.get('data')
self.eq( len(tufos), 1 )
answ = env.runt.ask('zzz:woot#1*range=(10,13)')
tufos = answ.get('data')
self.eq( len(tufos), 2 )
env.fini()
def test_swarm_runtime_frob(self):
env = self.getSwarmEnv()
env.core0.formTufoByProp('inet:ipv4', 0x01020304)
answ = env.runt.ask('inet:ipv4="1.2.3.4"') #foo:bar="baz" save("woot") clear() load("woot")')
tufos = answ.get('data')
self.assertEqual( len(tufos), 1 )
self.assertEqual( tufos[0][1].get('inet:ipv4'), 0x01020304 )
answ = env.runt.ask('inet:ipv4=0x01020304')
tufos = answ.get('data')
self.assertEqual( len(tufos), 1 )
self.assertEqual( tufos[0][1].get('inet:ipv4'), 0x01020304 )
env.fini()
| apache-2.0 | 7,544,642,235,553,549,000 | 26 | 101 | 0.557122 | false |
bitcoinfees/bitcoin-feemodel | feemodel/tests/test_pseudoproxy.py | 1 | 1032 | import unittest
from feemodel.txmempool import MemBlock
from feemodel.tests.pseudoproxy import proxy
from feemodel.tests.config import test_memblock_dbfile as dbfile
AVAILABLE_HEIGHTS = range(333931, 333954) + [334655, 334656]
class PseudoProxyTests(unittest.TestCase):
def test_A(self):
# Just test that no KeyError is raised: we have the blocks
# in AVAILABLE_HEIGHTS
for height in AVAILABLE_HEIGHTS:
blockhash = proxy.getblockhash(height)
block = proxy.getblock(blockhash)
self.assertTrue(block)
def test_B(self):
# Test the setting of rawmempool
proxy.set_rawmempool(333931)
rawmempool = proxy.getrawmempool()
b = MemBlock.read(333931, dbfile=dbfile)
self.assertEqual(set(b.entries), set(rawmempool))
for txid, rawentry in rawmempool.items():
for key, val in rawentry.items():
self.assertEqual(val, getattr(b.entries[txid], key))
if __name__ == "__main__":
unittest.main()
| mit | 3,947,077,472,323,034,000 | 32.290323 | 68 | 0.661822 | false |
bbglab/wok | wok/core/runstates.py | 1 | 2959 | ###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses
#
###############################################################################
class RunState(object):
def __init__(self, id, title, symbol):
self.id = id
self.title = title
self.symbol = symbol
def __eq__(self, other):
return isinstance(other, RunState) and self.id == other.id
def __hash__(self):
return hash(self.id)
def __str__(self):
return self.title
def __repr__(self):
return "{}({})".format(self.title, self.id)
class UndefinedState(Exception):
def __init__(self, id=None, symbol=None, title=None):
if id is not None:
Exception.__init__(self, "Undefined state id: {}".format(id))
elif symbol is not None:
Exception.__init__(self, "Undefined state symbol: {}".format(symbol))
elif title is not None:
Exception.__init__(self, "Undefined state title: {}".format(title))
else:
Exception.__init__(self, "Undefined state")
# Primary states
READY = RunState(1, "ready", "RD")
WAITING = RunState(2, "waiting", "W")
RUNNING = RunState(3, "running", "R")
PAUSED = RunState(4, "paused", "P")
ABORTING = RunState(5, "aborting", "AG")
FINISHED = RunState(6, "finished", "F")
RETRY = RunState(7, "retry", "RT")
FAILED = RunState(8, "failed", "E")
ABORTED = RunState(9, "aborted", "A")
UNKNOWN = RunState(13, "unknown", "U")  # ids must be unique: they are the keys of __ID_MAP below
STATES = [READY, WAITING, RUNNING, PAUSED, ABORTING, FINISHED, RETRY, FAILED, ABORTED, UNKNOWN]
TERMINAL_STATES = [FINISHED, FAILED, ABORTED]
# Sub states
JOB_CREATED = RunState(10, "job_created", "JC")
LOGS_RETRIEVAL = RunState(11, "logs_retrieval", "LR")
JOINING = RunState(12, "joining", "J")
SUBSTATES = [JOB_CREATED, LOGS_RETRIEVAL, JOINING]
# -----------------------------------
__ID_MAP = {}
__SYMBOL_MAP = {}
__TITLE_MAP = {}
for s in STATES + SUBSTATES:
__ID_MAP[s.id] = s
__SYMBOL_MAP[s.symbol] = s
__TITLE_MAP[s.title] = s
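# Example lookups using the helpers defined below:
#   from_id(3)             -> RUNNING
#   from_symbol("R")       -> RUNNING
#   from_title("running")  -> RUNNING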
def from_title(title):
if title not in __TITLE_MAP:
raise UndefinedState(title=title)
return __TITLE_MAP[title]
def from_id(id):
if id not in __ID_MAP:
raise UndefinedState(id=id)
return __ID_MAP[id]
def from_symbol(symbol):
if symbol not in __SYMBOL_MAP:
raise UndefinedState(symbol=symbol)
return __SYMBOL_MAP[symbol] | gpl-3.0 | -8,364,119,999,754,507,000 | 28.6 | 95 | 0.630281 | false |
rqlite/pyrqlite | src/pyrqlite/connections.py | 1 | 5326 |
from __future__ import unicode_literals
import codecs
import logging
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
# pylint: disable=import-error
from httplib import HTTPConnection, HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
# pylint: disable=import-error
from urlparse import urlparse
from .constants import (
UNLIMITED_REDIRECTS,
)
from .cursors import Cursor
from ._ephemeral import EphemeralRqlited as _EphemeralRqlited
from .extensions import PARSE_DECLTYPES, PARSE_COLNAMES
class Connection(object):
from .exceptions import (
Warning,
Error,
InterfaceError,
DatabaseError,
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
)
def __init__(self, scheme='http', host='localhost', port=4001,
user=None, password=None, connect_timeout=None,
detect_types=0, max_redirects=UNLIMITED_REDIRECTS):
self.messages = []
self.scheme = scheme
self.host = host
self.port = port
self._headers = {}
if not (user is None or password is None):
self._headers['Authorization'] = 'Basic ' + \
codecs.encode('{}:{}'.format(user, password).encode('utf-8'),
'base64').decode('utf-8').rstrip('\n')
self.connect_timeout = connect_timeout
self.max_redirects = max_redirects
self.detect_types = detect_types
self.parse_decltypes = detect_types & PARSE_DECLTYPES
self.parse_colnames = detect_types & PARSE_COLNAMES
self._ephemeral = None
if scheme == ':memory:':
self._ephemeral = _EphemeralRqlited().__enter__()
self.host, self.port = self._ephemeral.http
self._connection = self._init_connection()
def _init_connection(self):
if self.scheme in ('http', ':memory:'):
cls = HTTPConnection
elif self.scheme == 'https':
cls = HTTPSConnection
else:
raise Connection.ProgrammingError('Unsupported scheme %r' % self.scheme)
return cls(self.host, port=self.port,
timeout=None if self.connect_timeout is None else float(self.connect_timeout))
def _retry_request(self, method, uri, body=None, headers={}):
tries = 10
while tries:
tries -= 1
try:
self._connection.request(method, uri, body=body,
headers=dict(self._headers, **headers))
return self._connection.getresponse()
except Exception:
if not tries:
raise
self._connection.close()
self._connection = self._init_connection()
def _fetch_response(self, method, uri, body=None, headers={}):
"""
Fetch a response, handling redirection.
"""
response = self._retry_request(method, uri, body=body, headers=headers)
redirects = 0
while response.status == 301 and \
response.getheader('Location') is not None and \
(self.max_redirects == UNLIMITED_REDIRECTS or redirects < self.max_redirects):
redirects += 1
uri = response.getheader('Location')
location = urlparse(uri)
logging.getLogger(__name__).debug("status: %s reason: '%s' location: '%s'",
response.status, response.reason, uri)
if self.host != location.hostname or self.port != location.port:
self._connection.close()
self.host = location.hostname
self.port = location.port
self._connection = self._init_connection()
response = self._retry_request(method, uri, body=body, headers=headers)
return response
def close(self):
"""Close the connection now (rather than whenever .__del__() is
called).
The connection will be unusable from this point forward; an
Error (or subclass) exception will be raised if any operation
is attempted with the connection. The same applies to all
cursor objects trying to use the connection. Note that closing
a connection without committing the changes first will cause an
implicit rollback to be performed."""
self._connection.close()
if self._ephemeral is not None:
self._ephemeral.__exit__(None, None, None)
self._ephemeral = None
def __del__(self):
self.close()
def commit(self):
"""Database modules that do not support transactions should
implement this method with void functionality."""
pass
def rollback(self):
"""This method is optional since not all databases provide
transaction support. """
pass
def cursor(self, factory=None):
"""Return a new Cursor Object using the connection."""
if factory:
return factory(self)
else:
return Cursor(self)
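    # A minimal usage sketch (assumes an rqlite node reachable on localhost:4001):
    #
    #     conn = Connection(host='localhost', port=4001)
    #     cur = conn.cursor()
    #     cur.execute('SELECT 1')
    #     conn.close()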
def execute(self, *args, **kwargs):
return self.cursor().execute(*args, **kwargs)
| mit | -7,211,873,513,507,476,000 | 33.36129 | 97 | 0.591626 | false |
yaukwankiu/armor | geometry/regrid_backup2013-08-26.py | 1 | 4802 | #regrid.py
# to redraw the grids, to interpolate, etc
"""
USE:
from armor import pattern
from armor.geometry import regrid
reload(pattern); reload(regrid)
a = pattern.a ; c = pattern.c ; a.load(); c.load(); e = regrid.regrid(a,c)
e.show()
"""
""" input: DBZ object, new_horizontal_dimension, new_vertical_dimension,
, new coords for the lowerleft corner
output: new DBZ object"""
import numpy as np
from scipy import interpolate # added 2013-08-26
rbs = interpolate.RectBivariateSpline
from armor import defaultParameters
from armor.defaultParameters import *
from armor import pattern
DBZ = pattern.DBZ
def interpolate(arr_old, arr_new, I_old, J_old):
# deprecated 2013-08-26
"""
    input:  arr_old (source array), arr_new (an array with the target shape),
            I_old, J_old (arrays giving, for every output cell, its fractional
            row/column position in arr_old)
    output: a new array of arr_new's shape, bilinearly interpolated from arr_old
(int(x),
int(y)+1)
+ + (int(x)+1, int(y)+1)
(x,y)
+ + (int(x)+1, int(y))
(int(x),
int(y))
be careful - floor(x)=ceil(x)=x for integer x,
so we really want floor(x) and floor(x)+1
"""
I = I_old.copy()
J = J_old.copy()
arr_new2 = arr_new * 0
arr_new2 += (-999)
height_new, width_new = arr_new.shape
height_old, width_old = arr_old.shape
# set all out-of-bounds to (0,0) for convenience
I = (I>=0) * (I<height_old-1) * I #e.g. i>=0 and i<=4 for i=[0,1,2,3,4], width=5
J = (J>=0) * (J<width_old -1) * J
# the loopings are necessary since we don't know beforehand where the (i_old, j_old)
# would land
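    # Standard bilinear interpolation: for a fractional source position with
    # integer part (i0, j0) and fractional part (i_frac, j_frac), the value is
    #   (1-i_frac)*(1-j_frac)*f(i0,j0) + (1-i_frac)*j_frac*f(i0,j1)
    #   +   i_frac*(1-j_frac)*f(i1,j0) +     i_frac*j_frac*f(i1,j1)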
for i in range(height_new):
for j in range(width_new):
i0 = int(I[i,j])
j0 = int(J[i,j])
i1 = i0 + 1
j1 = j0 + 1
            i_frac = I[i,j] % 1
            j_frac = J[i,j] % 1
f00 = arr_old[i0,j0]
f01 = arr_old[i0,j1]
f10 = arr_old[i1,j0]
f11 = arr_old[i1,j1]
            arr_new2[i, j] = (1-i_frac)*(1-j_frac) * f00 + \
                             (1-i_frac)*( j_frac) * f01 + \
                             ( i_frac)*(1-j_frac) * f10 + \
                             ( i_frac)*( j_frac) * f11
return arr_new2
def regrid(a, b):
"""
a is the object to be resized
b provides the relevant shape information for the process
"""
gridSizeOld = a.matrix.shape
gridSizeNew = b.matrix.shape
height, width = gridSizeNew
X, Y = np.meshgrid(range(width), range(height))
J, I = X, Y
# I, J = I_new, J_new
a_new = DBZ(name=a.name+"rescaled to "+str(gridSizeNew),
matrix = np.zeros(gridSizeNew),
lowerLeftCornerLatitudeLongitude=b.lowerLeftCornerLatitudeLongitude,
)
latOld, longOld = a.lowerLeftCornerLatitudeLongitude
latNew, longNew = b.lowerLeftCornerLatitudeLongitude
latDegreePerGridOld = 1.*(a.upperRightCornerLatitudeLongitude[0]-latOld)/gridSizeOld[0]
longDegreePerGridOld= 1.*(a.upperRightCornerLatitudeLongitude[1]-longOld)/gridSizeOld[1]
    latDegreePerGridNew = 1.*(b.upperRightCornerLatitudeLongitude[0]-latNew)/gridSizeNew[0]
    longDegreePerGridNew= 1.*(b.upperRightCornerLatitudeLongitude[1]-longNew)/gridSizeNew[1]
#I_old = (1.* I/gridSizeNew[0]+latNew -latOld) * gridSizeOld[0] # this is wrong
#J_old = (1.* J/gridSizeNew[0]+latNew -latOld) * gridSizeOld[0] # we should convert
# with the degree per grid
# as the replacement below
I_old = (1.* I*latDegreePerGridNew +latNew -latOld) / latDegreePerGridOld
J_old = (1.* J*longDegreePerGridNew +longNew -longOld) /longDegreePerGridOld
# debug
print I, J
print I_old, J_old, I_old.shape
print "latDegreePerGridOld , longDegreePerGridOld", latDegreePerGridOld , longDegreePerGridOld
print "latDegreePerGridNew , longDegreePerGridNew", latDegreePerGridNew , longDegreePerGridNew
print "gridSizeOld", gridSizeOld
print "gridSizeNew", gridSizeNew
print "I_old[0,0], J_old[0,0]", I_old[0,0], J_old[0,0]
testmat = np.zeros((1000,1000))
for ii in range(I_old.shape[0]):
for jj in range(I_old.shape[1]):
testmat[I_old[ii,jj]*(I_old[ii,jj]>0), J_old[ii,jj]*(J_old[ii,jj]>0)] = 1
from matplotlib import pyplot as plt
plt.imshow(testmat)
plt.show()
# end debug
arr_old = a.matrix
arr_new = np.zeros((height, width))
a_new.matrix = interpolate(arr_old, arr_new, I_old, J_old)
return a_new
########################
# samples
a = pattern.a
c = pattern.c
| cc0-1.0 | -3,683,625,517,059,778,000 | 33.308824 | 99 | 0.551437 | false |
markfinal/BuildAMation | codingtools/strip_trailing_whitespace.py | 1 | 1371 | #!/usr/bin/python
from convert_line_endings import convert_line_endings
import os
import re
import sys
def strip_trailing_whitespace(file_path):
with open(file_path, mode='rt') as infile:
lines = infile.readlines()
with open(file_path, mode='wt') as outfile:
for line in lines:
stripped = re.sub('[ \t]+$', '', line)
outfile.write(stripped)
if sys.platform.startswith("win"):
convert_line_endings(file_path)
def process_path(path, extension_list):
if os.path.isfile(path):
strip_trailing_whitespace(path)
else:
for root, dirs, files in os.walk(path):
# ignore hidden files and directories
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
for file_path in files:
file_ext = os.path.splitext(file_path)[1]
if file_ext in extension_list:
full_path = os.path.join(root, file_path)
strip_trailing_whitespace(full_path)
if __name__ == "__main__":
if len(sys.argv) > 1:
extensions = sys.argv[2:]
if not extensions:
extensions = ['.cs']
process_path(sys.argv[1], extensions)
else:
process_path('.', ['.cs'])
process_path('tests', ['.h', '.c', '.cpp', '.m', '.mm'])
| bsd-3-clause | -3,944,897,385,696,669,000 | 31.642857 | 64 | 0.552881 | false |
googleapis/googleapis-gen | google/api/servicecontrol/v1/google-cloud-servicecontrol-v1-py/google/cloud/servicecontrol_v1/types/distribution.py | 1 | 8021 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import distribution_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.api.servicecontrol.v1',
manifest={
'Distribution',
},
)
class Distribution(proto.Message):
r"""Distribution represents a frequency distribution of double-
    valued sample points. It contains the size of the population of
sample points plus additional optional information:
- the arithmetic mean of the samples
- the minimum and maximum of the samples
    - the sum-squared-deviation of the samples, used to compute variance
    - a histogram of the values of the sample points
Attributes:
count (int):
The total number of samples in the
distribution. Must be >= 0.
mean (float):
The arithmetic mean of the samples in the distribution. If
``count`` is zero then this field must be zero.
minimum (float):
The minimum of the population of values. Ignored if
``count`` is zero.
maximum (float):
The maximum of the population of values. Ignored if
``count`` is zero.
sum_of_squared_deviation (float):
The sum of squared deviations from the mean:
Sum[i=1..count]((x_i - mean)^2) where each x_i is a sample
values. If ``count`` is zero then this field must be zero,
otherwise validation of the request fails.
bucket_counts (Sequence[int]):
The number of samples in each histogram bucket.
``bucket_counts`` are optional. If present, they must sum to
the ``count`` value.
The buckets are defined below in ``bucket_option``. There
are N buckets. ``bucket_counts[0]`` is the number of samples
in the underflow bucket. ``bucket_counts[1]`` to
``bucket_counts[N-1]`` are the numbers of samples in each of
the finite buckets. And
            ``bucket_counts[N]`` is the number of samples in the overflow bucket. See the comments of ``bucket_option``
below for more details.
Any suffix of trailing zeros may be omitted.
linear_buckets (google.cloud.servicecontrol_v1.types.Distribution.LinearBuckets):
Buckets with constant width.
exponential_buckets (google.cloud.servicecontrol_v1.types.Distribution.ExponentialBuckets):
Buckets with exponentially growing width.
explicit_buckets (google.cloud.servicecontrol_v1.types.Distribution.ExplicitBuckets):
Buckets with arbitrary user-provided width.
exemplars (Sequence[google.api.distribution_pb2.Exemplar]):
Example points. Must be in increasing order of ``value``
field.
"""
class LinearBuckets(proto.Message):
r"""Describing buckets with constant width.
Attributes:
num_finite_buckets (int):
The number of finite buckets. With the underflow and
overflow buckets, the total number of buckets is
``num_finite_buckets`` + 2. See comments on
``bucket_options`` for details.
width (float):
The i'th linear bucket covers the interval [offset + (i-1)
\* width, offset + i \* width) where i ranges from 1 to
num_finite_buckets, inclusive. Must be strictly positive.
offset (float):
The i'th linear bucket covers the interval [offset + (i-1)
\* width, offset + i \* width) where i ranges from 1 to
num_finite_buckets, inclusive.
"""
num_finite_buckets = proto.Field(
proto.INT32,
number=1,
)
width = proto.Field(
proto.DOUBLE,
number=2,
)
offset = proto.Field(
proto.DOUBLE,
number=3,
)
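        # Illustrative example (values are hypothetical, not part of the proto
        # definition): num_finite_buckets=2, width=5.0, offset=0.0 yields the
        # buckets (-inf, 0), [0, 5), [5, 10) and [10, +inf).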
class ExponentialBuckets(proto.Message):
r"""Describing buckets with exponentially growing width.
Attributes:
num_finite_buckets (int):
The number of finite buckets. With the underflow and
overflow buckets, the total number of buckets is
``num_finite_buckets`` + 2. See comments on
``bucket_options`` for details.
growth_factor (float):
The i'th exponential bucket covers the interval [scale \*
growth_factor^(i-1), scale \* growth_factor^i) where i
ranges from 1 to num_finite_buckets inclusive. Must be
larger than 1.0.
scale (float):
The i'th exponential bucket covers the interval [scale \*
growth_factor^(i-1), scale \* growth_factor^i) where i
ranges from 1 to num_finite_buckets inclusive. Must be > 0.
"""
num_finite_buckets = proto.Field(
proto.INT32,
number=1,
)
growth_factor = proto.Field(
proto.DOUBLE,
number=2,
)
scale = proto.Field(
proto.DOUBLE,
number=3,
)
class ExplicitBuckets(proto.Message):
r"""Describing buckets with arbitrary user-provided width.
Attributes:
bounds (Sequence[float]):
'bound' is a list of strictly increasing boundaries between
buckets. Note that a list of length N-1 defines N buckets
because of fenceposting. See comments on ``bucket_options``
for details.
The i'th finite bucket covers the interval [bound[i-1],
bound[i]) where i ranges from 1 to bound_size() - 1. Note
that there are no finite buckets at all if 'bound' only
contains a single element; in that special case the single
bound defines the boundary between the underflow and
overflow buckets.
                bucket number                 lower bound   upper bound
                i == 0 (underflow)            -inf          bound[i]
                0 < i < bound_size()          bound[i-1]    bound[i]
                i == bound_size() (overflow)  bound[i-1]    +inf
"""
bounds = proto.RepeatedField(
proto.DOUBLE,
number=1,
)
count = proto.Field(
proto.INT64,
number=1,
)
mean = proto.Field(
proto.DOUBLE,
number=2,
)
minimum = proto.Field(
proto.DOUBLE,
number=3,
)
maximum = proto.Field(
proto.DOUBLE,
number=4,
)
sum_of_squared_deviation = proto.Field(
proto.DOUBLE,
number=5,
)
bucket_counts = proto.RepeatedField(
proto.INT64,
number=6,
)
linear_buckets = proto.Field(
proto.MESSAGE,
number=7,
oneof='bucket_option',
message=LinearBuckets,
)
exponential_buckets = proto.Field(
proto.MESSAGE,
number=8,
oneof='bucket_option',
message=ExponentialBuckets,
)
explicit_buckets = proto.Field(
proto.MESSAGE,
number=9,
oneof='bucket_option',
message=ExplicitBuckets,
)
exemplars = proto.RepeatedField(
proto.MESSAGE,
number=10,
message=distribution_pb2.Distribution.Exemplar,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 8,883,325,737,808,401,000 | 35.459091 | 118 | 0.588954 | false |
Featuretools/featuretools | featuretools/tests/entityset_tests/test_es_metadata.py | 1 | 7723 | import pandas as pd
import pytest
import featuretools as ft
from featuretools import EntitySet, Relationship, variable_types
from featuretools.tests.testing_utils import backward_path, forward_path
def test_cannot_re_add_relationships_that_already_exists(es):
before_len = len(es.relationships)
es.add_relationship(es.relationships[0])
after_len = len(es.relationships)
assert before_len == after_len
def test_add_relationships_convert_type(es):
for r in es.relationships:
assert type(r.parent_variable) == variable_types.Index
assert type(r.child_variable) == variable_types.Id
def test_get_forward_entities(es):
entities = es.get_forward_entities('log')
path_to_sessions = forward_path(es, ['log', 'sessions'])
path_to_products = forward_path(es, ['log', 'products'])
assert list(entities) == [('sessions', path_to_sessions), ('products', path_to_products)]
def test_get_backward_entities(es):
entities = es.get_backward_entities('customers')
path_to_sessions = backward_path(es, ['customers', 'sessions'])
assert list(entities) == [('sessions', path_to_sessions)]
def test_get_forward_entities_deep(es):
entities = es.get_forward_entities('log', deep=True)
path_to_sessions = forward_path(es, ['log', 'sessions'])
path_to_products = forward_path(es, ['log', 'products'])
path_to_customers = forward_path(es, ['log', 'sessions', 'customers'])
path_to_regions = forward_path(es, ['log', 'sessions', 'customers', u'régions'])
path_to_cohorts = forward_path(es, ['log', 'sessions', 'customers', 'cohorts'])
assert list(entities) == [
('sessions', path_to_sessions),
('customers', path_to_customers),
('cohorts', path_to_cohorts),
(u'régions', path_to_regions),
('products', path_to_products),
]
def test_get_backward_entities_deep(es):
entities = es.get_backward_entities('customers', deep=True)
path_to_log = backward_path(es, ['customers', 'sessions', 'log'])
path_to_sessions = backward_path(es, ['customers', 'sessions'])
assert list(entities) == [('sessions', path_to_sessions), ('log', path_to_log)]
def test_get_forward_relationships(es):
relationships = es.get_forward_relationships('log')
assert len(relationships) == 2
assert relationships[0].parent_entity.id == 'sessions'
assert relationships[0].child_entity.id == 'log'
assert relationships[1].parent_entity.id == 'products'
assert relationships[1].child_entity.id == 'log'
relationships = es.get_forward_relationships('sessions')
assert len(relationships) == 1
assert relationships[0].parent_entity.id == 'customers'
assert relationships[0].child_entity.id == 'sessions'
def test_get_backward_relationships(es):
relationships = es.get_backward_relationships('sessions')
assert len(relationships) == 1
assert relationships[0].parent_entity.id == 'sessions'
assert relationships[0].child_entity.id == 'log'
relationships = es.get_backward_relationships('customers')
assert len(relationships) == 1
assert relationships[0].parent_entity.id == 'customers'
assert relationships[0].child_entity.id == 'sessions'
def test_find_forward_paths(es):
paths = list(es.find_forward_paths('log', 'customers'))
assert len(paths) == 1
path = paths[0]
assert len(path) == 2
assert path[0].child_entity.id == 'log'
assert path[0].parent_entity.id == 'sessions'
assert path[1].child_entity.id == 'sessions'
assert path[1].parent_entity.id == 'customers'
def test_find_forward_paths_multiple_paths(diamond_es):
paths = list(diamond_es.find_forward_paths('transactions', 'regions'))
assert len(paths) == 2
path1, path2 = paths
r1, r2 = path1
assert r1.child_entity.id == 'transactions'
assert r1.parent_entity.id == 'stores'
assert r2.child_entity.id == 'stores'
assert r2.parent_entity.id == 'regions'
r1, r2 = path2
assert r1.child_entity.id == 'transactions'
assert r1.parent_entity.id == 'customers'
assert r2.child_entity.id == 'customers'
assert r2.parent_entity.id == 'regions'
def test_find_forward_paths_multiple_relationships(games_es):
paths = list(games_es.find_forward_paths('games', 'teams'))
assert len(paths) == 2
path1, path2 = paths
assert len(path1) == 1
assert len(path2) == 1
r1 = path1[0]
r2 = path2[0]
assert r1.child_entity.id == 'games'
assert r2.child_entity.id == 'games'
assert r1.parent_entity.id == 'teams'
assert r2.parent_entity.id == 'teams'
assert r1.child_variable.id == 'home_team_id'
assert r2.child_variable.id == 'away_team_id'
assert r1.parent_variable.id == 'id'
assert r2.parent_variable.id == 'id'
def test_find_forward_paths_ignores_loops():
employee_df = pd.DataFrame({'id': [0], 'manager_id': [0]})
entities = {'employees': (employee_df, 'id')}
relationships = [('employees', 'id', 'employees', 'manager_id')]
es = ft.EntitySet(entities=entities, relationships=relationships)
paths = list(es.find_forward_paths('employees', 'employees'))
assert len(paths) == 1
assert paths[0] == []
def test_find_backward_paths(es):
paths = list(es.find_backward_paths('customers', 'log'))
assert len(paths) == 1
path = paths[0]
assert len(path) == 2
assert path[0].child_entity.id == 'sessions'
assert path[0].parent_entity.id == 'customers'
assert path[1].child_entity.id == 'log'
assert path[1].parent_entity.id == 'sessions'
def test_find_backward_paths_multiple_paths(diamond_es):
paths = list(diamond_es.find_backward_paths('regions', 'transactions'))
assert len(paths) == 2
path1, path2 = paths
r1, r2 = path1
assert r1.child_entity.id == 'stores'
assert r1.parent_entity.id == 'regions'
assert r2.child_entity.id == 'transactions'
assert r2.parent_entity.id == 'stores'
r1, r2 = path2
assert r1.child_entity.id == 'customers'
assert r1.parent_entity.id == 'regions'
assert r2.child_entity.id == 'transactions'
assert r2.parent_entity.id == 'customers'
def test_find_backward_paths_multiple_relationships(games_es):
paths = list(games_es.find_backward_paths('teams', 'games'))
assert len(paths) == 2
path1, path2 = paths
assert len(path1) == 1
assert len(path2) == 1
r1 = path1[0]
r2 = path2[0]
assert r1.child_entity.id == 'games'
assert r2.child_entity.id == 'games'
assert r1.parent_entity.id == 'teams'
assert r2.parent_entity.id == 'teams'
assert r1.child_variable.id == 'home_team_id'
assert r2.child_variable.id == 'away_team_id'
assert r1.parent_variable.id == 'id'
assert r2.parent_variable.id == 'id'
def test_has_unique_path(diamond_es):
assert diamond_es.has_unique_forward_path('customers', 'regions')
assert not diamond_es.has_unique_forward_path('transactions', 'regions')
def test_raise_key_error_missing_entity(es):
error_text = "Entity this entity doesn't exist does not exist in ecommerce"
with pytest.raises(KeyError, match=error_text):
es["this entity doesn't exist"]
es_without_id = EntitySet()
error_text = "Entity this entity doesn't exist does not exist in entity set"
with pytest.raises(KeyError, match=error_text):
es_without_id["this entity doesn't exist"]
def test_add_parent_not_index_variable(es):
error_text = "Parent variable.*is not the index of entity Entity.*"
with pytest.raises(AttributeError, match=error_text):
es.add_relationship(Relationship(es[u'régions']['language'],
es['customers'][u'région_id']))
| bsd-3-clause | -6,780,506,672,667,664,000 | 33.927602 | 93 | 0.663946 | false |
digitalfox/yokadi | yokadi/ycli/taskcmd.py | 1 | 40969 | # -*- coding: UTF-8 -*-
"""
Task related commands.
@author: Aurélien Gâteau <[email protected]>
@author: Sébastien Renard <[email protected]>
@license: GPL v3 or later
"""
import os
import readline
import re
from datetime import datetime, timedelta
from dateutil import rrule
from sqlobject import LIKE, AND, OR, NOT, SQLObjectNotFound
from sqlobject.sqlbuilder import LEFTJOINOn
from yokadi.core.db import Config, Keyword, Project, Task, \
TaskKeyword, Recurrence
from yokadi.core import bugutils
from yokadi.core import dbutils
from yokadi.core import ydateutils
from yokadi.ycli import parseutils
from yokadi.ycli import tui
from yokadi.ycli.completers import ProjectCompleter, projectAndKeywordCompleter, \
taskIdCompleter, recurrenceCompleter, dueDateCompleter
from yokadi.core.yokadiexception import YokadiException, BadUsageException
from yokadi.ycli.textlistrenderer import TextListRenderer
from yokadi.ycli.xmllistrenderer import XmlListRenderer
from yokadi.ycli.csvlistrenderer import CsvListRenderer
from yokadi.ycli.htmllistrenderer import HtmlListRenderer
from yokadi.ycli.plainlistrenderer import PlainListRenderer
from yokadi.core.yokadioptionparser import YokadiOptionParser
gRendererClassDict = dict(
text=TextListRenderer,
xml=XmlListRenderer,
csv=CsvListRenderer,
html=HtmlListRenderer,
plain=PlainListRenderer,
)
NOTE_KEYWORD = "_note"
class TaskCmd(object):
def __init__(self):
self.lastTaskId = None # Last id created, used
self.lastProjectName = None # Last project name used
self.lastTaskIds = [] # Last list of ids selected with t_list
self.kFilters = [] # Permanent keyword filters (List of KeywordFilter)
self.pFilter = "" # Permanent project filter (name of project)
for name in bugutils.PROPERTY_NAMES:
dbutils.getOrCreateKeyword(name, interactive=False)
dbutils.getOrCreateKeyword(NOTE_KEYWORD, interactive=False)
def _parser_t_add(self, cmd):
"""Code shared by t_add, bug_add and n_add parsers."""
parser = YokadiOptionParser()
parser.usage = "%s [options] <projectName> [@<keyword1>] [@<keyword2>] <title>" % cmd
parser.description = "Add new %s. Will prompt to create keywords if they do not exist." % cmd
parser.add_argument("-c", dest="crypt", default=False, action="store_true",
help="Encrypt title")
parser.add_argument("-d", "--describe", dest="describe", default=False, action="store_true",
help="Directly open editor to describe task")
parser.add_argument('cmd', nargs='*')
return parser
def _t_add(self, cmd, line):
"""Code shared by t_add, bug_add and n_add."""
parser = self._parser_t_add(cmd)
args = parser.parse_args(line)
line = " ".join(args.cmd)
if not line:
raise BadUsageException("Missing parameters")
projectName, title, keywordDict = parseutils.parseLine(line)
projectName = self._realProjectName(projectName)
if not title:
raise BadUsageException("Missing title")
if args.crypt:
# Obfuscate line in history
length = readline.get_current_history_length()
if length > 0: # Ensure history is positive to avoid crash with bad readline setup
readline.replace_history_item(length - 1, "%s %s " % (cmd,
line.replace(title, "<...encrypted...>")))
# Encrypt title
title = self.cryptoMgr.encrypt(title)
task = dbutils.addTask(projectName, title, keywordDict)
if not task:
tui.reinjectInRawInput(u"%s %s" % (cmd, line))
return None
self.lastTaskId = task.id
if args.describe:
self.do_t_describe(self.lastTaskId)
return task
def do_t_add(self, line):
"""Add new task. Will prompt to create keywords if they do not exist.
t_add <projectName> [@<keyword1>] [@<keyword2>] <title>"""
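        # Example (project and keyword names are hypothetical):
        #   "t_add chores @phone Call the plumber" creates the task
        #   "Call the plumber" in project "chores" tagged with keyword "phone".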
task = self._t_add("t_add", line)
if task:
if self.cryptoMgr.isEncrypted(task.title):
title = "<... encrypted data...>"
else:
title = task.title
print "Added task '%s' (id=%d)" % (title, task.id)
complete_t_add = projectAndKeywordCompleter
def do_bug_add(self, line):
"""Add a bug-type task. Will create a task and ask additional info.
bug_add <project_name> [@<keyword1>] [@<keyword2>] <title>
"""
task = self._t_add("bug_add", line)
if not task:
return
keywordDict = task.getKeywordDict()
bugutils.editBugKeywords(keywordDict)
task.setKeywordDict(keywordDict)
task.urgency = bugutils.computeUrgency(keywordDict)
if self.cryptoMgr.isEncrypted(task.title):
title = "<... encrypted data...>"
else:
title = task.title
print "Added bug '%s' (id=%d, urgency=%d)" % (title, task.id, task.urgency)
complete_bug_add = ProjectCompleter(1)
def do_n_add(self, line):
"""Add a note. A note is a task with the @_note keyword.
n_add <project_name> [@<keyword1>] [@<keyword2>] <title>
"""
task = self._t_add("n_add", line)
if not task:
return
keywordDict = task.getKeywordDict()
keywordDict[NOTE_KEYWORD] = None
task.setKeywordDict(keywordDict)
if self.cryptoMgr.isEncrypted(task.title):
title = "<... encrypted data...>"
else:
title = task.title
print "Added note '%s' (id=%d)" % (title, task.id)
complete_n_add = projectAndKeywordCompleter
def do_bug_edit(self, line):
"""Edit a bug.
bug_edit <id>"""
task = self._t_edit(line)
if not task:
return
keywordDict = task.getKeywordDict()
bugutils.editBugKeywords(keywordDict)
task.setKeywordDict(keywordDict)
task.urgency = bugutils.computeUrgency(keywordDict)
complete_bug_edit = taskIdCompleter
def getTaskFromId(self, tid):
if tid == '_':
if self.lastTaskId is None:
raise YokadiException("No previous task defined")
tid = self.lastTaskId
task = dbutils.getTaskFromId(tid)
if tid != '_':
self.lastTaskId = task.id
return task
def do_t_describe(self, line):
"""Starts an editor to enter a longer description of a task.
t_describe <id>"""
def updateDescription(description):
if self.cryptoMgr.isEncrypted(task.title):
task.description = self.cryptoMgr.encrypt(description)
else:
task.description = description
task = self.getTaskFromId(line)
try:
if self.cryptoMgr.isEncrypted(task.title):
# As title is encrypted, we assume description will be encrypted as well
self.cryptoMgr.force_decrypt = True # Decryption must be turned on to edit
description = tui.editText(self.cryptoMgr.decrypt(task.description),
onChanged=updateDescription,
lockManager=dbutils.TaskLockManager(task),
prefix=u"yokadi-%s-%s-" % (task.project, task.title))
except Exception, e:
raise YokadiException(e)
updateDescription(description)
complete_t_describe = taskIdCompleter
def do_t_set_urgency(self, line):
"""@deprecated: should be removed"""
tui.warnDeprecated("t_set_urgency", "t_urgency")
self.do_t_urgency(line)
def do_t_urgency(self, line):
"""Defines urgency of a task.
t_urgency <id> <value>"""
tokens = parseutils.simplifySpaces(line).split(" ")
if len(tokens) != 2:
raise BadUsageException("You must provide a taskId and an urgency value")
task = self.getTaskFromId(tokens[0])
try:
# Do not use isdigit(), so that we can set negative urgency. This
# make it possible to stick tasks to the bottom of the list.
urgency = int(tokens[1])
except ValueError:
raise BadUsageException("Task urgency must be a digit")
if urgency > 100:
tui.warning("Max urgency is 100")
urgency = 100
elif urgency < -99:
tui.warning("Min urgency is -99")
urgency = -99
task.urgency = urgency
complete_t_set_urgency = taskIdCompleter
complete_t_urgency = taskIdCompleter
def do_t_mark_started(self, line):
"""Mark task as started.
t_mark_started <id>"""
self._t_set_status(line, 'started')
complete_t_mark_started = taskIdCompleter
def do_t_mark_done(self, line):
"""Mark task as done.
t_mark_done <id>"""
self._t_set_status(line, 'done')
complete_t_mark_done = taskIdCompleter
def do_t_mark_new(self, line):
"""Mark task as new (not started).
t_mark_new <id>"""
self._t_set_status(line, 'new')
complete_t_mark_new = taskIdCompleter
def _t_set_status(self, line, status):
task = self.getTaskFromId(line)
if task.recurrence and status == "done":
task.dueDate = task.recurrence.getNext(task.dueDate)
print "Task '%s' next occurrence is scheduled at %s" % (task.title, task.dueDate)
print "To *really* mark this task done and forget it, remove its recurrence first with t_recurs %s none" % task.id
else:
task.status = status
if status == "done":
task.doneDate = datetime.now()
else:
task.doneDate = None
print "Task '%s' marked as %s" % (task.title, status)
def do_t_apply(self, line):
"""Apply a command to several tasks.
        t_apply <id1>[,<id2>[,<id3>...]] <command> <args>
Use x-y to select task range from x to y
Use __ to select all tasks previously selected with t_list"""
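        # Example (hypothetical ids): "t_apply 1,3-5 t_urgency 40" runs
        # "t_urgency <id> 40" for each of the tasks 1, 3, 4 and 5.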
ids = []
if "__" in line:
if self.lastTaskIds:
line = line.replace("__", ",".join([str(i) for i in self.lastTaskIds]))
else:
raise BadUsageException("You must select tasks with t_list prior to use __")
rangeId = re.compile("(\d+)-(\d+)")
tokens = re.split("[\s|,]", line)
if len(tokens) < 2:
raise BadUsageException("Give at least a task id and a command")
idScan = True # Indicate we are parsing ids
cmdTokens = [] # Command that we want to apply
for token in tokens:
if token == "":
continue
if idScan:
result = rangeId.match(token)
if result:
ids.extend(range(int(result.group(1)), int(result.group(2)) + 1))
elif token.isdigit():
ids.append(int(token))
else:
# Id list is finished. Grab rest of line.
cmdTokens.append(token)
idScan = False
else:
cmdTokens.append(token)
if not cmdTokens:
raise BadUsageException("Give a command to apply")
cmd = cmdTokens.pop(0)
for id in ids:
line = " ".join([cmd, str(id), " ".join(cmdTokens)])
print "Executing: %s" % line
self.onecmd(line.strip())
complete_t_apply = taskIdCompleter
def parser_t_remove(self):
parser = YokadiOptionParser()
parser.usage = "t_remove [options] <id>"
parser.description = "Delete a task."
parser.add_argument("-f", dest="force", default=False, action="store_true",
help="Skip confirmation prompt")
parser.add_argument("id")
return parser
def do_t_remove(self, line):
parser = self.parser_t_remove()
args = parser.parse_args(line)
task = self.getTaskFromId(args.id)
if not args.force:
if not tui.confirm("Remove task '%s'" % task.title):
return
projectId = task.project.id
task.destroySelf()
print "Task '%s' removed" % (task.title)
# Delete project with no associated tasks
if Task.select(Task.q.projectID == projectId).count() == 0:
Project.delete(projectId)
complete_t_remove = taskIdCompleter
def parser_t_purge(self):
parser = YokadiOptionParser()
parser.usage = "t_purge [options]"
parser.description = "Remove old done tasks from all projects."
parser.add_argument("-f", "--force", dest="force", default=False, action="store_true",
help="Skip confirmation prompt")
delay = int(Config.byName("PURGE_DELAY").value)
parser.add_argument("-d", "--delay", dest="delay", default=delay,
type=int, help="Delay (in days) after which done tasks are destroyed. Default is %d." % delay)
return parser
def do_t_purge(self, line):
parser = self.parser_t_purge()
args = parser.parse_args(line)
filters = []
filters.append(Task.q.status == "done")
filters.append(Task.q.doneDate < (datetime.now() - timedelta(days=args.delay)))
tasks = Task.select(AND(*filters))
if tasks.count() == 0:
print "No tasks need to be purged"
return
print "The following tasks will be removed:"
print "\n".join(["%s: %s" % (task.id, task.title) for task in tasks])
if args.force or tui.confirm("Do you really want to remove those tasks (this action cannot be undone)?"):
Task.deleteMany(AND(*filters))
print "Tasks deleted"
else:
print "Purge canceled"
def parser_t_list(self):
parser = YokadiOptionParser()
parser.usage = "t_list [options] <project_or_keyword_filter>"
parser.description = "List tasks filtered by project and/or keywords. " \
"'%' can be used as a wildcard in the project name: " \
"to list projects starting with 'foo', use 'foo%'. " \
"Keyword filtering is achieved with '@'. Ex.: " \
"t_list @home, t_list @_bug=2394"
parser.add_argument("-a", "--all", dest="status",
action="store_const", const="all",
help="all tasks (done and to be done)")
parser.add_argument("--started", dest="status",
action="store_const", const="started",
help="only started tasks")
rangeList = ["today", "thisweek", "thismonth", "all"]
parser.add_argument("-d", "--done", dest="done",
help="only done tasks. <range> must be either one of %s or a date using the same format as t_due" % ", ".join(rangeList),
metavar="<range>")
parser.add_argument("-u", "--urgency", dest="urgency",
type=int,
help="tasks with urgency greater or equal than <urgency>",
metavar="<urgency>")
parser.add_argument("-t", "--top-due", dest="topDue",
default=False, action="store_true",
help="top 5 urgent tasks of each project based on due date")
parser.add_argument("--overdue", dest="due",
action="append_const", const="now",
help="all overdue tasks")
parser.add_argument("--due", dest="due",
action="append",
help="""only list tasks due before/after <limit>. <limit> is a
                            date optionally prefixed with a comparison operator.
Valid operators are: <, <=, >=, and >.
Example of valid limits:
- tomorrow: due date <= tomorrow, 23:59:59
- today: due date <= today, 23:59:59
- >today: due date > today: 23:59:59
""",
metavar="<limit>")
parser.add_argument("-k", "--keyword", dest="keyword",
help="Group tasks by given keyword instead of project. The %% wildcard can be used.",
metavar="<keyword>")
parser.add_argument("-s", "--search", dest="search",
action="append",
help="only list tasks whose title or description match <value>. You can repeat this option to search on multiple words.",
metavar="<value>")
formatList = ["auto"] + gRendererClassDict.keys()
parser.add_argument("-f", "--format", dest="format",
default="auto", choices=formatList,
help="how should the task list be formated. <format> can be %s" % ", ".join(formatList),
metavar="<format>")
parser.add_argument("-o", "--output", dest="output",
help="Output task list to <file>",
metavar="<file>")
parser.add_argument("--decrypt", dest="decrypt", default=False, action="store_true",
help="Decrypt task title and description")
parser.add_argument("filter", nargs="*", metavar="<project_or_keyword_filter>")
return parser
def _realProjectName(self, name):
if name == '_':
if self.lastProjectName is None:
raise YokadiException("No previous project used")
else:
self.lastProjectName = name
return self.lastProjectName
def _parseListLine(self, parser, line):
"""
Parse line with parser, returns a tuple of the form
(options, projectList, filters)
"""
args = parser.parse_args(line)
if len(args.filter) > 0:
projectName, keywordFilters = parseutils.extractKeywords(u" ".join(args.filter))
else:
projectName = ""
keywordFilters = []
if self.kFilters:
# Add keyword filter
keywordFilters.extend(self.kFilters)
if not projectName:
if self.pFilter:
# If a project filter is defined, use it as none was provided
projectName = self.pFilter
else:
# Take all project if none provided
projectName = "%"
if projectName.startswith("!"):
projectName = self._realProjectName(projectName[1:])
projectList = Project.select(NOT(LIKE(Project.q.name, projectName)))
else:
projectName = self._realProjectName(projectName)
projectList = Project.select(LIKE(Project.q.name, projectName))
if projectList.count() == 0:
raise YokadiException("Found no project matching '%s'" % projectName)
# Check keywords exist
parseutils.warnIfKeywordDoesNotExist(keywordFilters)
# Filtering and sorting according to parameters
filters = []
# Filter on keywords
for keywordFilter in keywordFilters:
filters.append(keywordFilter.filter())
# Search
if args.search:
for word in args.search:
if word.startswith("@"):
tui.warning("Maybe you want keyword search (without -s option) "
"instead of plain text search?")
filters.append(OR(LIKE(Task.q.title, "%" + word + "%"),
LIKE(Task.q.description, "%" + word + "%")))
return args, projectList, filters
def _renderList(self, renderer, projectList, filters, order,
limit=None, groupKeyword=None):
"""
Render a list using renderer, according to the restrictions set by the
other parameters
@param renderer: renderer class (for example: TextListRenderer)
@param projectList: list of project name (as unicode string)
@param filters: filters in sqlobject format (example: Task.q.status == 'done')
@param order: ordering in sqlobject format (example: -Task.q.urgency)
@param limit: limit number tasks (int) or None for no limit
@param groupKeyword: keyword used for grouping (as unicode string) or None
"""
if groupKeyword:
if groupKeyword.startswith("@"):
groupKeyword = groupKeyword[1:]
for keyword in Keyword.select(LIKE(Keyword.q.name, groupKeyword)):
if unicode(keyword.name).startswith("_") and not groupKeyword.startswith("_"):
                    # BUG: cannot filter on db side because sqlobject does not understand ESCAPE needed with _
continue
taskList = Task.select(AND(TaskKeyword.q.keywordID == keyword.id,
*filters),
orderBy=order, limit=limit, distinct=True,
join=LEFTJOINOn(Task, TaskKeyword, Task.q.id == TaskKeyword.q.taskID))
taskList = list(taskList)
if projectList:
taskList = [x for x in taskList if x.project in projectList]
if len(taskList) > 0:
self.lastTaskIds.extend([t.id for t in taskList]) # Keep selected id for further use
renderer.addTaskList(unicode(keyword), taskList)
renderer.end()
else:
hiddenProjectNames = []
for project in projectList:
if not project.active:
hiddenProjectNames.append(project.name)
continue
taskList = Task.select(AND(Task.q.projectID == project.id, *filters),
orderBy=order, limit=limit, distinct=True,
join=LEFTJOINOn(Task, TaskKeyword, Task.q.id == TaskKeyword.q.taskID))
taskList = list(taskList)
if len(taskList) > 0:
self.lastTaskIds.extend([t.id for t in taskList]) # Keep selected id for further use
renderer.addTaskList(unicode(project), taskList)
renderer.end()
if len(hiddenProjectNames) > 0:
tui.info("hidden projects: %s" % ", ".join(hiddenProjectNames))
def do_t_list(self, line):
def selectRendererClass():
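            # Honor an explicit --format first, then infer the renderer from the
            # output file extension, falling back to the plain text renderer.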
if args.format != "auto":
return gRendererClassDict[args.format]
defaultRendererClass = TextListRenderer
if not args.output:
return defaultRendererClass
ext = os.path.splitext(args.output)[1]
if not ext:
return defaultRendererClass
return gRendererClassDict.get(ext[1:], defaultRendererClass)
# Reset last tasks id list
self.lastTaskIds = []
# BUG: completion based on parameter position is broken when parameter is given
args, projectList, filters = self._parseListLine(self.parser_t_list(), line)
# Skip notes
filters.append(parseutils.KeywordFilter("!@" + NOTE_KEYWORD).filter())
# Handle t_list specific options
order = -Task.q.urgency, Task.q.creationDate
limit = None
if args.done:
filters.append(Task.q.status == 'done')
if args.done != "all":
minDate = ydateutils.parseMinDate(args.done)
filters.append(Task.q.doneDate >= minDate)
elif args.status == "all":
pass
elif args.status == "started":
filters.append(Task.q.status == 'started')
else:
filters.append(Task.q.status != 'done')
if args.urgency:
order = -Task.q.urgency
filters.append(Task.q.urgency >= args.urgency)
if args.topDue:
filters.append(Task.q.dueDate != None)
order = Task.q.dueDate
limit = 5
if args.due:
for due in args.due:
dueOperator, dueLimit = ydateutils.parseDateLimit(due)
filters.append(dueOperator(Task.q.dueDate, dueLimit))
order = Task.q.dueDate
if args.decrypt:
self.cryptoMgr.force_decrypt = True
# Define output
if args.output:
out = open(args.output, "w")
else:
out = tui.stdout
# Instantiate renderer
rendererClass = selectRendererClass()
renderer = rendererClass(out, cryptoMgr=self.cryptoMgr)
# Fill the renderer
self._renderList(renderer, projectList, filters, order, limit, args.keyword)
complete_t_list = projectAndKeywordCompleter
def parser_n_list(self):
parser = YokadiOptionParser()
parser.usage = "n_list [options] <project_or_keyword_filter>"
parser.description = "List notes filtered by project and/or keywords. " \
"'%' can be used as a wildcard in the project name: " \
"to list projects starting with 'foo', use 'foo%'. " \
"Keyword filtering is achieved with '@'. Ex.: " \
"n_list @home, n_list @_bug=2394"
parser.add_argument("-s", "--search", dest="search",
action="append",
help="only list notes whose title or description match <value>. You can repeat this option to search on multiple words.",
metavar="<value>")
parser.add_argument("-k", "--keyword", dest="keyword",
help="Group tasks by given keyword instead of project. The % wildcard can be used.",
metavar="<keyword>")
parser.add_argument("--decrypt", dest="decrypt", default=False, action="store_true",
help="Decrypt note title and description")
parser.add_argument("filter", nargs="*", metavar="<project_or_keyword_filter>")
return parser
def do_n_list(self, line):
args, projectList, filters = self._parseListLine(self.parser_n_list(), line)
if args.decrypt:
self.cryptoMgr.force_decrypt = True
filters.append(parseutils.KeywordFilter("@" + NOTE_KEYWORD).filter())
order = Task.q.creationDate
renderer = TextListRenderer(tui.stdout, cryptoMgr=self.cryptoMgr, renderAsNotes=True)
self._renderList(renderer, projectList, filters, order, limit=None,
groupKeyword=args.keyword)
complete_n_list = projectAndKeywordCompleter
def do_t_reorder(self, line):
"""Reorder tasks of a project.
It works by starting an editor with the task list: you can then change
the order of the lines and save the list. The urgency field will be
updated to match the order.
t_reorder <project_name>"""
try:
project = Project.byName(line)
except SQLObjectNotFound:
raise BadUsageException("You must provide a valid project name")
taskList = Task.select(AND(Task.q.projectID == project.id,
Task.q.status != 'done'),
orderBy=-Task.q.urgency)
lines = ["%d,%s" % (x.id, x.title) for x in taskList]
text = tui.editText("\n".join(lines))
ids = []
for line in text.split("\n"):
line = line.strip()
if not "," in line:
continue
id = int(line.split(",")[0])
ids.append(id)
ids.reverse()
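        # After reversing, enumerate() hands out increasing urgencies, so the task
        # listed first in the editor ends up with the highest urgency.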
for urgency, id in enumerate(ids):
task = Task.get(id)
task.urgency = urgency
complete_t_reorder = ProjectCompleter(1)
def parser_t_show(self):
parser = YokadiOptionParser()
parser.usage = "t_show [options] <id>"
parser.description = "Display details of a task."
choices = ["all", "summary", "description"]
parser.add_argument("--output", dest="output",
choices=choices,
default="all",
help="<output> can be one of %s. If not set, it defaults to all." % ", ".join(choices),
metavar="<output>")
parser.add_argument("--decrypt", dest="decrypt", default=False, action="store_true",
help="Decrypt task title and description")
parser.add_argument("id")
return parser
def do_t_show(self, line):
parser = self.parser_t_show()
args = parser.parse_args(line)
if args.decrypt:
self.cryptoMgr.force_decrypt = True
task = self.getTaskFromId(args.id)
title = self.cryptoMgr.decrypt(task.title)
description = self.cryptoMgr.decrypt(task.description)
if args.output in ("all", "summary"):
keywordDict = task.getKeywordDict()
keywordArray = []
for name, value in keywordDict.items():
txt = name
if value:
txt += "=" + str(value)
keywordArray.append(txt)
keywordArray.sort()
keywords = ", ".join(keywordArray)
fields = [
("Project", task.project.name),
("Title", title),
("ID", task.id),
("Created", task.creationDate),
("Due", task.dueDate),
("Status", task.status),
("Urgency", task.urgency),
("Recurrence", task.recurrence),
("Keywords", keywords),
]
if task.status == "done":
fields.append(("Done", task.doneDate))
tui.renderFields(fields)
if args.output in ("all", "description") and task.description:
if args.output == "all":
print
print description
complete_t_show = taskIdCompleter
def _t_edit(self, line):
"""Code shared by t_edit and bug_edit."""
def editComplete(text, state):
""" Specific completer for the edit prompt.
This subfunction should stay here because it needs to access to cmd members"""
if state == 0:
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx > 0:
self.completion_matches = projectAndKeywordCompleter("", text, line, begidx, endidx, shift=1)
else:
self.completion_matches = []
try:
return self.completion_matches[state]
except IndexError:
return None
task = self.getTaskFromId(line)
if self.cryptoMgr.isEncrypted(task.title):
self.cryptoMgr.force_decrypt = True # Decryption must be turned on to edit
title = self.cryptoMgr.decrypt(task.title)
# Create task line
taskLine = parseutils.createLine("", title, task.getKeywordDict())
oldCompleter = readline.get_completer() # Backup previous completer to restore it in the end
readline.set_completer(editComplete) # Switch to specific completer
while True:
# Edit
print "(Press Ctrl+C to cancel)"
try:
line = tui.editLine(taskLine)
if not line.strip():
tui.warning("Missing title")
continue
except KeyboardInterrupt:
print
print "Cancelled"
task = None
break
foo, title, keywordDict = parseutils.parseLine(task.project.name + " " + line)
if self.cryptoMgr.isEncrypted(task.title):
title = self.cryptoMgr.encrypt(title)
if dbutils.updateTask(task, task.project.name, title, keywordDict):
break
readline.set_completer(oldCompleter) # Restore standard completer
return task
def do_t_edit(self, line):
"""Edit a task.
t_edit <id>"""
self._t_edit(line)
complete_t_edit = taskIdCompleter
def do_t_set_project(self, line):
"""@deprecated: should be removed"""
tui.warnDeprecated("t_set_project", "t_project")
self.do_t_project(line)
def do_t_project(self, line):
"""Set task's project.
t_project <id> <project>"""
tokens = parseutils.simplifySpaces(line).split(" ")
if len(tokens) != 2:
raise YokadiException("You should give two arguments: <task id> <project>")
task = self.getTaskFromId(tokens[0])
projectName = tokens[1]
projectName = self._realProjectName(projectName)
task.project = dbutils.getOrCreateProject(projectName)
if task.project:
print "Moved task '%s' to project '%s'" % (task.title, projectName)
complete_t_set_project = ProjectCompleter(2)
complete_t_project = ProjectCompleter(2)
def do_t_set_due(self, line):
"""@deprecated: should be removed"""
tui.warnDeprecated("t_set_due", "t_due")
self.do_t_due(line)
def do_t_due(self, line):
"""Set task's due date
t_due <id> <date>
Date can be specified as a relative offset:
- +5M: in 5 minutes
- +3H: in 3 hours
- +1D: in 1 day
- +6W: in 6 weeks
As a day in the week:
- tomorrow: tomorrow, same hour
- tuesday 12:10: next tuesday, at 12:10
- fr 15:30: next friday, at 15:30
Or as an absolute date or time:
- 10:38: at 10:38 today
- 25/09/2010 12:10: on the 25th of September, 2010, at 12:10
        - 23/02/2010: on the 23rd of February, 2010
- 01/04: on the 1st of April
- 12: on the 12th of current month
To reset a due date, use "none"."""
line = parseutils.simplifySpaces(line)
if len(line.split()) < 2:
raise YokadiException("Give a task id and time, date or date & time")
taskId, line = line.strip().split(" ", 1)
task = self.getTaskFromId(taskId)
if line.lower() == "none":
task.dueDate = None
print "Due date for task '%s' reset" % task.title
else:
task.dueDate = ydateutils.parseHumaneDateTime(line)
print "Due date for task '%s' set to %s" % (task.title, task.dueDate.ctime())
complete_t_set_due = dueDateCompleter
complete_t_due = dueDateCompleter
def do_t_add_keywords(self, line):
"""Add keywords to an existing task
t_add_keywords <id> <@keyword1> <@keyword2>[=<value>]...
"""
tokens = parseutils.simplifySpaces(line).split(" ", 1)
if len(tokens) < 2:
raise YokadiException("You should give at least two arguments: <task id> <keyword>")
task = dbutils.getTaskFromId(tokens[0])
garbage, keywordFilters = parseutils.extractKeywords(tokens[1])
newKwDict = parseutils.keywordFiltersToDict(keywordFilters)
if garbage:
raise YokadiException("Cannot parse line, got garbage (%s). Maybe you forgot to add @ before keyword ?"
% garbage)
if not dbutils.createMissingKeywords(newKwDict.keys()):
# User cancel keyword creation
return
kwDict = task.getKeywordDict()
kwDict.update(newKwDict)
task.setKeywordDict(kwDict)
def do_t_recurs(self, line):
"""Make a task recurs
t_recurs <id> yearly <dd/mm> <HH:MM>
t_recurs <id> monthly <dd> <HH:MM>
t_recurs <id> monthly <first/second/third/last> <mo, tu, we, th, fr, sa, su> <hh:mm>
t_recurs <id> quarterly <dd> <HH:MM>
t_recurs <id> quarterly <first/second/third/last> <mo, tu, we, th, fr, sa, su> <hh:mm>
t_recurs <id> weekly <mo, tu, we, th, fr, sa, su> <hh:mm>
t_recurs <id> daily <HH:MM>
t_recurs <id> none (remove recurrence)"""
tokens = parseutils.simplifySpaces(line).split()
if len(tokens) < 2:
raise YokadiException("You should give at least two arguments: <task id> <recurrence>")
task = self.getTaskFromId(tokens[0])
# Define recurrence:
freq = byminute = byhour = byweekday = bymonthday = bymonth = None
tokens[1] = tokens[1].lower()
if tokens[1] == "none":
if task.recurrence:
task.recurrence.destroySelf()
task.recurrence = None
return
elif tokens[1] == "daily":
if len(tokens) != 3:
raise YokadiException("You should give time for daily task")
freq = rrule.DAILY
byhour, byminute = ydateutils.getHourAndMinute(tokens[2])
elif tokens[1] == "weekly":
freq = rrule.WEEKLY
if len(tokens) != 4:
raise YokadiException("You should give day and time for weekly task")
byweekday = ydateutils.getWeekDayNumberFromDay(tokens[2].lower())
byhour, byminute = ydateutils.getHourAndMinute(tokens[3])
elif tokens[1] in ("monthly", "quarterly"):
if tokens[1] == "monthly":
freq = rrule.MONTHLY
else:
# quarterly
freq = rrule.YEARLY
bymonth = [1, 4, 7, 10]
if len(tokens) < 4:
raise YokadiException("You should give day and time for %s task" % (tokens[1],))
try:
bymonthday = int(tokens[2])
byhour, byminute = ydateutils.getHourAndMinute(tokens[3])
except ValueError:
POSITION = {"first": 1, "second": 2, "third": 3, "fourth": 4, "last":-1}
if tokens[2].lower() in POSITION.keys() and len(tokens) == 5:
byweekday = rrule.weekday(ydateutils.getWeekDayNumberFromDay(tokens[3].lower()),
POSITION[tokens[2]])
byhour, byminute = ydateutils.getHourAndMinute(tokens[4])
bymonthday = None # Default to current day number - need to be blanked
else:
raise YokadiException("Unable to understand date. See help t_recurs for details")
elif tokens[1] == "yearly":
freq = rrule.YEARLY
rDate = ydateutils.parseHumaneDateTime(" ".join(tokens[2:]))
bymonth = rDate.month
bymonthday = rDate.day
byhour = rDate.hour
byminute = rDate.minute
else:
raise YokadiException("Unknown frequency. Available: daily, weekly, monthly and yearly")
if task.recurrence is None:
task.recurrence = Recurrence()
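        # Build the dateutil rrule from the fields gathered above and store it;
        # the next computed occurrence becomes the task's new due date.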
rr = rrule.rrule(freq, byhour=byhour, byminute=byminute, byweekday=byweekday,
bymonthday=bymonthday, bymonth=bymonth)
task.recurrence.setRrule(rr)
task.dueDate = task.recurrence.getNext()
complete_t_recurs = recurrenceCompleter
def do_t_filter(self, line):
"""Define permanent keyword filter used by t_list
Ex.:
- t_filter @work (filter all task that have the "work" keyword)
- t_filter none (remove filter)"""
# TODO: add completion
if not line:
raise YokadiException("You must give keyword as argument or 'none' to reset filter")
if parseutils.simplifySpaces(line).lower() == "none":
self.kFilters = []
self.pFilter = ""
self.prompt = "yokadi> "
else:
projectName, keywordFilters = parseutils.extractKeywords(line)
self.kFilters = keywordFilters
self.pFilter = projectName
prompt = "y"
if self.pFilter:
prompt += " %s" % projectName
if self.kFilters:
parseutils.warnIfKeywordDoesNotExist(self.kFilters)
prompt += " %s" % (" ".join([str(k) for k in keywordFilters]))
self.prompt = "%s> " % prompt
# vi: ts=4 sw=4 et
| gpl-3.0 | 6,851,783,011,159,029,000 | 40.171859 | 147 | 0.559122 | false |
Alaxe/judgeSystem | judge/migrations/0051_auto_20160521_1932.py | 1 | 1608 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-21 16:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0050_merge'),
]
operations = [
migrations.AlterModelOptions(
name='problem',
options={'ordering': ['-id'], 'permissions': (('retest_problem', 'Can start a retest'), ('change_visibility_of_problem', 'Can change the visibility of a problem'), ('see_hidden_problems', 'Can see hidden problems'), ('add_media_to_problem', 'Can upload media for a problem'), ('add_checker_to_problem', 'Can add a checker for a problem'), ('add_grader_to_problem', 'Can add a custom grader for a problem'), ('import_problem', 'Can import problems'))},
),
migrations.AddField(
model_name='problem',
name='custom_grader',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='problem',
name='grader_header',
field=models.TextField(blank=True, verbose_name='Grader header code'),
),
migrations.AddField(
model_name='problem',
name='grader_header_file_name',
field=models.CharField(blank=True, max_length=32, verbose_name='Filename for the grader header file'),
),
migrations.AddField(
model_name='problem',
name='grader_source',
field=models.TextField(blank=True, verbose_name='Grader source code'),
),
]
| gpl-2.0 | 5,183,483,593,779,452,000 | 40.230769 | 463 | 0.600746 | false |
shaunduncan/breezeminder | breezeminder/views/marta.py | 1 | 3770 | import json
from datetime import datetime, timedelta
from flask import (flash,
make_response,
render_template,
request)
from breezeminder.app import app
from breezeminder.forms.marta import MartaRouteStatusForm
from breezeminder.models.marta import (Route,
Schedule,
Stop,
ScheduledStop,
Bus)
from breezeminder.util.views import same_origin, nocache
@nocache
def status():
form = MartaRouteStatusForm(request.form)
context = {
'title': 'Check MARTA Bus Status',
'description': 'Check the current status of MARTA Buses on any route',
'form': form
}
if request.method == 'POST' and form.validate():
try:
context['route'] = Route.objects.get(name=form.route.data)
print context['route']
except Route.DoesNotExist:
flash('Oops! An error occurred looking up route %s' % form.route.data, 'error')
return render_template('marta/realtime.html', **context)
@same_origin
def route_details(route_id):
route = Route.objects.get_or_404(name=route_id)
schedule = Schedule.objects.for_today()
resp = make_response(json.dumps(route.to_json(schedule=schedule)))
resp.cache_control.no_cache = True
resp.headers['Content-Type'] = 'application/json'
return resp
@same_origin
def route_realtime(route_id):
# Only get "recent" buses
current = datetime.now() - timedelta(hours=1)
qs = Bus.objects.filter(route=route_id,
status_time__gte=current,
is_stale=False)
data = [bus.to_json() for bus in qs]
resp = make_response(json.dumps(data))
resp.cache_control.no_cache = True
resp.headers['Content-Type'] = 'application/json'
return resp
@same_origin
def route_upcoming(route_id):
route = Route.objects.get_or_404(name=route_id)
schedule = Schedule.objects.for_today()
data = {}
start = ScheduledStop.arrival_now()
# Filter the next arrivals for 1hour
# This should limit the results enough so we don't do more work
qs = ScheduledStop.objects.filter(route_id=route.id,
schedule_id=schedule.id,
arrival__gt=start,
arrival__lt=start + 3600)
# Optionally accept a stop argument
if 'stop' in request.args:
try:
stop = Stop.objects.get(id=request.args['stop'])
except (Stop.DoesNotExist, Stop.MultipleObjectsReturned, ValueError):
pass
else:
qs = qs.filter(stop_id=stop.id)
# Process ordered by arrival - grouped by stop
for stop in qs.only('stop_id', 'arrival').order_by('arrival'):
if stop.stop_id not in data:
data[stop.stop_id] = {
'refresh': stop.arrival - start,
'times': []
}
if len(data[stop.stop_id]['times']) >= 3:
continue
data[stop.stop_id]['times'].append(
ScheduledStop.seconds_to_timestring(stop.arrival)
)
resp = make_response(json.dumps(data))
resp.cache_control.no_cache = True
resp.headers['Content-Type'] = 'application/json'
return resp
app.add_url_rule('/marta/', 'marta.status', status, methods=['GET', 'POST'])
app.add_url_rule('/marta/route/<route_id>.json', 'marta.route.details', route_details)
app.add_url_rule('/marta/realtime/<route_id>.json', 'marta.route.realtime', route_realtime)
app.add_url_rule('/marta/upcoming/<route_id>.json', 'marta.route.upcoming', route_upcoming)
| mit | 3,574,729,019,332,970,500 | 31.222222 | 91 | 0.590716 | false |
liyi193328/seq2seq | seq2seq/tasks/dump_attention.py | 1 | 5396 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.decode_text import _get_prediction_length
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_scores(predictions_dict):
"""Returns the attention scores, sliced by source and target length.
"""
prediction_len = _get_prediction_length(predictions_dict)
source_len = predictions_dict["features.source_len"]
return predictions_dict["attention_scores"][:prediction_len, :source_len]
def _create_figure(predictions_dict):
"""Creates and returns a new figure that visualizes
  attention scores for a single model's predictions.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
fig = plt.figure(figsize=(8, 8))
plt.imshow(
X=predictions_dict["attention_scores"][:prediction_len, :source_len],
interpolation="nearest",
cmap=plt.cm.Blues)
plt.xticks(np.arange(source_len), source_words, rotation=45)
plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
fig.tight_layout()
return fig
class DumpAttention(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DumpAttention, self).__init__(params)
self._attention_scores_accum = []
self._idx = 0
if not self.params["output_dir"]:
raise ValueError("Must specify output_dir for DumpAttention")
@staticmethod
def default_params():
params = {}
params.update({"output_dir": "", "dump_plots": False})
return params
def begin(self):
super(DumpAttention, self).begin()
gfile.MakeDirs(self.params["output_dir"])
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions["features.source_tokens"]
if "attention_scores" in self._predictions:
fetches["attention_scores"] = self._predictions["attention_scores"]
elif "beam_search_output.original_outputs.attention_scores" in self._predictions:
# fetches["beam_search_output.original_outputs.attention_scores"] = self._predictions["beam_search_output.original_outputs.attention_scores"]
fetches["attention_scores"] = self._predictions["beam_search_output.original_outputs.attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
fetches["predicted_tokens"] = fetches["predicted_tokens"][:, 0]
fetches["attention_scores"] = fetches["attention_scores"][:, 0, :]
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
if self.params["dump_plots"]:
output_path = os.path.join(self.params["output_dir"],
"{:05d}.png".format(self._idx))
_create_figure(fetches)
plt.savefig(output_path)
plt.close()
tf.logging.info("Wrote %s", output_path)
self._idx += 1
self._attention_scores_accum.append(_get_scores(fetches))
def end(self, _session):
scores_path = os.path.join(self.params["output_dir"],
"attention_scores.npz")
np.savez(scores_path, *self._attention_scores_accum)
tf.logging.info("Wrote %s", scores_path)
| apache-2.0 | 562,419,923,570,596,600 | 36.213793 | 147 | 0.699222 | false |
PMBio/gptwosample | gptwosample/examples/gptwosample_confounder_example.py | 1 | 2023 | '''
Small Example application of GPTwoSample
========================================
Please run script "generateToyExampleFiles.py" to generate Toy Data.
This example shows how to apply GPTwoSample to the toy data generated above.
Created on Feb 25, 2011
@author: Max Zwiessele, Oliver Stegle
'''
from gptwosample.data.dataIO import get_data_from_csv
import logging as LG
import scipy as SP
import numpy
from gptwosample.confounder.confounder import TwoSampleConfounder
if __name__ == '__main__':
cond1_file = './ToyCondition1.csv'; cond2_file = './ToyCondition2.csv'
#full debug info:
LG.basicConfig(level=LG.INFO)
#1. read csv file
cond1 = get_data_from_csv(cond1_file, delimiter=',')
cond2 = get_data_from_csv(cond2_file, delimiter=",")
    # range over which to create time-local predictions
#note: this needs to be [T x 1] dimensional: (newaxis)
Tpredict = SP.linspace(cond1["input"].min(), cond1["input"].max(), 100)[:, SP.newaxis]
T1 = cond1.pop("input")
T2 = cond2.pop("input")
gene_names = cond1.keys()
assert gene_names == cond2.keys()
#expression levels: replicates x #time points
Y0 = numpy.array(cond1.values()).T.swapaxes(0,1)
Y1 = numpy.array(cond2.values()).T.swapaxes(0,1)
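    # Stack both conditions: the time points are tiled once per replicate and the
    # two expression arrays are stacked along a new leading condition axis.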
T = numpy.array([numpy.tile(T1[:,None], Y0.shape[0]),
numpy.tile(T2[:,None], Y1.shape[0])]).swapaxes(1,2)
Y = numpy.array([Y0,Y1])
# Test for nans in data:
n,r,t,d = Y.shape
ri = numpy.random.randint
for _ in range(4):
Y[ri(n), ri(r), ri(t), ri(d)] = numpy.nan
confounder_object = TwoSampleConfounder(T, Y, q=2)
confounder_object.learn_confounder_matrix()
confounder_object.predict_likelihoods(T,Y)
Tpredict = numpy.linspace(T1.min(), T1.max(), 100)[:,None]
it = confounder_object.predict_means_variances(Tpredict)
import pylab
pylab.ion()
pylab.figure()
for _ in confounder_object.plot():
raw_input("Press Enter to continue...")
| apache-2.0 | 2,073,420,866,599,686,000 | 31.126984 | 90 | 0.633712 | false |
opennumber/opennumber | src/err.py | 1 | 3722 | # -*- coding: utf-8 -*-
"""
This module is used to represent errors.
code_map = dict(code=XError class)
"""
import types
import constants
class BaseError(Exception):
code = -1
message = 'base_error'
def __init__(self, message=''):
if message and isinstance(message, types.UnicodeType):
message = message.encode('utf-8')
pass
if not message:
message = self.message
pass
self.args = (self.code, message)
self.message = message
pass
def __str__(self):
code, message = self.args
return '%s: code: %s, message: %s' %(self.__class__, code, message)
def __repr__(self):
return self.__str__ #
pass #end class BaseError
class Success(BaseError):
"""
"""
code = 0
message = 'success'
pass #end class Success
class InternalError(BaseError):
"""
internal error
"""
code = 1
message = 'internal error'
pass #end class InternalError
class MissingParameter(BaseError):
code = 2
message = "missing parameter '%s'"
def __init__(self, parameter_name):
super(MissingParameter, self).__init__(self.message % parameter_name)
return
pass
class ParameterTypeError(BaseError):
code = 3
message = 'parameter type error'
pass
class IntegerParameterError(ParameterTypeError):
code = 4
def __init__(self, pname):
message = "'%s' type error. except integer" % (pname)
super(IntegerParameterError, self).__init__(message)
return
pass
class DatetimeParameterError(ParameterTypeError):
code = 5
def __init__(self, pname):
message = "'%s' type error. except datetime. e.g: '2016-01-01 20:00:00'" % (pname)
super(DatetimeParameterError, self).__init__(self, message)
return
pass
class DateParameterError(ParameterTypeError):
code = 6
def __init__(self, pname):
message = "'%s' type error. except date. e.g: '2016-01-01'" % (pname)
super(DateParameterError, self).__init__(message)
return
pass
class MissingTimestampError(BaseError):
code = 7
message = 'missing parameter "timestamp". timestamp is used for debug. e.g: timestamp=time()'
pass
class InvalidPhoneNumber(BaseError):
code = 8
message = 'invalid phone number. regex: "%s"' % (constants.phone_number_regex.pattern)
class InvalidAction(BaseError):
code = 9
message = 'invalid action. valid action %s' % ([x.value for x in constants.ActionEnum])
pass
class NotFoundToken(BaseError):
code = 10
message = 'not found token'
pass
class AccessReject(BaseError):
code = 11
message = 'access reject, email [email protected] to get help.'
pass
class QuotaOverFlow(BaseError):
code = 12
    message = 'daily quota overflow. contact the administrator to increase your quota'
pass
class InvalidIp(BaseError):
code = 13
    message = 'invalid ip value. expected ipv4 & ipv6'
pass
class InvalidRating(BaseError):
code = 14
message = 'invalid rating. valid rating %s' % ([e.value for e in constants.RatingEnum])
pass
class InvaildSign(BaseError):
code = 15
message = 'invalid sign'
pass
# The code below checks every error class defined above and makes sure no error.code value is duplicated.
_locals_keys = locals().keys()
code_map = {}
for key in _locals_keys:
obj = locals()[key]
if not issubclass(type(obj), type):
continue
if issubclass(obj, BaseError):
if obj.code in code_map:
raise RuntimeError('duplicate code: code: %s' %(obj.code))
code_map[obj.code] = obj
pass
pass
| gpl-3.0 | 2,755,212,175,510,790,000 | 21.555556 | 97 | 0.617953 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/plugins/toolkit_sphinx_extension.py | 1 | 6868 | # encoding: utf-8
'''A Sphinx extension to automatically document CKAN's crazy plugins toolkit,
autodoc-style.
Sphinx's autodoc extension can document modules or classes, but although it
masquerades as a module, CKAN's plugins toolkit is actually neither a module nor
a class: it's an object-instance of a class, and it's an object with weird
__getattr__ behavior too. Autodoc can't handle it, so we have this custom
Sphinx extension to automate documenting it instead.
This extension plugs into the reading phase of the Sphinx build. It intercepts
the 'toolkit' document (extensions/plugins-toolkit.rst) after Sphinx has read
the reStructuredText source from file. It modifies the source, adding in Sphinx
directives for everything in the plugins toolkit, and then the Sphinx build
continues as normal (just as if the generated reStructuredText had been entered
into plugins-toolkit.rst manually before running Sphinx).
'''
import types
import inspect
import ckan.plugins.toolkit as toolkit
def setup(app):
'''Setup this Sphinx extension. Called once when initializing Sphinx.
'''
# Connect to Sphinx's source-read event, the callback function will be
# called after each source file is read.
app.connect('source-read', source_read)
def format_function(name, function, docstring=None):
'''Return a Sphinx .. function:: directive for the given function.
The directive includes the function's docstring if it has one.
:param name: the name to give to the function in the directive,
eg. 'get_converter'
:type name: string
:param function: the function itself
:type function: function
:param docstring: if given, use this instead of introspecting the function
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. function:: directive for the function
:rtype: string
'''
# The template we'll use to render the Sphinx function directive.
template = ('.. py:function:: ckan.plugins.toolkit.{function}{args}\n'
'\n'
'{docstring}\n'
'\n')
# Get the arguments of the function, as a string like:
# "(foo, bar=None, ...)"
argstring = inspect.formatargspec(*inspect.getargspec(function))
docstring = docstring or inspect.getdoc(function)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(function=name, args=argstring, docstring=docstring)
def format_class(name, class_, docstring=None):
'''Return a Sphinx .. class:: directive for the given class.
The directive includes the class's docstring if it has one.
:param name: the name to give to the class in the directive,
eg. 'DefaultDatasetForm'
:type name: string
:param class_: the class itself
:type class_: class
:param docstring: if given, use this instead of introspecting the class
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. class:: directive for the class
:rtype: string
'''
# The template we'll use to render the Sphinx class directive.
template = ('.. py:class:: ckan.plugins.toolkit.{cls}\n'
'\n'
'{docstring}\n'
'\n')
docstring = docstring or inspect.getdoc(class_)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(cls=name, docstring=docstring)
def format_object(name, object_, docstring=None):
'''Return a Sphinx .. attribute:: directive for the given object.
The directive includes the object's class's docstring if it has one.
:param name: the name to give to the object in the directive,
eg. 'request'
:type name: string
:param object_: the object itself
:type object_: object
:param docstring: if given, use this instead of introspecting the object
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. attribute:: directive for the object
:rtype: string
'''
# The template we'll use to render the Sphinx attribute directive.
template = ('.. py:attribute:: ckan.plugins.toolkit.{obj}\n'
'\n'
'{docstring}\n'
'\n')
docstring = docstring or inspect.getdoc(object_)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(obj=name, docstring=docstring)
def source_read(app, docname, source):
'''Transform the contents of plugins-toolkit.rst to contain reference docs.
'''
# We're only interested in the 'plugins-toolkit' doc (plugins-toolkit.rst).
if docname != 'extensions/plugins-toolkit':
return
source_ = ''
for name, thing in inspect.getmembers(toolkit):
# The plugins toolkit can override the docstrings of some of its
# members (e.g. things that are imported from third-party libraries)
# by putting custom docstrings in this docstring_overrides dict.
custom_docstring = toolkit.docstring_overrides.get(name)
if inspect.isfunction(thing):
source_ += format_function(name, thing, docstring=custom_docstring)
elif inspect.ismethod(thing):
# We document plugins toolkit methods as if they're functions. This
# is correct because the class ckan.plugins.toolkit._Toolkit
# actually masquerades as a module ckan.plugins.toolkit, and you
# call its methods as if they were functions.
source_ += format_function(name, thing, docstring=custom_docstring)
elif inspect.isclass(thing):
source_ += format_class(name, thing, docstring=custom_docstring)
elif isinstance(thing, types.ObjectType):
source_ += format_object(name, thing, docstring=custom_docstring)
else:
assert False, ("Someone added {name}:{thing} to the plugins "
"toolkit and this Sphinx extension doesn't know "
"how to document that yet. If you're that someone, "
"you need to add a new format_*() function for it "
"here or the docs won't build.".format(
name=name, thing=thing))
source[0] += source_
# This is useful for debugging the generated RST.
#open('/tmp/source', 'w').write(source[0])
| gpl-3.0 | 6,916,009,595,914,275,000 | 35.727273 | 79 | 0.657833 | false |
Sakshisaraswat/todoman | tests/test_cli.py | 1 | 24483 | import datetime
import sys
from os.path import isdir
from unittest import mock
from unittest.mock import patch
import click
import hypothesis.strategies as st
import pytest
from dateutil.tz import tzlocal
from freezegun import freeze_time
from hypothesis import given
from todoman.cli import cli, exceptions
from todoman.model import Database, Todo
# TODO: test --grep
def test_list(tmpdir, runner, create):
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'harhar' in result.output
def test_no_default_list(runner):
result = runner.invoke(cli, ['new', 'Configure a default list'])
assert result.exception
assert ('Error: Invalid value for "--list" / "-l": You must set '
'"default_list" or use -l.' in result.output)
def test_no_extra_whitespace(tmpdir, runner, create):
"""
Test that we don't output extra whitespace
Test that we don't output a lot of extra whitespace when there are no
tasks, or when there are tasks (eg: both scenarios).
Note: Other tests should be set up so that comparisons don't care much
about whitespace, so that if this changes, only this test should fail.
"""
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert result.output == '\n'
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 1
def test_percent(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
'PERCENT-COMPLETE:78\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert '78%' in result.output
def test_list_inexistant(tmpdir, runner, create):
result = runner.invoke(cli, ['list', 'nonexistant'])
assert result.exception
assert 'Error: Invalid value for "lists":' in result.output
def test_show_existing(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
'DESCRIPTION:Lots of text. Yum!\n'
)
result = runner.invoke(cli, ['list'])
result = runner.invoke(cli, ['show', '1'])
assert not result.exception
assert 'harhar' in result.output
assert 'Lots of text. Yum!' in result.output
def test_show_inexistant(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
result = runner.invoke(cli, ['show', '2'])
assert result.exit_code == 20
assert result.output == 'No todo with id 2.\n'
def test_human(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', '-d', 'tomorrow', 'hail belzebub'
])
assert not result.exception
assert 'belzebub' in result.output
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'belzebub' in result.output
@pytest.mark.xfail(reason='issue#9')
def test_two_events(tmpdir, runner):
tmpdir.join('default/test.ics').write(
'BEGIN:VCALENDAR\n'
'BEGIN:VTODO\n'
'SUMMARY:task one\n'
'END:VTODO\n'
'BEGIN:VTODO\n'
'SUMMARY:task two\n'
'END:VTODO\n'
'END:VCALENDAR'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert 'task one' in result.output
assert 'task two' in result.output
def test_default_command(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli)
assert not result.exception
assert 'harhar' in result.output
def test_delete(runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
result = runner.invoke(cli, ['delete', '1', '--yes'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert not result.output.strip()
def test_delete_prompt(todo_factory, runner, todos):
todo_factory()
result = runner.invoke(cli, ['delete', '1'], input='yes')
assert not result.exception
assert '[y/N]: yes\nDeleting "YARR!"' in result.output
assert len(list(todos())) == 0
def test_copy(tmpdir, runner, create):
tmpdir.mkdir('other_list')
create(
'test.ics',
'SUMMARY:test_copy\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_copy' in result.output
assert 'default' in result.output
assert 'other_list' not in result.output
result = runner.invoke(cli, ['copy', '-l', 'other_list', '1'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_copy' in result.output
assert 'default' in result.output
assert 'other_list' in result.output
def test_move(tmpdir, runner, create):
tmpdir.mkdir('other_list')
create(
'test.ics',
'SUMMARY:test_move\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_move' in result.output
assert 'default' in result.output
assert 'other_list' not in result.output
result = runner.invoke(cli, ['move', '-l', 'other_list', '1'])
assert not result.exception
result = runner.invoke(cli, ['list'])
assert not result.exception
assert 'test_move' in result.output
assert 'default' not in result.output
assert 'other_list' in result.output
@freeze_time('2017-03-17 20:22:19')
def test_dtstamp(tmpdir, runner, create):
"""Test that we add the DTSTAMP to new entries as per RFC5545."""
result = runner.invoke(cli, ['new', '-l', 'default', 'test event'])
assert not result.exception
db = Database([tmpdir.join('default')],
tmpdir.join('/dtstamp_cache'))
todo = list(db.todos())[0]
assert todo.dtstamp is not None
assert todo.dtstamp == datetime.datetime.now(tz=tzlocal())
def test_default_list(tmpdir, runner, create):
"""Test the default_list config parameter"""
result = runner.invoke(cli, ['new', 'test default list'])
assert result.exception
path = tmpdir.join('config')
path.write('default_list = default\n', 'a')
result = runner.invoke(cli, ['new', 'test default list'])
assert not result.exception
db = Database([tmpdir.join('default')],
tmpdir.join('/default_list'))
todo = list(db.todos())[0]
assert todo.summary == 'test default list'
@pytest.mark.parametrize(
'default_due, expected_due_hours', [(None, 24), (1, 1), (0, None)],
ids=['not specified', 'greater than 0', '0']
)
def test_default_due(
tmpdir, runner, create, default_due, expected_due_hours
):
"""Test setting the due date using the default_due config parameter"""
if default_due is not None:
path = tmpdir.join('config')
path.write('default_due = {}\n'.format(default_due), 'a')
runner.invoke(cli, ['new', '-l', 'default', 'aaa'])
db = Database([tmpdir.join('default')], tmpdir.join('/default_list'))
todo = list(db.todos())[0]
if expected_due_hours is None:
assert todo.due is None
else:
assert (todo.due - todo.created_at) == datetime.timedelta(
hours=expected_due_hours
)
@freeze_time(datetime.datetime.now())
def test_default_due2(tmpdir, runner, create, todos):
cfg = tmpdir.join('config')
cfg.write('default_due = 24\n', 'a')
r = runner.invoke(cli, ['new', '-ldefault', '-dtomorrow', 'aaa'])
assert not r.exception
r = runner.invoke(cli, ['new', '-ldefault', 'bbb'])
assert not r.exception
r = runner.invoke(cli, ['new', '-ldefault', '-d', 'one hour', 'ccc'])
assert not r.exception
todos = {t.summary: t for t in todos(status='ANY')}
assert todos['aaa'].due.date() == todos['bbb'].due.date()
assert todos['ccc'].due == todos['bbb'].due - datetime.timedelta(hours=23)
def test_sorting_fields(tmpdir, runner, default_database):
tasks = []
for i in range(1, 10):
days = datetime.timedelta(days=i)
todo = Todo(new=True)
todo.list = next(default_database.lists())
todo.due = datetime.datetime.now() + days
todo.created_at = datetime.datetime.now() - days
todo.summary = 'harhar{}'.format(i)
tasks.append(todo)
default_database.save(todo)
fields = (
'id',
'uid',
'summary',
'due',
'priority',
'created_at',
'completed_at',
'dtstamp',
'status',
'description',
'location',
'categories',
)
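    # hypothesis only generates data when the decorated function is called, so the
    # property is wrapped in a local run_test() and invoked once at the end.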
@given(sort_key=st.lists(
st.sampled_from(fields + tuple('-' + x for x in fields)),
unique=True
))
def run_test(sort_key):
sort_key = ','.join(sort_key)
result = runner.invoke(cli, ['list', '--sort', sort_key])
assert not result.exception
assert result.exit_code == 0
assert len(result.output.strip().splitlines()) == len(tasks)
run_test()
def test_sorting_output(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160102T000000\n'
)
create(
'test2.ics',
'SUMMARY:bbb\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160101T000000\n'
)
examples = [
('-summary', ['aaa', 'bbb']),
('due', ['aaa', 'bbb'])
]
# Normal sorting, reversed by default
all_examples = [(['--sort', key], order) for key, order in examples]
# Testing --reverse, same exact output
all_examples.extend((['--reverse', '--sort', key], order)
for key, order in examples)
# Testing --no-reverse
all_examples.extend((['--no-reverse', '--sort', key], reversed(order))
for key, order in examples)
for args, order in all_examples:
result = runner.invoke(cli, ['list'] + args)
assert not result.exception
lines = result.output.splitlines()
for i, task in enumerate(order):
assert task in lines[i]
def test_sorting_null_values(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'PRIORITY:9\n'
)
create(
'test2.ics',
'SUMMARY:bbb\n'
'DUE;VALUE=DATE-TIME;TZID=ART:20160101T000000\n'
)
result = runner.invoke(cli)
assert not result.exception
assert 'bbb' in result.output.splitlines()[0]
assert 'aaa' in result.output.splitlines()[1]
def test_sort_invalid_fields(runner):
result = runner.invoke(cli, ['list', '--sort', 'hats'])
assert result.exception
assert 'Invalid value for "--sort": Unknown field "hats"' in result.output
@pytest.mark.parametrize('hours', [72, -72])
def test_color_due_dates(tmpdir, runner, create, hours):
due = datetime.datetime.now() + datetime.timedelta(hours=hours)
create(
'test.ics',
'SUMMARY:aaa\n'
'STATUS:IN-PROCESS\n'
'DUE;VALUE=DATE-TIME;TZID=ART:{}\n'
.format(due.strftime('%Y%m%dT%H%M%S'))
)
result = runner.invoke(cli, ['--color', 'always'])
assert not result.exception
due_str = due.strftime('%Y-%m-%d')
if hours == 72:
assert result.output == \
'1 [ ] {} aaa @default\x1b[0m\n'.format(due_str)
else:
assert result.output == \
'1 [ ] \x1b[31m{}\x1b[0m aaa @default\x1b[0m\n' \
.format(due_str)
def test_color_flag(runner, todo_factory):
todo_factory(due=datetime.datetime(2007, 3, 22))
result = runner.invoke(cli, ['--color', 'always'], color=True)
assert(
result.output.strip() ==
'1 [ ] \x1b[31m2007-03-22\x1b[0m YARR! @default\x1b[0m'
)
result = runner.invoke(cli, color=True)
assert(
result.output.strip() ==
'1 [ ] \x1b[31m2007-03-22\x1b[0m YARR! @default\x1b[0m'
)
result = runner.invoke(cli, ['--color', 'never'], color=True)
assert(
result.output.strip() ==
'1 [ ] 2007-03-22 YARR! @default'
)
def test_flush(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:aaa\n'
'STATUS:COMPLETED\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
create(
'test2.ics',
'SUMMARY:bbb\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert '2 [ ] bbb @default' in result.output
result = runner.invoke(cli, ['flush'], input='y\n', catch_exceptions=False)
assert not result.exception
create(
'test2.ics',
'SUMMARY:bbb\n'
)
result = runner.invoke(cli, ['list'])
assert not result.exception
assert '1 [ ] bbb @default' in result.output
def test_edit(runner, default_database, todos):
todo = Todo(new=True)
todo.list = next(default_database.lists())
todo.summary = 'Eat paint'
todo.due = datetime.datetime(2016, 10, 3)
default_database.save(todo)
result = runner.invoke(cli, ['edit', '1', '--due', '2017-02-01'])
assert not result.exception
assert '2017-02-01' in result.output
todo = next(todos(status='ANY'))
assert todo.due == datetime.datetime(2017, 2, 1, tzinfo=tzlocal())
assert todo.summary == 'Eat paint'
def test_edit_move(runner, todo_factory, default_database, tmpdir, todos):
"""
Test that editing the list in the UI edits the todo as expected
The goal of this test is not to test the editor itself, but rather the
`edit` command and its slightly-complex moving logic.
"""
tmpdir.mkdir('another_list')
default_database.paths = [
str(tmpdir.join('default')),
str(tmpdir.join('another_list')),
]
default_database.update_cache()
todo_factory(summary='Eat some headphones')
lists = list(default_database.lists())
another_list = next(filter(lambda x: x.name == 'another_list', lists))
def moving_edit(self):
self.current_list = another_list
self._save_inner()
with patch('todoman.interactive.TodoEditor.edit', moving_edit):
result = runner.invoke(cli, ['edit', '1'])
assert not result.exception
todos = list(todos())
assert len(todos) == 1
assert todos[0].list.name == 'another_list'
def test_edit_retains_id(runner, todos, todo_factory):
"""Tests that we retain a todo's ID after editing."""
original_id = todo_factory().id
result = runner.invoke(cli, ['edit', '1', '--due', '2017-04-01'])
assert not result.exception
todo = next(todos())
assert todo.due == datetime.datetime(2017, 4, 1, tzinfo=tzlocal())
assert todo.id == original_id
def test_edit_inexistant(runner):
"""Tests that we show the right output and exit code for inexistant ids."""
result = runner.invoke(cli, ['edit', '1', '--due', '2017-04-01'])
assert result.exception
assert result.exit_code == exceptions.NoSuchTodo.EXIT_CODE
assert result.output.strip() == 'No todo with id 1.'
def test_empty_list(tmpdir, runner, create):
for item in tmpdir.listdir():
if isdir(str(item)):
item.remove()
result = runner.invoke(cli)
expected = ("No lists found matching {}/*, create"
" a directory for a new list").format(tmpdir)
assert expected in result.output
def test_show_location(tmpdir, runner, create):
create(
'test.ics',
'SUMMARY:harhar\n'
'LOCATION:Boston\n'
)
result = runner.invoke(cli, ['show', '1'])
assert 'Boston' in result.output
def test_location(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', '--location', 'Chembur', 'Event Test'
])
assert 'Chembur' in result.output
def test_sort_mixed_timezones(runner, create):
"""
Test sorting mixed timezones.
The times on this tests are carefully chosen so that a TZ-naive comparison
gives the opposite results.
"""
create(
'test.ics',
'SUMMARY:first\n'
'DUE;VALUE=DATE-TIME;TZID=CET:20170304T180000\n' # 1700 UTC
)
create(
'test2.ics',
'SUMMARY:second\n'
'DUE;VALUE=DATE-TIME;TZID=HST:20170304T080000\n' # 1800 UTC
)
result = runner.invoke(cli, ['list', '--status', 'ANY'])
assert not result.exception
output = result.output.strip()
assert len(output.splitlines()) == 2
assert 'second' in result.output.splitlines()[0]
assert 'first' in result.output.splitlines()[1]
def test_humanize_interactive(runner):
result = runner.invoke(cli, ['--humanize', '--porcelain', 'list'])
assert result.exception
assert result.output.strip() == \
"Error: --porcelain and --humanize cannot be used at the same time."
def test_due_bad_date(runner):
result = runner.invoke(cli, ['new', '--due', 'Not a date', 'Blargh!'])
assert result.exception
assert (
'Error: Invalid value for "--due" / "-d": Time description not '
'recognized: Not a date' == result.output.strip().splitlines()[-1]
)
def test_multiple_todos_in_file(runner, create):
create(
'test.ics',
'SUMMARY:a\n'
'END:VTODO\n'
'BEGIN:VTODO\n'
'SUMMARY:b\n'
)
for _ in range(2):
result = runner.invoke(cli, ['list'])
assert ' a ' in result.output
assert ' b ' in result.output
assert 'warning: Todo is in read-only mode' in result.output
result = runner.invoke(cli, ['done', '1'])
assert result.exception
assert 'Todo is in read-only mode because there are multiple todos' \
in result.output
result = runner.invoke(cli, ['show', '1'])
assert not result.exception
result = runner.invoke(cli, ['show', '2'])
assert not result.exception
def test_todo_new(runner, default_database):
    # This isn't a very thorough test, but at least catches obvious regressions
# like startup crashes or typos.
with patch('urwid.MainLoop'):
result = runner.invoke(cli, ['new', '-l', 'default'])
# No SUMMARY error after UI runs
assert isinstance(result.exception, SystemExit)
assert result.exception.args == (2,)
assert 'Error: No SUMMARY specified' in result.output
def test_todo_edit(runner, default_database, todo_factory):
    # This isn't a very thorough test, but at least catches obvious regressions
# like startup crashes or typos.
todo_factory()
with patch('urwid.MainLoop'):
result = runner.invoke(cli, ['edit', '1'])
assert not result.exception
assert 'YARR!' in result.output
@freeze_time('2017, 3, 20')
def test_list_startable(tmpdir, runner, todo_factory):
todo_factory(summary='started', start=datetime.datetime(2017, 3, 15))
todo_factory(summary='nostart')
todo_factory(summary='unstarted', start=datetime.datetime(2017, 3, 24))
result = runner.invoke(
cli,
['list', '--startable'],
catch_exceptions=False,
)
assert not result.exception
assert 'started' in result.output
assert 'nostart' in result.output
assert 'unstarted' not in result.output
result = runner.invoke(
cli,
['list'],
catch_exceptions=False,
)
assert not result.exception
assert 'started' in result.output
assert 'nostart' in result.output
assert 'unstarted' in result.output
path = tmpdir.join('config')
path.write('startable = yes\n', 'a')
result = runner.invoke(cli, ['list'], catch_exceptions=False)
assert not result.exception
assert 'started' in result.output
assert 'nostart' in result.output
assert 'unstarted' not in result.output
def test_bad_start_date(runner):
result = runner.invoke(cli, ['list', '--start'])
assert result.exception
assert (
result.output.strip() == 'Error: --start option requires 2 arguments'
)
result = runner.invoke(cli, ['list', '--start', 'before'])
assert result.exception
assert (
result.output.strip() == 'Error: --start option requires 2 arguments'
)
result = runner.invoke(cli, ['list', '--start', 'before', 'not_a_date'])
assert result.exception
assert (
'Invalid value for "--start": Time description not recognized: '
'not_a_date' in result.output
)
result = runner.invoke(cli, ['list', '--start', 'godzilla', '2017-03-22'])
assert result.exception
assert ("Format should be '[before|after] [DATE]'" in result.output)
def test_done(runner, todo_factory, todos):
todo = todo_factory()
result = runner.invoke(cli, ['done', '1'])
assert not result.exception
todo = next(todos(status='ANY'))
assert todo.percent_complete == 100
assert todo.is_completed is True
result = runner.invoke(cli, ['done', '17'])
assert result.exception
assert result.output.strip() == 'No todo with id 17.'
def test_cancel(runner, todo_factory, todos):
todo = todo_factory()
result = runner.invoke(cli, ['cancel', '1'])
assert not result.exception
todo = next(todos(status='ANY'))
assert todo.status == 'CANCELLED'
def test_id_printed_for_new(runner):
result = runner.invoke(cli, [
'new', '-l', 'default', 'show me an id'
])
assert not result.exception
assert result.output.strip().startswith('1')
def test_repl(runner):
"""Test that repl registers properly."""
if 'click_repl' not in sys.modules:
pytest.skip('Optional dependency "click_repl" is not installed')
result = runner.invoke(cli, ['--help'])
assert not result.exception
assert 'repl Start an interactive shell.' in result.output
assert 'shell Start an interactive shell.' in result.output
def test_no_repl(runner):
"""Test that we work fine without click_repl installed."""
modules = sys.modules
if 'click_repl' in modules:
pytest.skip("Test can't be run with click_repl installed")
result = runner.invoke(cli, ['--help'])
assert not result.exception
assert 'repl' not in result.output
assert 'shell' not in result.output
assert 'Start an interactive shell.' not in result.output
def test_status_validation():
from todoman import cli
@given(statuses=st.lists(
st.sampled_from(Todo.VALID_STATUSES + ('ANY',)),
min_size=1,
max_size=5,
unique=True
))
def run_test(statuses):
validated = cli.validate_status(val=','.join(statuses))
if 'ANY' in statuses:
assert len(validated) == 4
else:
assert len(validated) == len(statuses)
for status in validated:
assert status in Todo.VALID_STATUSES
run_test()
def test_bad_status_validation():
from todoman import cli
with pytest.raises(click.BadParameter):
cli.validate_status(val='INVALID')
with pytest.raises(click.BadParameter):
cli.validate_status(val='IN-PROGRESS')
def test_status_filtering(runner, todo_factory):
todo_factory(summary='one', status='CANCELLED')
todo_factory(summary='two')
result = runner.invoke(cli, ['list', '--status', 'cancelled'])
assert not result.exception
assert len(result.output.splitlines()) == 1
    assert 'one ' in result.output
result = runner.invoke(cli, ['list', '--status', 'NEEDS-action'])
assert not result.exception
assert len(result.output.splitlines()) == 1
assert 'two' in result.output
def test_invoke_command(runner, tmpdir):
path = tmpdir.join('config')
path.write('default_command = flush\n', 'a')
flush = mock.MagicMock()
with patch.dict(cli.commands, values=dict(flush=flush)):
result = runner.invoke(cli, catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
assert flush.call_count == 1
def test_invoke_invalid_command(runner, tmpdir):
path = tmpdir.join('config')
path.write('default_command = DoTheRobot\n', 'a')
result = runner.invoke(cli, catch_exceptions=False)
assert result.exception
assert (
'Error: Invalid setting for [main][default_command]' in result.output
)
| isc | -2,388,368,933,553,996,000 | 27.769683 | 79 | 0.622881 | false |
au9ustine/org.au9ustine.puzzles.codility | lessons/lesson02_counting_elements/MissingInteger.py | 1 | 1952 | """MissingInteger (https://codility.com/demo/take-sample-test/missing_integer/)
Analysis:
- Pigeonhole Principle
- Radix sort
Comment:
- The problem description did not provide the default case, which puzzled me at first.
After referencing solutions by others, I found that some of them simply invent a default case,
e.g. returning -1 (though in practice it can never be reached).
Updated analysis:
Since min_pos_int appears either in [1,N] or in [N,2**31-1], we consider these cases.
If all of A falls in [N,2**31-1], min_pos_int must be in [1,N].
If A spans [1,N] and [N,2**31-1], min_pos_int must be in [1,N] as well.
If all of A falls in [1,N], we must check further: in the extreme case A is exactly [1,N], so min_pos_int is N+1; otherwise min_pos_int is still in [1,N].
If A spans [negatives, N], min_pos_int is still in [1,N].
Tip: Minimal measurable unit is integer.
"""
__author__ = 'au9ustine'
def solution(A):
'''https://codility.com/demo/results/trainingAB55MN-EST/'''
# write your code in Python 2.7
N = len(A)
cnt = [0] * N
for i in A:
if i >= 1 and i <= N:
cnt[i-1] += 1
try:
return next(k+1 for k, v in enumerate(cnt) if v == 0)
    except StopIteration:
return N+1
def solution_old(A):
hit_count = [False] * (len(A)+1)
for val in A:
if 1 <= val <= len(A)+1:
hit_count[val-1] = True
for i, val in enumerate(hit_count):
if val is False:
return i+1
return -1
def solution_scored77(A):
positive_numbers = [x for x in set(A) if x > 0]
if len(positive_numbers) == 0:
max_val = 0
else:
max_val = max(positive_numbers)
if len(positive_numbers) == max_val:
max_val += 1
missing_number_gen = (i+1 for i in xrange(len(positive_numbers)) if positive_numbers[i] != i+1)
result = next(missing_number_gen, max_val)
return result
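# Illustrative usage (not part of the original submission): a minimal sketch
# checking the counting solution on a few inputs; the expected values follow
# from the pigeonhole argument in the docstring above.
if __name__ == '__main__':
    assert solution([1, 3, 6, 4, 1, 2]) == 5   # smallest missing positive
    assert solution([1, 2, 3]) == 4            # A covers [1, N], so answer is N + 1
    assert solution([-1, -3]) == 1             # no positives at all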
| mit | -5,423,153,813,235,830,000 | 32.084746 | 163 | 0.614242 | false |
balta2ar/coursera-dl | coursera/define.py | 1 | 3964 | # -*- coding: utf-8 -*-
"""
This module defines the global constants.
"""
import os
import getpass
import tempfile
COURSERA_URL = 'https://www.coursera.org'
AUTH_URL = 'https://accounts.coursera.org/api/v1/login'
AUTH_URL_V3 = 'https://www.coursera.org/api/login/v3'
CLASS_URL = 'https://class.coursera.org/{class_name}'
OPENCOURSE_CONTENT_URL = 'https://www.coursera.org/api/opencourse.v1/course/{class_name}'
OPENCOURSE_VIDEO_URL = 'https://www.coursera.org/api/opencourse.v1/video/{video_id}'
OPENCOURSE_SUPPLEMENT_URL = 'https://www.coursera.org/api/onDemandSupplements.v1/'\
'{course_id}~{element_id}?includes=asset&fields=openCourseAssets.v1%28typeName%29,openCourseAssets.v1%28definition%29'
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL = \
'https://www.coursera.org/api/onDemandProgrammingLearnerAssignments.v1/{course_id}~{element_id}?fields=submissionLearnerSchema'
# These are ids that are present in <asset> tag in assignment text:
#
# <asset id=\"yeJ7Q8VAEeWPRQ4YsSEORQ\"
# name=\"statement-pca\"
# extension=\"pdf\"
# assetType=\"generic\"/>
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "yeJ7Q8VAEeWPRQ4YsSEORQ",
# "url": "<some url>",
# "expires": 1454371200000
# }
# ],
# "paging": null,
# "linked": null
# }
OPENCOURSE_ASSET_URL = \
'https://www.coursera.org/api/assetUrls.v1?ids={ids}'
# These ids are provided in lecture json:
#
# {
# "id": "6ydIh",
# "name": "Введение в теорию игр",
# "elements": [
# {
# "id": "ujNfj",
# "name": "Что изучает теория игр?",
# "content": {
# "typeName": "lecture",
# "definition": {
# "duration": 536000,
# "videoId": "pGNiQYo-EeWNvA632PIn3w",
# "optional": false,
# "assets": [
# "giAxucdaEeWJTQ5WTi8YJQ@1"
# ]
# }
# },
# "slug": "chto-izuchaiet-tieoriia-ighr",
# "timeCommitment": 536000
# }
# ],
# "slug": "vviedieniie-v-tieoriiu-ighr",
# "timeCommitment": 536000,
# "optional": false
# }
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "giAxucdaEeWJTQ5WTi8YJQ",
# "typeName": "asset",
# "definition": {
# "name": "",
# "assetId": "Vq8hwsdaEeWGlA7xclFASw"
# }
# }
# ],
# "paging": null,
# "linked": null
# }
OPENCOURSE_ASSETS_URL = \
'https://www.coursera.org/api/openCourseAssets.v1/{id}'
# These asset ids are ids returned from OPENCOURSE_ASSETS_URL request:
# See example above.
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "Vq8hwsdaEeWGlA7xclFASw",
# "name": "1_Strategic_Interactions.pdf",
# "typeName": "generic",
# "url": {
# "url": "<some url>",
# "expires": 1454371200000
# }
# }
# ],
# "paging": null,
# "linked": null
# }
OPENCOURSE_API_ASSETS_V1_URL = \
'https://www.coursera.org/api/assets.v1/{id}'
ABOUT_URL = ('https://api.coursera.org/api/catalog.v1/courses?'
'fields=largeIcon,photo,previewLink,shortDescription,smallIcon,'
'smallIconHover,universityLogo,universityLogoSt,video,videoId,'
'aboutTheCourse,targetAudience,faq,courseSyllabus,courseFormat,'
'suggestedReadings,instructor,estimatedClassWorkload,'
'aboutTheInstructor,recommendedBackground,subtitleLanguagesCsv&'
'q=search&query={class_name}')
AUTH_REDIRECT_URL = ('https://class.coursera.org/{class_name}'
'/auth/auth_redirector?type=login&subtype=normal')
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# define a per-user cache folder
if os.name == "posix": # pragma: no cover
import pwd
_USER = pwd.getpwuid(os.getuid())[0]
else:
_USER = getpass.getuser()
PATH_CACHE = os.path.join(tempfile.gettempdir(), _USER + "_coursera_dl_cache")
PATH_COOKIES = os.path.join(PATH_CACHE, 'cookies')
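# Hedged illustration (not part of the original module): the constants above are
# plain str.format templates; the ids/class_name values below are made-up examples.
# OPENCOURSE_ASSET_URL.format(ids='yeJ7Q8VAEeWPRQ4YsSEORQ')
#   -> 'https://www.coursera.org/api/assetUrls.v1?ids=yeJ7Q8VAEeWPRQ4YsSEORQ'
# CLASS_URL.format(class_name='ml-005')
#   -> 'https://class.coursera.org/ml-005'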
| lgpl-3.0 | 3,818,030,178,759,966 | 27.664234 | 131 | 0.609626 | false |
wesyoung/pyzyre | pyzyre/utils/color.py | 1 | 3935 | # http://stackoverflow.com/a/1336640
# http://creativecommons.org/licenses/by-sa/3.0/
import logging
# now we patch Python code to add color support to logging.StreamHandler
def add_coloring_to_emit_windows(fn):
# add methods we need to the class
def _out_handle(self):
import ctypes
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
out_handle = property(_out_handle)
def _set_color(self, code):
import ctypes
# Constants from the Windows API
self.STD_OUTPUT_HANDLE = -11
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
setattr(logging.StreamHandler, '_set_color', _set_color)
def new(*args):
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
levelno = args[1].levelno
if (levelno >= 50):
color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
elif (levelno >= 40):
color = FOREGROUND_RED | FOREGROUND_INTENSITY
elif (levelno >= 30):
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
elif (levelno >= 20):
color = FOREGROUND_GREEN
elif (levelno >= 10):
color = FOREGROUND_MAGENTA
else:
color = FOREGROUND_WHITE
args[0]._set_color(color)
ret = fn(*args)
args[0]._set_color(FOREGROUND_WHITE)
# print "after"
return ret
return new
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if (levelno >= 50):
color = '\x1b[31m' # red
elif (levelno >= 40):
color = '\x1b[31m' # red
elif (levelno >= 30):
color = '\x1b[33m' # yellow
elif (levelno >= 20):
color = '\x1b[32m' # green
elif (levelno >= 10):
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
if not args[1].msg:
color = '\x1b[31m'
args[1].msg = 'NoneType'
args[1].msg = color + str(args[1].msg) + '\x1b[0m' # normal
# print "after"
return fn(*args)
return new
import platform
if platform.system() == 'Windows':
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
else:
# all non-Windows platforms are supporting ANSI escapes so we use them
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
# log = logging.getLogger()
# log.addFilter(log_filter())
# //hdlr = logging.StreamHandler()
# //hdlr.setFormatter(formatter())
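# Minimal usage sketch (assumption: importing this module is enough, since the
# StreamHandler class is patched above); the logger name 'demo' is arbitrary.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('demo')
    log.debug('shown in pink on ANSI terminals')
    log.info('green')
    log.warning('yellow')
    log.error('red')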
| mpl-2.0 | -8,429,500,301,556,750,000 | 32.067227 | 100 | 0.608132 | false |
google/gazoo-device | gazoo_device/capabilities/switch_power_dli_powerswitch.py | 1 | 7104 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the switch_power_dli_powerswitch capability."""
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.capabilities.interfaces import switch_power_base
logger = gdm_logger.get_logger()
OFF = "off"
ON = "on"
class SwitchPowerDliPowerswitch(switch_power_base.SwitchPowerBase):
"""Definition of the switch_power_dli_powerswitch capability."""
_REQUIRED_COMMANDS = ["ADJUST_PORTS_MODE"]
_REQUIRED_HEADERS = ["GET_PROP", "SET_PROP"]
def __init__(self, http_fn, ip_address, command_dict, headers_dict,
device_name, total_ports):
"""Create an instance of the powerswitch power capability.
Args:
http_fn (func): A function to send GET and PUT http commands
ip_address (str): The IP address of the Powerswitch.
command_dict (dict): A dictionary containing the command used for
each method.
The dictionary must contain the following keys:
- ADJUST_PORTS_MODE
      headers_dict (dict): A dictionary containing the required headers for
        the HTTP requests.
        The dictionary must contain the following keys:
          - GET_PROP
          - SET_PROP
device_name (str): name of the device this capability is attached
to.
total_ports (int): number of ports on device
"""
super().__init__(device_name=device_name)
self._http_fn = http_fn
self._ip_address = ip_address
self._command_dict = command_dict
self._headers_dict = headers_dict
self._total_ports = total_ports
self.validate_required_keys(self._REQUIRED_COMMANDS, command_dict,
"command_dict")
self.validate_required_keys(self._REQUIRED_HEADERS, headers_dict,
"headers_dict")
@decorators.PersistentProperty
def supported_modes(self):
"""Get the Powerswitch power modes supported by the dli_powerswitch."""
return [OFF, ON]
@decorators.PersistentProperty
def total_ports(self):
"""Get the number of device ports.
Returns:
int: the number of device ports
"""
return int(self._total_ports)
def get_all_ports_mode(self):
"""Get mode of all the device ports.
Returns:
list: a list of the all the ports mode.
"""
response = self._http_fn(
"GET",
self._command_dict["ADJUST_PORTS_MODE"].format(
"all;", ip=self._ip_address),
headers=self._headers_dict["GET_PROP"])
ports_value = response.split(",")
ports_mode = [ON if port == "true" else OFF for port in ports_value]
return ports_mode
def get_mode(self, port):
"""Get mode of the specified port.
Args:
port (int): device port number
Raises:
DeviceError: if port is None, port < 0, or port >= total_ports
Returns:
str: mode of specified port, either 'on' or 'off'.
"""
self._validate_port("get_mode", port)
response = self._http_fn(
"GET",
self._command_dict["ADJUST_PORTS_MODE"].format(
"=" + str(port), ip=self._ip_address),
headers=self._headers_dict["GET_PROP"])
if response == "true":
return ON
else:
return OFF
@decorators.CapabilityLogDecorator(logger)
def power_on(self, port):
"""Powers on the specified port.
Args:
port (int): device port number
Raises:
DeviceError: if port is None, port < 0, or port >= total_ports
"""
self._validate_port("power_on", port)
logger.debug("{} Powering on powerswitch port {}".format(
self._device_name, port))
self._http_fn(
"POST",
self._command_dict["ADJUST_PORTS_MODE"].format(
"=" + str(port), ip=self._ip_address),
headers=self._headers_dict["SET_PROP"],
data={"value": "true"})
@decorators.CapabilityLogDecorator(logger)
def power_off(self, port):
"""Powers off the specified port.
Args:
port (int): device port number
Raises:
DeviceError: if port is None, port < 0, or port >= total_ports
"""
self._validate_port("power_off", port)
logger.debug("{} Powering off powerswitch port {}".format(
self._device_name, port))
self._http_fn(
"POST",
self._command_dict["ADJUST_PORTS_MODE"].format(
"=" + str(port), ip=self._ip_address),
headers=self._headers_dict["SET_PROP"],
data={"value": "false"})
@decorators.CapabilityLogDecorator(logger)
def set_mode(self, mode, port):
"""Sets the given Powerswitch port to the mode specified.
Args:
mode (str): mode to set Powerswitch ports to. e.g. 'off', 'on'
port (int): the port to set.
Raises:
DeviceError: invalid mode.
"""
self._validate_mode(mode)
if mode == ON:
self.power_on(port=port)
else:
self.power_off(port=port)
@decorators.CapabilityLogDecorator(logger)
def set_all_ports_mode(self, mode):
"""Set all device ports to the specified mode.
Args:
mode (str): Mode to set all Powerswitch ports to. Valid modes are
"on" or "off".
Raises:
DeviceError: if input mode is not either 'on' or 'off'
"""
self._validate_mode(mode)
if mode == ON:
data_value = "true"
else:
data_value = "false"
self._http_fn(
"POST",
self._command_dict["ADJUST_PORTS_MODE"].format(
"all;", ip=self._ip_address),
headers=self._headers_dict["SET_PROP"],
data={"value": data_value})
def _validate_mode(self, mode):
"""Verify mode given resides in the valid mode list.
Args:
mode (str): mode e.g. 'off', 'on'
Raises:
DeviceError: if mode given is not found in the valid mode list.
"""
if mode not in list(self.supported_modes):
raise errors.DeviceError(
"Mode {} not found in supported modes {!r}".format(
mode, self.supported_modes))
def _validate_port(self, method_name, port):
"""Ensure port is a valid port number.
Args:
method_name(str): name of the method where _validate_port is being
called
port (int): device port number
Raises:
DeviceError: if input port is not valid
"""
if port is None or port <= -1 or port >= self._total_ports:
raise errors.DeviceError("Device {} {} failed. "
"Port {} is invalid.".format(
self._device_name, method_name, port))
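# Illustrative construction only (kept commented out): the http helper, URL
# template and header values below are assumptions for this sketch, not the real
# DLI Powerswitch command dictionary.
# switch = SwitchPowerDliPowerswitch(
#     http_fn=my_http_fn,                     # hypothetical request helper
#     ip_address="192.168.1.50",
#     command_dict={"ADJUST_PORTS_MODE": "http://{ip}/outlets/{}"},
#     headers_dict={"GET_PROP": {"Accept": "application/json"},
#                   "SET_PROP": {"Content-Type": "application/json"}},
#     device_name="powerswitch-1",
#     total_ports=8)
# switch.power_on(port=0)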
| apache-2.0 | 7,759,900,317,060,049,000 | 30.856502 | 76 | 0.616836 | false |
blazek/lrs | lrs/lrs/error/lrserror.py | 1 | 7207 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LrsError
A QGIS plugin
Linear reference system builder and editor
-------------------
begin : 2013-10-02
copyright : (C) 2013 by Radim Blažek
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from hashlib import md5
from ..utils import *
# Origin of geometry part used for error checksums, it allows to update errors when
# geometry is changed, but error remains.
# The identification by origin unfortunately fails if geometry part is deleted and thus
# geoPart numbers are changed. That is why there is also nGeoParts
# Class representing error in LRS
class LrsError(QObject):
# Error type enums
DUPLICATE_LINE = 1
DUPLICATE_POINT = 2
FORK = 3 # more than 2 lines connected in one node
ORPHAN = 4 # orphan point, no line with such routeId
OUTSIDE_THRESHOLD = 5 # out of the threshold from line
NOT_ENOUGH_MILESTONES = 6 # part has less than 2 milestones attached
NO_ROUTE_ID = 7 # missing route id
NO_MEASURE = 8 # missing point measure attribute value
DIRECTION_GUESS = 9 # cannot guess part direction
WRONG_MEASURE = 10 # milestones in wrong position
DUPLICATE_REFERENCING = 11 # multiple route segments measures overlap
PARALLEL = 12 # parallel line
FORK_LINE = 13 # parts connected in fork
def __init__(self, type, geo, **kwargs):
super(LrsError, self).__init__()
self.type = type
self.geo = QgsGeometry(geo) # store copy of QgsGeometry
self.message = kwargs.get('message', '')
self.routeId = kwargs.get('routeId', None)
self.measure = kwargs.get('measure', None) # may be list !
# self.lineFid = kwargs.get('lineFid', None)
# self.pointFid = kwargs.get('pointFid', None) # may be list !
# multigeometry part
# self.geoPart = kwargs.get('geoPart', None) # may be list !
self.origins = kwargs.get('origins', []) # list of LrsOrigin
# checksum cache
self.originChecksum_ = None
self.checksum_ = None
# self.fullChecksum_ = None
        # initialized here to allow translation; how to translate static variables?
self.typeLabels = {
self.DUPLICATE_LINE: self.tr('Duplicate line'),
self.DUPLICATE_POINT: self.tr('Duplicate point'),
self.FORK: self.tr('Fork'),
self.ORPHAN: self.tr('Orphan point'),
self.OUTSIDE_THRESHOLD: self.tr('Out of threshold'),
self.NOT_ENOUGH_MILESTONES: self.tr('Not enough points'),
self.NO_ROUTE_ID: self.tr('Missing route id'),
self.NO_MEASURE: self.tr('Missing measure'),
self.DIRECTION_GUESS: self.tr('Cannot guess direction'),
self.WRONG_MEASURE: self.tr('Wrong measure'),
self.DUPLICATE_REFERENCING: self.tr('Duplicate referencing'),
self.PARALLEL: self.tr('Parallel line'),
self.FORK_LINE: self.tr('Fork line'),
}
def __str__(self):
return "error: %s %s %s %s %s" % (self.type, self.typeLabel(), self.message, self.routeId, self.measure)
def typeLabel(self):
if not self.type in self.typeLabels:
return "Unknown error"
return self.typeLabels[self.type]
# get string of simple value or list
def getValueString(self, value):
if value == None:
return ""
elif isinstance(value, list):
vals = list(value)
vals.sort()
return " ".join(map(str, vals))
else:
return str(value)
def getMeasureString(self):
return self.getValueString(self.measure)
# def getPointFidString(self):
# return self.getValueString ( self.pointFid )
# def getGeoPartString(self):
# return self.getValueString ( self.geoPart )
def getOriginChecksum(self):
if not self.originChecksum_:
checksums = []
for origin in self.origins:
checksums.append(origin.getChecksum())
checksums.sort()
m = md5()
for checksum in checksums:
m.update(checksum)
self.originChecksum_ = m.digest()
return self.originChecksum_
# base checksum, mostly using origin, maybe used to update errors,
# calculation depends on error type
def getChecksum(self):
if not self.checksum_:
m = md5(str(self.type).encode())
if self.type == self.DUPLICATE_LINE:
m.update(self.geo.asWkb())
elif self.type == self.DUPLICATE_POINT:
m.update(self.geo.asWkb())
elif self.type == self.FORK:
m.update(str(self.routeId).encode())
m.update(self.geo.asWkb())
elif self.type == self.ORPHAN:
m.update(self.getOriginChecksum())
elif self.type == self.OUTSIDE_THRESHOLD:
m.update(self.getOriginChecksum())
elif self.type == self.NOT_ENOUGH_MILESTONES:
m.update(str(self.routeId).encode())
m.update(self.getOriginChecksum())
elif self.type == self.NO_ROUTE_ID:
m.update(self.getOriginChecksum())
elif self.type == self.NO_MEASURE:
m.update(self.getOriginChecksum())
elif self.type == self.DIRECTION_GUESS:
m.update(self.getOriginChecksum())
elif self.type == self.WRONG_MEASURE:
m.update(self.getOriginChecksum())
elif self.type == self.DUPLICATE_REFERENCING:
m.update(str(self.routeId).encode())
m.update(self.geo.asWkb())
m.update(self.getMeasureString().encode())
elif self.type == self.PARALLEL:
m.update(self.getOriginChecksum())
elif self.type == self.FORK_LINE:
m.update(self.getOriginChecksum())
self.checksum_ = m.digest()
return self.checksum_
# full checksum
# def getFullChecksum(self):
# if not self.fullChecksum_:
# s = "%s-%s-%s-%s-%s" % ( self.type, self.geo.asWkb(), self.routeId, self.getMeasureString(), self.getOriginChecksum() )
# m = md5( s )
# self.fullChecksum_ = m.digest()
# return self.fullChecksum_
| gpl-2.0 | 2,827,248,011,770,686,500 | 40.653179 | 130 | 0.546767 | false |
davy39/eric | UI/FindFileDialog.py | 1 | 26494 | # -*- coding: utf-8 -*-
# Copyright (c) 2002 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to search for text in files.
"""
from __future__ import unicode_literals
import os
import re
from PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QDialog, QApplication, QMenu, QDialogButtonBox, \
QTreeWidgetItem
from E5Gui.E5Application import e5App
from E5Gui import E5MessageBox, E5FileDialog
from .Ui_FindFileDialog import Ui_FindFileDialog
import Utilities
import Preferences
import UI.PixmapCache
class FindFileDialog(QDialog, Ui_FindFileDialog):
"""
Class implementing a dialog to search for text in files.
The occurrences found are displayed in a QTreeWidget showing the filename,
the linenumber and the found text. The file will be opened upon a double
click onto the respective entry of the list.
@signal sourceFile(str, int, str, int, int) emitted to open a source file
at a line
@signal designerFile(str) emitted to open a Qt-Designer file
"""
sourceFile = pyqtSignal(str, int, str, int, int)
designerFile = pyqtSignal(str)
lineRole = Qt.UserRole + 1
startRole = Qt.UserRole + 2
endRole = Qt.UserRole + 3
replaceRole = Qt.UserRole + 4
md5Role = Qt.UserRole + 5
def __init__(self, project, replaceMode=False, parent=None):
"""
Constructor
@param project reference to the project object
@param replaceMode flag indicating the replace dialog mode (boolean)
@param parent parent widget of this dialog (QWidget)
"""
super(FindFileDialog, self).__init__(parent)
self.setupUi(self)
self.setWindowFlags(Qt.WindowFlags(Qt.Window))
self.dirSelectButton.setIcon(UI.PixmapCache.getIcon("open.png"))
self.__replaceMode = replaceMode
self.stopButton = \
self.buttonBox.addButton(self.tr("Stop"),
QDialogButtonBox.ActionRole)
self.stopButton.setEnabled(False)
self.findButton = \
self.buttonBox.addButton(self.tr("Find"),
QDialogButtonBox.ActionRole)
self.findButton.setEnabled(False)
self.findButton.setDefault(True)
if self.__replaceMode:
self.replaceButton.setEnabled(False)
self.setWindowTitle(self.tr("Replace in Files"))
else:
self.replaceLabel.hide()
self.replacetextCombo.hide()
self.replaceButton.hide()
self.findProgressLabel.setMaximumWidth(550)
self.findtextCombo.setCompleter(None)
self.replacetextCombo.setCompleter(None)
self.searchHistory = Preferences.toList(
Preferences.Prefs.settings.value(
"FindFileDialog/SearchHistory"))
self.replaceHistory = Preferences.toList(
Preferences.Prefs.settings.value(
"FindFileDialog/ReplaceHistory"))
self.dirHistory = Preferences.toList(
Preferences.Prefs.settings.value(
"FindFileDialog/DirectoryHistory"))
self.findtextCombo.addItems(self.searchHistory)
self.replacetextCombo.addItems(self.replaceHistory)
self.dirCombo.addItems(self.dirHistory)
self.project = project
self.findList.headerItem().setText(self.findList.columnCount(), "")
self.findList.header().setSortIndicator(0, Qt.AscendingOrder)
self.__section0Size = self.findList.header().sectionSize(0)
self.findList.setExpandsOnDoubleClick(False)
if self.__replaceMode:
font = Preferences.getEditorOtherFonts("MonospacedFont")
self.findList.setFont(font)
# Qt Designer form files
self.filterForms = r'.*\.ui$'
self.formsExt = ['*.ui']
# Corba interface files
self.filterInterfaces = r'.*\.idl$'
self.interfacesExt = ['*.idl']
# Qt resources files
self.filterResources = r'.*\.qrc$'
self.resourcesExt = ['*.qrc']
self.__cancelSearch = False
self.__lastFileItem = None
self.__populating = False
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.__contextMenuRequested)
def __createItem(self, file, line, text, start, end, replTxt="", md5=""):
"""
Private method to create an entry in the file list.
@param file filename of file (string)
@param line line number (integer)
@param text text found (string)
@param start start position of match (integer)
@param end end position of match (integer)
@param replTxt text with replacements applied (string)
@keyparam md5 MD5 hash of the file (string)
"""
if self.__lastFileItem is None:
# It's a new file
self.__lastFileItem = QTreeWidgetItem(self.findList, [file])
self.__lastFileItem.setFirstColumnSpanned(True)
self.__lastFileItem.setExpanded(True)
if self.__replaceMode:
self.__lastFileItem.setFlags(
self.__lastFileItem.flags() |
Qt.ItemFlags(Qt.ItemIsUserCheckable | Qt.ItemIsTristate))
# Qt bug:
# item is not user checkable if setFirstColumnSpanned
# is True (< 4.5.0)
self.__lastFileItem.setData(0, self.md5Role, md5)
itm = QTreeWidgetItem(self.__lastFileItem)
itm.setTextAlignment(0, Qt.AlignRight)
itm.setData(0, Qt.DisplayRole, line)
itm.setData(1, Qt.DisplayRole, text)
itm.setData(0, self.lineRole, line)
itm.setData(0, self.startRole, start)
itm.setData(0, self.endRole, end)
itm.setData(0, self.replaceRole, replTxt)
if self.__replaceMode:
itm.setFlags(itm.flags() | Qt.ItemFlags(Qt.ItemIsUserCheckable))
itm.setCheckState(0, Qt.Checked)
self.replaceButton.setEnabled(True)
def show(self, txt=""):
"""
Public method to enable/disable the project button.
@param txt text to be shown in the searchtext combo (string)
"""
if self.project and self.project.isOpen():
self.projectButton.setEnabled(True)
else:
self.projectButton.setEnabled(False)
self.dirButton.setChecked(True)
self.findtextCombo.setEditText(txt)
self.findtextCombo.lineEdit().selectAll()
self.findtextCombo.setFocus()
if self.__replaceMode:
self.findList.clear()
self.replacetextCombo.setEditText("")
super(FindFileDialog, self).show()
def on_findtextCombo_editTextChanged(self, text):
"""
Private slot to handle the editTextChanged signal of the find
text combo.
@param text (ignored)
"""
self.__enableFindButton()
def on_replacetextCombo_editTextChanged(self, text):
"""
Private slot to handle the editTextChanged signal of the replace
text combo.
@param text (ignored)
"""
self.__enableFindButton()
def on_dirCombo_editTextChanged(self, text):
"""
Private slot to handle the textChanged signal of the directory
combo box.
@param text (ignored)
"""
self.__enableFindButton()
@pyqtSlot()
def on_projectButton_clicked(self):
"""
Private slot to handle the selection of the project radio button.
"""
self.__enableFindButton()
@pyqtSlot()
def on_dirButton_clicked(self):
"""
        Private slot to handle the selection of the directory radio button.
"""
self.__enableFindButton()
@pyqtSlot()
def on_filterCheckBox_clicked(self):
"""
Private slot to handle the selection of the file filter check box.
"""
self.__enableFindButton()
@pyqtSlot(str)
def on_filterEdit_textEdited(self, text):
"""
Private slot to handle the textChanged signal of the file filter edit.
@param text (ignored)
"""
self.__enableFindButton()
def __enableFindButton(self):
"""
Private slot called to enable the find button.
"""
if self.findtextCombo.currentText() == "" or \
(self.__replaceMode and
self.replacetextCombo.currentText() == "") or \
(self.dirButton.isChecked() and
(self.dirCombo.currentText() == "" or
not os.path.exists(os.path.abspath(
self.dirCombo.currentText())))) or \
(self.filterCheckBox.isChecked() and self.filterEdit.text() == ""):
self.findButton.setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Close).setDefault(True)
else:
self.findButton.setEnabled(True)
self.findButton.setDefault(True)
def on_buttonBox_clicked(self, button):
"""
Private slot called by a button of the button box clicked.
@param button button that was clicked (QAbstractButton)
"""
if button == self.findButton:
self.__doSearch()
elif button == self.stopButton:
self.__stopSearch()
def __stripEol(self, txt):
"""
Private method to strip the eol part.
@param txt line of text that should be treated (string)
@return text with eol stripped (string)
"""
return txt.replace("\r", "").replace("\n", "")
def __stopSearch(self):
"""
Private slot to handle the stop button being pressed.
"""
self.__cancelSearch = True
def __doSearch(self):
"""
Private slot to handle the find button being pressed.
"""
if self.__replaceMode and \
not e5App().getObject("ViewManager").checkAllDirty():
return
self.__cancelSearch = False
if self.filterCheckBox.isChecked():
fileFilter = self.filterEdit.text()
fileFilterList = \
["^{0}$".format(filter.replace(".", "\.").replace("*", ".*"))
for filter in fileFilter.split(";")]
filterRe = re.compile("|".join(fileFilterList))
if self.projectButton.isChecked():
if self.filterCheckBox.isChecked():
files = [self.project.getRelativePath(file)
for file in
self.__getFileList(
self.project.getProjectPath(), filterRe)]
else:
files = []
if self.sourcesCheckBox.isChecked():
files += self.project.pdata["SOURCES"]
if self.formsCheckBox.isChecked():
files += self.project.pdata["FORMS"]
if self.interfacesCheckBox.isChecked():
files += self.project.pdata["INTERFACES"]
if self.resourcesCheckBox.isChecked():
files += self.project.pdata["RESOURCES"]
elif self.dirButton.isChecked():
if not self.filterCheckBox.isChecked():
filters = []
if self.sourcesCheckBox.isChecked():
filters.extend(
["^{0}$".format(
assoc.replace(".", "\.").replace("*", ".*"))
for assoc in list(
Preferences.getEditorLexerAssocs().keys())
if assoc not in self.formsExt + self.interfacesExt])
if self.formsCheckBox.isChecked():
filters.append(self.filterForms)
if self.interfacesCheckBox.isChecked():
filters.append(self.filterInterfaces)
if self.resourcesCheckBox.isChecked():
filters.append(self.filterResources)
filterString = "|".join(filters)
filterRe = re.compile(filterString)
files = self.__getFileList(
os.path.abspath(self.dirCombo.currentText()),
filterRe)
elif self.openFilesButton.isChecked():
vm = e5App().getObject("ViewManager")
vm.checkAllDirty()
files = vm.getOpenFilenames()
self.findList.clear()
QApplication.processEvents()
QApplication.processEvents()
self.findProgress.setMaximum(len(files))
# retrieve the values
reg = self.regexpCheckBox.isChecked()
wo = self.wordCheckBox.isChecked()
cs = self.caseCheckBox.isChecked()
ct = self.findtextCombo.currentText()
if reg:
txt = ct
else:
txt = re.escape(ct)
if wo:
txt = "\\b{0}\\b".format(txt)
flags = re.UNICODE | re.LOCALE
if not cs:
flags |= re.IGNORECASE
try:
search = re.compile(txt, flags)
except re.error as why:
E5MessageBox.critical(
self,
self.tr("Invalid search expression"),
self.tr("""<p>The search expression is not valid.</p>"""
"""<p>Error: {0}</p>""").format(str(why)))
self.stopButton.setEnabled(False)
self.findButton.setEnabled(True)
self.findButton.setDefault(True)
return
# reset the findtextCombo
if ct in self.searchHistory:
self.searchHistory.remove(ct)
self.searchHistory.insert(0, ct)
self.findtextCombo.clear()
self.findtextCombo.addItems(self.searchHistory)
Preferences.Prefs.settings.setValue(
"FindFileDialog/SearchHistory",
self.searchHistory[:30])
if self.__replaceMode:
replTxt = self.replacetextCombo.currentText()
if replTxt in self.replaceHistory:
self.replaceHistory.remove(replTxt)
self.replaceHistory.insert(0, replTxt)
self.replacetextCombo.clear()
self.replacetextCombo.addItems(self.replaceHistory)
Preferences.Prefs.settings.setValue(
"FindFileDialog/ReplaceHistory",
self.replaceHistory[:30])
if self.dirButton.isChecked():
searchDir = self.dirCombo.currentText()
if searchDir in self.dirHistory:
self.dirHistory.remove(searchDir)
self.dirHistory.insert(0, searchDir)
self.dirCombo.clear()
self.dirCombo.addItems(self.dirHistory)
Preferences.Prefs.settings.setValue(
"FindFileDialog/DirectoryHistory",
self.dirHistory[:30])
# set the button states
self.stopButton.setEnabled(True)
self.stopButton.setDefault(True)
self.findButton.setEnabled(False)
# now go through all the files
self.__populating = True
self.findList.setUpdatesEnabled(False)
progress = 0
breakSearch = False
occurrences = 0
fileOccurrences = 0
for file in files:
self.__lastFileItem = None
found = False
if self.__cancelSearch or breakSearch:
break
self.findProgressLabel.setPath(file)
if self.projectButton.isChecked():
fn = os.path.join(self.project.ppath, file)
else:
fn = file
# read the file and split it into textlines
try:
text, encoding, hash = Utilities.readEncodedFileWithHash(fn)
lines = text.splitlines(True)
except (UnicodeError, IOError):
progress += 1
self.findProgress.setValue(progress)
continue
# now perform the search and display the lines found
count = 0
for line in lines:
if self.__cancelSearch:
break
count += 1
contains = search.search(line)
if contains:
occurrences += 1
found = True
start = contains.start()
end = contains.end()
if self.__replaceMode:
rline = search.sub(replTxt, line)
else:
rline = ""
line = self.__stripEol(line)
if len(line) > 1024:
line = "{0} ...".format(line[:1024])
if self.__replaceMode:
if len(rline) > 1024:
rline = "{0} ...".format(line[:1024])
line = "- {0}\n+ {1}".format(
line, self.__stripEol(rline))
self.__createItem(file, count, line, start, end,
rline, hash)
if self.feelLikeCheckBox.isChecked():
fn = os.path.join(self.project.ppath, file)
self.sourceFile.emit(fn, count, "", start, end)
QApplication.processEvents()
breakSearch = True
break
QApplication.processEvents()
if found:
fileOccurrences += 1
progress += 1
self.findProgress.setValue(progress)
if not files:
self.findProgress.setMaximum(1)
self.findProgress.setValue(1)
resultFormat = self.tr("{0} / {1}", "occurrences / files")
self.findProgressLabel.setPath(resultFormat.format(
self.tr("%n occurrence(s)", "", occurrences),
self.tr("%n file(s)", "", fileOccurrences)))
self.findList.setUpdatesEnabled(True)
self.findList.sortItems(self.findList.sortColumn(),
self.findList.header().sortIndicatorOrder())
self.findList.resizeColumnToContents(1)
if self.__replaceMode:
self.findList.header().resizeSection(0, self.__section0Size + 30)
self.findList.header().setStretchLastSection(True)
self.__populating = False
self.stopButton.setEnabled(False)
self.findButton.setEnabled(True)
self.findButton.setDefault(True)
if breakSearch:
self.close()
def on_findList_itemDoubleClicked(self, itm, column):
"""
Private slot to handle the double click on a file item.
It emits the signal
sourceFile or designerFile depending on the file extension.
@param itm the double clicked tree item (QTreeWidgetItem)
@param column column that was double clicked (integer) (ignored)
"""
if itm.parent():
file = itm.parent().text(0)
line = itm.data(0, self.lineRole)
start = itm.data(0, self.startRole)
end = itm.data(0, self.endRole)
else:
file = itm.text(0)
line = 1
start = 0
end = 0
if self.project:
fn = os.path.join(self.project.ppath, file)
else:
fn = file
if fn.endswith('.ui'):
self.designerFile.emit(fn)
else:
self.sourceFile.emit(fn, line, "", start, end)
@pyqtSlot()
def on_dirSelectButton_clicked(self):
"""
Private slot to display a directory selection dialog.
"""
directory = E5FileDialog.getExistingDirectory(
self,
self.tr("Select directory"),
self.dirCombo.currentText(),
E5FileDialog.Options(E5FileDialog.ShowDirsOnly))
if directory:
self.dirCombo.setEditText(Utilities.toNativeSeparators(directory))
def __getFileList(self, path, filterRe):
"""
Private method to get a list of files to search.
@param path the root directory to search in (string)
@param filterRe regular expression defining the filter
criteria (regexp object)
@return list of files to be processed (list of strings)
"""
path = os.path.abspath(path)
files = []
for dirname, _, names in os.walk(path):
files.extend([os.path.join(dirname, f)
for f in names
if re.match(filterRe, f)]
)
return files
def setSearchDirectory(self, searchDir):
"""
Public slot to set the name of the directory to search in.
@param searchDir name of the directory to search in (string)
"""
self.dirButton.setChecked(True)
self.dirCombo.setEditText(Utilities.toNativeSeparators(searchDir))
def setOpenFiles(self):
"""
Public slot to set the mode to search in open files.
"""
self.openFilesButton.setChecked(True)
@pyqtSlot()
def on_replaceButton_clicked(self):
"""
Private slot to perform the requested replace actions.
"""
self.findProgress.setMaximum(self.findList.topLevelItemCount())
self.findProgress.setValue(0)
progress = 0
for index in range(self.findList.topLevelItemCount()):
itm = self.findList.topLevelItem(index)
if itm.checkState(0) in [Qt.PartiallyChecked, Qt.Checked]:
file = itm.text(0)
origHash = itm.data(0, self.md5Role)
self.findProgressLabel.setPath(file)
if self.projectButton.isChecked():
fn = os.path.join(self.project.ppath, file)
else:
fn = file
# read the file and split it into textlines
try:
text, encoding, hash = \
Utilities.readEncodedFileWithHash(fn)
lines = text.splitlines(True)
except (UnicodeError, IOError) as err:
E5MessageBox.critical(
self,
self.tr("Replace in Files"),
self.tr(
"""<p>Could not read the file <b>{0}</b>."""
""" Skipping it.</p><p>Reason: {1}</p>""")
.format(fn, str(err))
)
progress += 1
self.findProgress.setValue(progress)
continue
# Check the original and the current hash. Skip the file,
# if hashes are different.
if origHash != hash:
E5MessageBox.critical(
self,
self.tr("Replace in Files"),
self.tr(
"""<p>The current and the original hash of the"""
""" file <b>{0}</b> are different. Skipping it."""
"""</p><p>Hash 1: {1}</p><p>Hash 2: {2}</p>""")
.format(fn, origHash, hash)
)
progress += 1
self.findProgress.setValue(progress)
continue
# replace the lines authorized by the user
for cindex in range(itm.childCount()):
citm = itm.child(cindex)
if citm.checkState(0) == Qt.Checked:
line = citm.data(0, self.lineRole)
rline = citm.data(0, self.replaceRole)
lines[line - 1] = rline
# write the file
txt = "".join(lines)
try:
Utilities.writeEncodedFile(fn, txt, encoding)
except (IOError, Utilities.CodingError, UnicodeError) as err:
E5MessageBox.critical(
self,
self.tr("Replace in Files"),
self.tr(
"""<p>Could not save the file <b>{0}</b>."""
""" Skipping it.</p><p>Reason: {1}</p>""")
.format(fn, str(err))
)
progress += 1
self.findProgress.setValue(progress)
self.findProgressLabel.setPath("")
self.findList.clear()
self.replaceButton.setEnabled(False)
self.findButton.setEnabled(True)
self.findButton.setDefault(True)
def __contextMenuRequested(self, pos):
"""
Private slot to handle the context menu request.
@param pos position the context menu shall be shown (QPoint)
"""
menu = QMenu(self)
menu.addAction(self.tr("Open"), self.__openFile)
menu.addAction(self.tr("Copy Path to Clipboard"),
self.__copyToClipboard)
menu.exec_(QCursor.pos())
def __openFile(self):
"""
Private slot to open the currently selected entry.
"""
itm = self.findList.selectedItems()[0]
self.on_findList_itemDoubleClicked(itm, 0)
def __copyToClipboard(self):
"""
Private method to copy the path of an entry to the clipboard.
"""
itm = self.findList.selectedItems()[0]
if itm.parent():
fn = itm.parent().text(0)
else:
fn = itm.text(0)
cb = QApplication.clipboard()
cb.setText(fn)
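# Hedged construction sketch (not part of eric's sources): ``project`` and the
# receiving slots below are assumed to be provided by the embedding IDE.
# dlg = FindFileDialog(project, replaceMode=False, parent=mainWindow)
# dlg.sourceFile.connect(viewManager.openSourceFile)
# dlg.designerFile.connect(openDesignerFile)
# dlg.show(txt="TODO")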
| gpl-3.0 | 7,800,255,193,380,840,000 | 36.106443 | 78 | 0.533593 | false |
TEAM-HRA/hra_suite | HRACore/src/hra_core/collections_utils.py | 1 | 9983 | '''
Created on 25-08-2012
@author: jurek
'''
import re
def get_other_keys(_dict, keys):
keys = [key for key in _dict if key not in keys]
if len(keys) == 1:
return keys[0]
elif len(keys) > 1:
return keys
def get_any_key(**_dict):
""" a method to get all keys as a list or
if there is only one key returns that key """
keys = [key for key in _dict]
return keys[0] if len(keys) == 1 else keys
def get_keys_for_value(_dict, _value, _regexpr=False, _one_key_only=False):
## Method returns all keys of a dictionary which have values as passed
# value or as regular expression value
# @param _dict: a dictionary
# @param _value: a value for comparison
# @param _regexpr: if is True value parameter is treated as
# a regular expression
# @param _one_key_only: if is True only one key is returned
value_re = re.compile(_value) if _regexpr else None
_keys = [key for key, value in _dict.items()
if value == _value or (value_re and value_re.search(value))]
if len(_keys) > 0:
return _keys[0] if _one_key_only else _keys
def get_for_regexpr(_iterable, _regexpr):
## Method which returns all items of iterable which correspond
# to regular expression
# @param _iterable: an iterable
# @param _regexpr: a regular expression
if _iterable and _regexpr:
compiled_re = re.compile(_regexpr) if _regexpr else None
return [value for value in _iterable if compiled_re.search(value)]
def replace_all_by_dict(_string, _dict):
## Method which replaces all occurrences of dictionary keys
# which are presented in a string in a form of '{dictionary key}'
# by corresponding dictionary values
# @param _string: a string to be replaced
# @param _dict: a dictionary of identifiers and values
if _string and _dict and len(_dict) > 0:
for key, value in _dict.items():
_string = _string.replace("{" + key + "}", value)
return _string
def create_list(element, size):
"""
create a list consists of one element and specified size
"""
return [element for _ in range(size)]
def empty_string(value):
return "" if value == None else str(value)
def any_indexes(iterable):
"""
returns all indexes for items in iterable for which items are true
"""
if iterable:
return [idx for idx, item in enumerate(iterable) if item == True]
def or_values(iterable):
if iterable:
if len(iterable) == 0:
return None
elif len(iterable) >= 1:
value = iterable[0]
for num, _iter in enumerate(iterable):
if num > 0:
value = value | _iter
return value
def all_true_values(_object, _names):
"""
check if all members of passed _object has value True
"""
for name in _names:
if hasattr(_object, name):
if getattr(_object, name, False) == False:
return False
else:
return False
return True
def nvl(*iterable):
"""
returns first not None value in collection
"""
for _iter in iterable:
if not _iter == None:
return _iter
def nvl_and_positive(*iterable):
"""
returns first not None and positive value in collection
"""
for _iter in iterable:
if not _iter == None and _iter > 0:
return _iter
def get_subdict(_dict, keys=None, not_keys=None):
"""
function which returns sub dict of _dict dictionary
with specified keys or without not_keys
"""
d = _dict
if keys:
d = dict([(key, d[key]) for key in d if key in keys])
if not_keys:
d = dict([(key, d[key]) for key in d if key not in not_keys])
return d
def get_namedtuple_fields_as_list(_named_tuple):
return list(_named_tuple._fields) if _named_tuple else None
def get_as_list(_string, separator=',', strip_characters=' '):
"""
convert a string into a list divided by a specified separator
"""
if not _string == None:
return [name.strip(strip_characters)
if not strip_characters == None else name
for name in _string.split(separator)]
def not_empty_nvl(*iterable):
"""
returns first not None and not empty value in collection
"""
for _iter in iterable:
if not _iter == None and len(str(_iter)) > 0:
return _iter
def get_as_tuple(_string, separator=',', strip_characters=' ', convert=None):
"""
convert a string divided by a specified separator into a tuple
"""
if isinstance(_string, tuple):
return _string
if _string is not None:
_list = get_as_list(_string, separator, strip_characters)
return tuple(_list if convert == None else map(convert, _list))
def commas(*iterable, **params):
"""
method used to join iterable by comma;
"""
#if iterable has only one element of string type then
#change it into iterable with element of list type
#to avoid splitting the string by a comma
if len(iterable) == 1 and isinstance(iterable[0], str):
iterable = ([iterable[0]],)
c = map(str, *iterable)
return params.get('_default', None) if len(c) == 0 else ', '.join(c)
def get_ordered_list_of_strings(ordered_identifiers, list_to_order,
order_identifier_separator=',', case_sensitive=False,
ordered_aliases_identifiers=None):
"""
    function sorts a list of string items according to the order of the
    strings included in the ordered_identifiers parameter;
this function returns a new list object
ordered_identifiers parameter could be a string of identifiers
separated by separator or a list of identifiers
ordered_aliases_identifiers is a set of aliases identifiers
which are used in returned list instead of identifiers included
in ordered_identifiers parameter;
number of items in ordered_aliases_identifiers should be the same
as in ordered_identifiers
"""
if ordered_identifiers == None or list_to_order == None \
or len(ordered_identifiers) == 0 or len(list_to_order) == 0:
return list_to_order
list_ordered = []
if isinstance(ordered_identifiers, list):
ordered_names = ordered_identifiers
else:
ordered_names = get_as_list(
ordered_identifiers, separator=order_identifier_separator)
ordered_aliases_names = None
if not ordered_aliases_identifiers == None:
if isinstance(ordered_aliases_identifiers, list):
ordered_aliases_names = ordered_aliases_identifiers
else:
ordered_aliases_names = get_as_list(ordered_aliases_identifiers,
separator=order_identifier_separator)
for idx, ordered_name in enumerate(ordered_names):
for name in list_to_order:
if (case_sensitive is False
and name.lower() == ordered_name.lower()) \
or (case_sensitive is True and name == ordered_name):
if ordered_aliases_names == None:
list_ordered.append(name)
else:
if idx < len(ordered_aliases_names):
list_ordered.append(ordered_aliases_names[idx])
else:
list_ordered.append(name)
break
if ordered_aliases_identifiers == None:
#append to the end items not founded in ordered_identifiers
#do it only when alias are not specified
list_ordered[len(list_ordered):] = \
[name for name in list_to_order if name not in list_ordered]
if not len(list_ordered) == len(list_to_order):
raise RuntimeError("size if ordered list doesn't equal source list")
return list_ordered
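# Usage sketch (illustrative only, not part of the original module):
# >>> get_ordered_list_of_strings("b,a", ["A", "B", "C"])
# ['B', 'A', 'C']   # matched names first (case-insensitive), the rest appended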
def get_or_put(_dict, _key, _default):
"""
function which puts a default value for a key if value is not occurs
in dictionary
"""
if not _dict == None:
value = _dict.get(_key, None)
if value == None:
value = _default
_dict[_key] = value
return value
def pop_from_list(_list, _value):
"""
    function pops an element from a list;
it doesn't throw an exception if the element doesn't exist in the list
"""
if not _list == None and _list.count(_value) > 0:
_list.pop(_list.index(_value))
def remove_suffix(_collection, _suffix):
"""
function removes suffix from all elements in collection
"""
return [name[:-len(_suffix)] for name in
[name for name in _collection if name.endswith(_suffix)]]
def get_chunks(arr, chunk_size=10):
"""
function generates chunks of arrays of chunk_size size
"""
chunks = [arr[start:start + chunk_size]
for start in range(0, len(arr), chunk_size)]
return chunks
def get_index_of_string(_string, values, _separator=None):
"""
    function searches for an occurrence of parameter _string
    in values and returns its index position; the values parameter can be
    a string or a collection of strings or subcollections of strings;
if a _string value is not found -1 is returned
"""
if not _string == None:
_string = _string.lower().strip()
if hasattr(values, 'lower'):
for idx, v in enumerate(
[v.lower().strip() for v in values.rsplit(_separator)]):
if v == _string:
return idx
else:
for idx, value in enumerate(values):
if hasattr(value, 'lower') and \
value.lower().strip() == _string:
return idx
idx = get_index_of_string(_string, value,
_separator=_separator)
if idx > -1:
return idx
return -1
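# Usage sketches (illustrative only, not part of the original module):
# >>> get_chunks(list(range(7)), chunk_size=3)
# [[0, 1, 2], [3, 4, 5], [6]]
# >>> get_index_of_string("two", "one,two,three", _separator=",")
# 1
# >>> get_index_of_string("missing", ["one", "two"])
# -1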
| lgpl-3.0 | -4,111,708,774,639,351,300 | 31.838816 | 78 | 0.601222 | false |
projectshift/shift-media | shiftmedia/storage.py | 1 | 5190 | import os
from pathlib import Path
from shiftmedia import utils, exceptions as x
from shiftmedia.paths import PathBuilder
from shiftmedia.resizer import Resizer
class Storage:
def __init__(self, backend, secret_key, local_temp):
"""
Init
        :param backend: shiftmedia.backend.Backend instance
:param secret_key: string, random salt
:param local_temp: string, path to local temp directory
"""
self.backend = backend
self.paths = PathBuilder(secret_key)
self._tmp_path = local_temp
@property
def tmp(self):
"""
Get temp path
Returns path to local temp and creates one if necessary
"""
if not os.path.exists(self._tmp_path):
os.makedirs(self._tmp_path)
return self._tmp_path
def put(self, src, delete_local=True, fix_orientation=False):
"""
Put local file to storage
Generates a uuid for the file, tells backend to accept
it by that id and removes original on success.
"""
if not os.path.exists(src):
msg = 'Unable to find local file [{}]'
raise x.LocalFileNotFound(msg.format(src))
path = Path(src)
extension = ''.join(path.suffixes)[1:]
name = path.name.replace('.' + extension, '')
extension = utils.normalize_extension(extension)
filename = name + '.' + extension
id = utils.generate_id(filename)
# fix image orientation before accepting
if fix_orientation:
Resizer.fix_orientation_and_save(src)
self.backend.put_variant(src, id, filename.lower())
if delete_local:
os.remove(src)
return id
def delete(self, id):
"""
Delete
Removes file and all its artifacts from storage by id
"""
return self.backend.delete(id)
def get_original_url(self, id):
"""
Get original URL
Combines backend base url, path to object id and original filename.
:return: string - full object url
"""
base = self.backend.get_url().rstrip('/')
parts = self.backend.id_to_path(id)
filename = parts[5]
path = '/'.join(parts)
return base + '/' + path + '/' + filename
def get_auto_crop_url(self, *args, **kwargs):
"""
Get auto crop URL
Combines backend base url, path to object id and generated filename.
:param args: positional args to be passed to filename generator
:param kwargs: keyword args to be passed to filename generator
:return: string - full object url
"""
id = kwargs['id'] if 'id' in kwargs else args[0]
base = self.backend.get_url().rstrip('/')
parts = self.backend.id_to_path(id)
path = '/'.join(parts)
filename = self.paths.get_auto_crop_filename(*args, **kwargs)
return base + '/' + path + '/' + filename
def get_manual_crop_url(self, *args, **kwargs):
"""
Get manual crop URL
Combines backend base url, path to object id and generated filename.
:param args: positional args to be passed to filename generator
:param kwargs: keyword args to be passed to filename generator
:return: string - full object url
"""
id = kwargs['id'] if 'id' in kwargs else args[0]
base = self.backend.get_url().rstrip('/')
parts = self.backend.id_to_path(id)
path = '/'.join(parts)
filename = self.paths.get_manual_crop_filename(*args, **kwargs)
return base + '/' + path + '/' + filename
def create_resize(self, url):
"""
Create resize
Accepts storage URL of a resize, parses and validates it and then
creates the resize to be put back to storage.
:param url: string - url of resize to be created
:return: string - same url on success
"""
id, filename = self.backend.parse_url(url)
params = self.paths.filename_to_resize_params(id, filename)
mode = params['resize_mode']
modes = ['auto', 'manual']
if mode not in modes:
err = 'Resize mode [' + mode + '] is not yet implemented.'
raise x.NotImplementedError(err)
local_original = self.backend.retrieve_original(id, self._tmp_path)
local_resize = os.path.join(self._tmp_path, id, params['filename'])
factor = Resizer.RESIZE_TO_FIT
if params['factor'] == 'fill':
factor = Resizer.RESIZE_TO_FILL
resize = Resizer.auto_crop(
src=local_original,
dst=local_resize,
size=params['target_size'],
            mode=factor,
upscale=params['upscale'],
format=params['output_format'],
quality=params['quality']
)
try:
self.backend.put_variant(resize, id, filename, force=True)
except x.FileExists:
pass
os.remove(local_original)
os.remove(resize)
tmp_dir = os.path.join(self._tmp_path, id)
if not os.listdir(tmp_dir):
os.rmdir(tmp_dir)
return url
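# Hedged usage sketch (not part of the original module): the backend object and
# local paths are placeholders; only methods documented above are exercised.
# storage = Storage(backend=my_backend, secret_key='change-me',
#                   local_temp='/tmp/shiftmedia')
# file_id = storage.put('/tmp/photo.jpg', fix_orientation=True)
# print(storage.get_original_url(file_id))
# storage.delete(file_id)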
| mit | -1,391,660,566,361,394,700 | 33.370861 | 76 | 0.581888 | false |
anthonyng2/Machine-Learning-For-Finance | Classification Based Machine Learning for Algorithmic Trading/default_predictions/Random forest.py | 1 | 2153 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 19:12:52 2017
@author: Anthony
Default Predictions
"""
import numpy as np
import pandas as pd
df = pd.read_csv("dataset_2.csv")
df['default'].describe()
sum(df['default'] == 0)
sum(df['default'] == 1)
X = df.iloc[:, 1:6].values
y = df['default'].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
shuffle_index = np.random.permutation(len(X_train))
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Machine Learning Algorithm
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=0)
clf.fit(X_train, y_train)
# Cross Validation
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy')
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3)
cm = confusion_matrix(y_train, y_train_pred)
print(cm)
from sklearn.metrics import precision_score, recall_score
print("precision score = {0:.4f}".format(precision_score(y_train, y_train_pred)))
print("recall score = {0:.4f}".format(recall_score(y_train, y_train_pred)))
# Predicting the Test set results
y_pred = clf.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
print("precision score = {0:.4f}".format(precision_score(y_test, y_pred)))
print("recall score = {0:.4f}".format(recall_score(y_test, y_pred)))
'''
Logistic Regression Training set:
precision score = 0.7788
recall score = 0.8351
Logistic Regression Test set:
precision score = 0.8158
recall score = 0.8378
'''
| mit | 4,266,509,932,854,244,400 | 27.094595 | 81 | 0.689271 | false |
tensorflow/probability | tensorflow_probability/python/experimental/composite_tensor.py | 1 | 12217 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Use `tfp.distributions.Distribution`s as `tf.CompositeTensor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions
from tensorflow_probability.python.internal import tensor_util
from tensorflow.python.framework.composite_tensor import CompositeTensor # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.saved_model import nested_structure_coder # pylint: disable=g-direct-tensorflow-import
__all__ = ['as_composite', 'register_composite']
_registry = {} # Mapping from (python pkg, class name) -> class.
class _TFPTypeSpec(tf.TypeSpec):
"""A tf.TypeSpec for `tfp.distributions.Distribution` and related objects."""
__slots__ = ('_clsid', '_kwargs', '_param_specs')
def __init__(self, clsid, param_specs, kwargs):
self._clsid = clsid
self._kwargs = kwargs
self._param_specs = param_specs
@property
def value_type(self):
return _registry[self._clsid]
def _to_components(self, obj):
return {
k: getattr(obj, k, obj.parameters[k]) for k in sorted(self._param_specs)
}
def _from_components(self, components):
kwargs = dict(self._kwargs)
kwargs.update(components)
return self.value_type(**kwargs) # pylint: disable=not-callable
@property
def _component_specs(self):
return self._param_specs
def _serialize(self):
return 1, self._clsid, self._param_specs, self._kwargs
@classmethod
def _deserialize(cls, encoded):
version, clsid, param_specs, kwargs = encoded
if version != 1:
raise ValueError('Unexpected version')
if _find_clsid(clsid) is None:
raise ValueError(
'Unable to identify distribution type for {}. For user-defined '
'distributions (not in TFP), make sure the distribution is decorated '
'with `@tfp.experimental.register_composite` and its module is '
'imported before calling `tf.saved_model.load`.'.format(clsid))
return cls(clsid, param_specs, kwargs)
_TypeSpecCodec = nested_structure_coder._TypeSpecCodec # pylint: disable=protected-access
_TypeSpecCodec.TYPE_SPEC_CLASS_FROM_PROTO[275837168] = _TFPTypeSpec
_TypeSpecCodec.TYPE_SPEC_CLASS_TO_PROTO[_TFPTypeSpec] = 275837168
del _TypeSpecCodec
def _make_convertible(cls):
"""Makes a subclass of `cls` that also subclasses `CompositeTensor`."""
clsid = (cls.__module__, cls.__name__)
if clsid in _registry:
return _registry[clsid]
class _CompositeTensorDist(cls, CompositeTensor):
"""A per-`cls` subclass of `CompositeTensor`."""
def _parameter_control_dependencies(self, is_init):
# We are forced by the CompositeTensor contract (no graph operations in
# `_to_components`, `_from_components`) to defer the
# `_initial_parameter_control_dependencies` to point-of-use.
if is_init:
return ()
result = tuple(
super(_CompositeTensorDist,
self)._parameter_control_dependencies(is_init=True))
result += tuple(
super(_CompositeTensorDist,
self)._parameter_control_dependencies(is_init=False))
return result
@property
def _type_spec(self):
def get_default_args(fn_or_object):
fn = type(fn_or_object) if isinstance(fn_or_object,
object) else fn_or_object
return {
k: v.default
for k, v in inspect.signature(fn).parameters.items()
if v.default is not inspect.Parameter.empty
}
if six.PY3:
default_kwargs = get_default_args(self)
missing = object()
kwargs = {
k: v
for k, v in self.parameters.items()
if default_kwargs.get(k, missing) is not v
} # non-default kwargs only
else:
kwargs = dict(self.parameters)
param_specs = {}
try:
composite_tensor_params = self._composite_tensor_params # pylint: disable=protected-access
except (AttributeError, NotImplementedError):
composite_tensor_params = ()
for k in composite_tensor_params:
if k in kwargs and kwargs[k] is not None:
v = kwargs.pop(k)
def composite_helper(v):
if isinstance(v, CompositeTensor):
return v._type_spec # pylint: disable=protected-access
elif tf.is_tensor(v):
return tf.TensorSpec.from_tensor(v)
param_specs[k] = tf.nest.map_structure(composite_helper, v)
for k, v in list(kwargs.items()):
if isinstance(v, CompositeTensor):
param_specs[k] = v._type_spec # pylint: disable=protected-access
kwargs.pop(k)
elif callable(v):
raise NotImplementedError(
'Unable to make CompositeTensor including callable argument.' + k)
return _TFPTypeSpec(
clsid, param_specs=param_specs, kwargs=kwargs)
_CompositeTensorDist.__name__ = '{}CT'.format(cls.__name__)
_registry[clsid] = _CompositeTensorDist
return _CompositeTensorDist
# Lazy-cache into `_registry` so that `tf.saved_model.load` will work.
def _find_clsid(clsid):
pkg, cls = clsid
if clsid not in _registry:
if pkg.startswith('tensorflow_probability.') and '.distributions' in pkg:
dist_cls = getattr(distributions, cls)
if (inspect.isclass(dist_cls) and
issubclass(dist_cls, distributions.Distribution)):
_make_convertible(dist_cls)
return _registry[clsid] if clsid in _registry else None
def as_composite(obj):
"""Returns a `CompositeTensor` equivalent to the given object.
Note that the returned object will have any `Variable`,
`tfp.util.DeferredTensor`, or `tfp.util.TransformedVariable` references it
closes over converted to tensors at the time this function is called. The
type of the returned object will be a subclass of both `CompositeTensor` and
`type(obj)`. For this reason, one should be careful about using
`as_composite()`, especially for `tf.Module` objects.
For example, when the composite tensor is created even as part of a
`tf.Module`, it "fixes" the values of the `DeferredTensor` and `tf.Variable`
objects it uses:
```python
class M(tf.Module):
def __init__(self):
self._v = tf.Variable(1.)
self._d = tfp.distributions.Normal(
tfp.util.DeferredTensor(self._v, lambda v: v + 1), 10)
self._dct = tfp.experimental.as_composite(self._d)
@tf.function
def mean(self):
return self._dct.mean()
m = M()
m.mean()
>>> <tf.Tensor: numpy=2.0>
m._v.assign(2.) # Doesn't update the CompositeTensor distribution.
m.mean()
>>> <tf.Tensor: numpy=2.0>
```
If, however, the creation of the composite is deferred to a method
call, then the Variable and DeferredTensor will be properly captured
and respected by the Module and its `SavedModel` (if it is serialized).
```python
class M(tf.Module):
def __init__(self):
self._v = tf.Variable(1.)
self._d = tfp.distributions.Normal(
tfp.util.DeferredTensor(self._v, lambda v: v + 1), 10)
@tf.function
def d(self):
return tfp.experimental.as_composite(self._d)
m = M()
m.d().mean()
>>> <tf.Tensor: numpy=2.0>
m._v.assign(2.)
m.d().mean()
>>> <tf.Tensor: numpy=3.0>
```
Note: This method is best-effort and based on a heuristic for what the
tensor parameters are and what the non-tensor parameters are. Things might be
broken, especially for meta-distributions like `TransformedDistribution` or
`Independent`. (We try to raise NotImplementedError in such cases.) If you'd
benefit from better coverage, please file an issue on github or send an email
to `[email protected]`.
Args:
obj: A `tfp.distributions.Distribution`.
Returns:
obj: A `tfp.distributions.Distribution` that extends `CompositeTensor`.
"""
if isinstance(obj, CompositeTensor):
return obj
cls = _make_convertible(type(obj))
kwargs = dict(obj.parameters)
def mk_err_msg(suffix=''):
return ('Unable to make a CompositeTensor for "{}" of type `{}`. Email '
'`[email protected]` or file an issue on github if you '
'would benefit from this working. {}'.format(
obj, type(obj), suffix))
try:
composite_tensor_params = obj._composite_tensor_params # pylint: disable=protected-access
except (AttributeError, NotImplementedError):
composite_tensor_params = ()
for k in composite_tensor_params:
# Use dtype inference from ctor.
if k in kwargs and kwargs[k] is not None:
v = getattr(obj, k, kwargs[k])
try:
kwargs[k] = tf.convert_to_tensor(v, name=k)
except (ValueError, TypeError) as e:
kwargs[k] = v
for k, v in kwargs.items():
def composite_helper(v):
# If we have a parameters attribute, then we may be able to convert to
# a composite tensor by guessing which of the parameters are tensors. In
# essence, we duck-type based on this attribute.
if hasattr(v, 'parameters'):
return as_composite(v)
return v
kwargs[k] = tf.nest.map_structure(composite_helper, v)
# Unfortunately, tensor_util.is_ref(v) returns true for a
# tf.linalg.LinearOperator even though that is not ideal behavior.
if tensor_util.is_ref(v) and not isinstance(v, tf.linalg.LinearOperator):
try:
kwargs[k] = tf.convert_to_tensor(v, name=k)
except TypeError as e:
raise NotImplementedError(
mk_err_msg('(Unable to convert dependent entry \'{}\' of object '
'\'{}\': {})'.format(k, obj, str(e))))
result = cls(**kwargs)
struct_coder = nested_structure_coder.StructureCoder()
try:
struct_coder.encode_structure(result._type_spec) # pylint: disable=protected-access
except nested_structure_coder.NotEncodableError as e:
raise NotImplementedError(
mk_err_msg('(Unable to serialize: {})'.format(str(e))))
return result
def register_composite(cls):
"""A decorator that registers a TFP object as composite-friendly.
This registration is not required to call `as_composite` on instances
of a given distribution (or bijector or other TFP object), but it *is*
required if a `SavedModel` with functions accepting or returning composite
wrappers of this object will be loaded in python (without having called
`as_composite` already).
Example:
```python
class MyDistribution(tfp.distributions.Distribution):
...
# This will fail to load.
model = tf.saved_model.load(
'/path/to/sm_with_funcs_returning_composite_tensor_MyDistribution')
```
Instead:
```python
@tfp.experimental.register_composite
class MyDistribution(tfp.distributions.Distribution):
...
# This will load.
model = tf.saved_model.load(
'/path/to/sm_with_funcs_returning_composite_tensor_MyDistribution')
```
Args:
cls: A subclass of `Distribution`.
Returns:
The input, with the side-effect of registering it as a composite-friendly
distribution.
Raises:
TypeError: If `cls` does not have _composite_tensor_params, or if
registration fails (`cls` is not convertible).
NotImplementedError: If registration fails (`cls` is not convertible).
"""
if not hasattr(cls, '_composite_tensor_params'):
raise TypeError('Expected cls to have property "_composite_tensor_params".')
_make_convertible(cls)
return cls
| apache-2.0 | -5,423,151,794,369,109,000 | 34.826979 | 118 | 0.667185 | false |
mementum/metaparams | tests/test_metaparams.py | 1 | 5583 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-18 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import sys
from metaparams import ParamsBase, metaparams, MetaParams
py36 = sys.version_info[0:2] >= (3, 6)
def test_run(main=False):
# Testing standard behaviour
class A(ParamsBase):
params = dict(
p1=True,
p2=dict(value=99, doc='With docstring'),
)
a = A()
check_p1 = a.params.p1
check_p1_default = a.params.p1 == a.params._defvalue('p1')
check_p1_doc = a.params._doc('p1') == ''
check_p2 = a.params.p2 == 99
check_p2_default = a.params.p2 == a.params._defvalue('p2')
check_p2_doc = a.params._doc('p2') == 'With docstring'
assert check_p1
assert check_p1_default
assert check_p1_doc
assert check_p2
assert check_p2_default
assert check_p2_doc
# Testing keyword arguments
if py36:
class B(metaclass=MetaParams, _pname='xx', _pshort=True):
xx = dict(
# a=True,
p1=True,
p2=dict(value=99, doc='With docstring'),
)
def __init__(self, a1=None):
pass
else:
@metaparams(_pname='xx', _pshort=True)
class B:
xx = dict(
# a=True,
p1=True,
p2=dict(value=99, doc='With docstring'),
)
def __init__(self, a1=None):
pass
b = B(p2=33)
check_p1 = b.xx.p1
check_p1_default = b.xx.p1 == b.xx._defvalue('p1')
check_p1_doc = b.xx._doc('p1') == ''
check_p2 = b.xx.p2 == 33
check_p2_default = not b.xx.p2 == b.xx._defvalue('p2')
check_p2_doc = b.xx._doc('p2') == 'With docstring'
assert check_p1
assert check_p1_default
assert check_p1_doc
assert check_p2
assert check_p2_default
assert check_p2_doc
# Testing inheritance
class C(B):
xx = dict(
p1=False,
p3=dict(value=None, doc='None here'),
p4=dict(required=True, type=int),
p5=dict(value='a', transform=lambda x: x.upper()),
)
# Testing inheritance
try:
c = C()
except ValueError:
pass
except:
raise
try:
c = C(p4=25.0)
except TypeError:
pass
except:
raise
c = C(p4=25, p5='c')
check_p1 = not c.xx.p1 # changed to False
check_p1_default = c.xx.p1 == c.xx._defvalue('p1')
check_p1_doc = c.xx._doc('p1') == ''
check_p2 = c.xx.p2 == 99
check_p2_default = c.xx.p2 == c.xx._defvalue('p2')
check_p2_doc = c.xx._doc('p2') == 'With docstring'
check_p3 = c.xx.p3 is None
check_p3_default = c.xx.p3 == c.xx._defvalue('p3')
check_p3_doc = c.xx._doc('p3') == 'None here'
check_p4_value = c.xx.p4 == 25
check_p5_value = c.xx.p5 == 'C'
check_defkwargs = C.xx._defkwargs() == OrderedDict(
[('p1', False), ('p2', 99), ('p3', None), ('p4', None), ('p5', 'a')]
)
check_kwargs = c.xx._kwargs() == {
'p1': False, 'p2': 99, 'p3': None, 'p4': 25, 'p5': 'C'
}
check_p4_required = C.xx._isrequired('p4')
check_p5_notrequired = not C.xx._isrequired('p5')
# Need to sort because dict order is not guaranteed in Python < 3.7
# (guaranteed as implementation detail in CPython 3.6)
check_items = sorted(list(c.xx._items())) == [
('p1', False), ('p2', 99), ('p3', None), ('p4', 25), ('p5', 'C')
]
c.xx._reset()
check_reset = c.xx._kwargs() == C.xx._defkwargs()
check_reset_2 = dict(c.xx._items()) == C.xx._defkwargs()
check_reset_3 = list(c.xx._keys()) == list(C.xx._defkeys())
check_reset_4 = list(c.xx._values()) == list(C.xx._defvalues())
assert check_p1
assert check_p1_default
assert check_p1_doc
assert check_p2
assert check_p2_default
assert check_p2_doc
assert check_p3
assert check_p3_default
assert check_p3_doc
assert check_p4_value
assert check_p5_value
assert check_defkwargs
assert check_kwargs
assert check_p4_required
assert check_p5_notrequired
assert check_items
assert check_reset
assert check_reset_2
assert check_reset_3
assert check_reset_4
# Testing keyword arguments
if py36:
class D(ParamsBase, _pshort=False, _pinst=True):
params = dict(
p1=True,
)
else:
@metaparams(_pinst=True)
class D:
params = dict(
p1=True,
)
d = D()
assert(d.params.p1)
if __name__ == '__main__':
test_run(main=True)
| gpl-3.0 | 200,310,370,738,529,700 | 26.638614 | 79 | 0.551496 | false |
jtraver/dev | python3/subprocess/write2.py | 1 | 1078 | #!/usr/bin/env python3
#!/usr/bin/python
import subprocess
import os
import shutil
import pty
master, slave = pty.openpty()
args = 'stdin1.py'  # a plain command string; with shell=True below it is run through the shell
# popen = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='.')
# http://stackoverflow.com/questions/5411780/python-run-a-daemon-sub-process-read-stdout/5413588#5413588
# not working
# universal_newlines=True gives a text-mode stdin so str (not bytes) can be written under Python 3
popen = subprocess.Popen(args, shell=True, stdin=subprocess.PIPE, stdout=slave, stderr=slave,
                         close_fds=True, cwd='.', universal_newlines=True)
stdout = os.fdopen(master)
# set the O_NONBLOCK flag of p.stdout file descriptor:
# flags = fcntl(popen1.stdout, F_GETFL) # get current popen1.stdout flags
# fcntl(popen1.stdout, F_SETFL, flags | O_NONBLOCK)
popen.stdin.write("this is line 0\n")
# line = popen.stdout.readline()
# line = stdout.readline()
# print "line = %s" % line
out, err = popen.communicate(input="this is line 1\nthis is line 2\n\\d")  # escape the backslash so '\d' is sent literally
if err != None:
print('errput = %s' % str(err))
print("output = %s" % str(out))
out2 = stdout.read()
print("output = %s" % str(out))
subprocess.call(['echo', ''])
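
# A plain-pipe sketch (no pty), usually sufficient when the child does not demand
# a terminal; it assumes stdin1.py reads its stdin and writes to stdout:
#   p = subprocess.Popen('stdin1.py', shell=True, universal_newlines=True,
#                        stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd='.')
#   out, _ = p.communicate("this is line 1\nthis is line 2\n")
#   print("output = %s" % out)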
| mit | -4,241,233,845,259,009,000 | 31.666667 | 118 | 0.705009 | false |
david672orford/pykarta | pykarta/geocoder/google.py | 1 | 5787 | # pykarta/geocoder/google.py
# Copyright 2013--2018, Trinity College Computing Center
# Last modified: 15 May 2018
from __future__ import print_function
import string
import xml.etree.cElementTree as ET
from .geocoder_base import GeocoderBase, GeocoderResult, GeocoderError
# See http://code.google.com/apis/maps/documentation/geocoding/index.html
class GeocoderGoogle(GeocoderBase):
url_server = "maps.google.com"
url_path = "/maps/api/geocode/xml"
delay = 0.2 # no more than 5 requests per second
# Send the query to the Google geocoder while taking care of
# pacing the request rate.
def query_google(self, query_hash):
# HTTP query with retry
retry_count = 0
while True:
#resp_text = self.get(self.url_path, query=query_hash)
resp_text = self.get_blocking(self.url_path, query=query_hash)
#print(resp_text)
try:
tree = ET.XML(resp_text)
except:
raise GeocoderError("Unparsable response")
status = tree.find("status").text
if status == "OVER_QUERY_LIMIT": # too fast
print(" error_message: %s" % tree.find("error_message").text)
retry_count += 1
self.delay += (retry_count * 0.2) # slow down
if self.delay > 5.0:
raise GeocoderError("Google's daily query limit exceeded.")
elif status == "ZERO_RESULTS":
return None
elif status != 'OK':
raise GeocoderError("Status code %s" % status)
else: # result received
return tree
xlate = {
'street_number': 'house_number',
'route': 'street',
'administrative_area_level_1': 'state',
'administrative_area_level_2': 'county',
}
location_types = {
"ROOFTOP": "LOT",
"RANGE_INTERPOLATED": "INTERPOLATED",
}
#-------------------------------------------------------------------
# Given a street address, try to find the latitude and longitude.
#-------------------------------------------------------------------
def FindAddr(self, address, countrycode=None):
result = GeocoderResult(address, "Google")
# See: https://developers.google.com/maps/documentation/geocoding/
query_hash = {
'sensor': 'false',
'address': (u"%s %s, %s, %s" \
% (address[self.f_house_number], address[self.f_street],
address[self.f_city], address[self.f_state])).encode("utf-8"),
}
components = []
if countrycode != None:
components.append("country:%s" % countrycode)
if address[self.f_postal_code] != "":
components.append("postal_code:%s" % address[self.f_postal_code])
if len(components) > 0:
query_hash['components'] = "|".join(components)
tree = self.query_google(query_hash)
if tree is not None:
for item in tree.findall("result"):
self.debug(" Candidate:")
# Suck the address components into a hash and a list.
found_addr_dict = {}
found_addr_list = []
for component in item.findall("address_component"):
comp_type = component.find("type")
if comp_type is None: # ZIP+4?
self.debug(" ZIP+4?")
continue
comp_type = comp_type.text
comp_type = self.xlate.get(comp_type, comp_type)
comp_name = component.find("short_name" if comp_type == "state" else "long_name").text
self.debug(" %s: %s" % (comp_type, comp_name))
found_addr_dict[comp_type] = comp_name
found_addr_list.append([comp_type, comp_name])
location_type = item.find("geometry/location_type").text
self.debug(" location_type: %s" % location_type)
if not self.result_truly_matches(address, found_addr_list):
self.debug(" Partial match.")
result.add_alternative_address(found_addr_dict)
continue # try next item
if not location_type in self.location_types:
self.debug(" Coordinate precision too low.")
continue # try next item
# The answer has run the gauntlet! Use it.
result.postal_code = found_addr_dict['postal_code']
result.coordinates = (
float(item.find("geometry/location/lat").text),
float(item.find("geometry/location/lng").text),
)
result.precision = self.location_types[location_type]
break
if result.coordinates is None:
self.debug(" No (acceptable) match.")
return result
#-------------------------------------------------------------------
# Given a city and state name, try to find information about it.
#-------------------------------------------------------------------
def FindCity(self, city, state, countrycode=None):
query_hash = {
'sensor': 'false',
'address': "%s, %s" % (city, state)
}
components = []
if countrycode is not None:
components.append("country:%s" % countrycode)
if len(components) > 0:
query_hash['components'] = "|".join(components)
tree = self.query_google(query_hash)
if tree is None:
self.debug(" No match")
return None
county = None
for result in tree.findall("result"):
self.debug(" Candidate:")
# Reject partial matches
partial = result.find("partial_match")
if partial is not None and partial.text == "true":
self.debug(" Partial match")
continue
for component in result.findall("address_component"):
comp_type = component.find("type")
if comp_type is not None: # why None? We don't know.
comp_type = comp_type.text
comp_name = component.find("long_name").text
self.debug(" %s: %s" % (comp_type, comp_name))
if comp_type == "administrative_area_level_2": # county
county = comp_name
if county.split(" ")[-1] != "County":
county = "%s County" % county
return {
'lat': float(result.find("geometry/location/lat").text),
'lon': float(result.find("geometry/location/lng").text),
'county': county
}
return None
if __name__ == "__main__":
gc = GeocoderGoogle()
gc.debug_enabled = True
print(gc.FindAddr(["300","Summit Street","","Hartford","CT","06106"]))
| gpl-2.0 | 4,632,901,562,203,207,000 | 31.694915 | 91 | 0.622602 | false |
akshaykurmi/reinforcement-learning | atari_breakout/per.py | 1 | 4539 | import os
import pickle
import numpy as np
from tqdm import tqdm
class SumTree:
def __init__(self, capacity):
self.capacity = capacity
self.tree = np.zeros(2 * capacity - 1, dtype=np.float32)
self.data = np.empty(capacity, dtype=object)
self.head = 0
@property
def total_priority(self):
return self.tree[0]
@property
def max_priority(self):
return np.max(self.tree[-self.capacity:])
@property
def min_priority(self):
return np.min(self.tree[-self.capacity:])
def _tree_to_data_index(self, i):
return i - self.capacity + 1
def _data_to_tree_index(self, i):
return i + self.capacity - 1
def add(self, priority, data):
tree_index = self._data_to_tree_index(self.head)
self.update_priority(tree_index, priority)
self.data[self.head] = data
self.head += 1
if self.head >= self.capacity:
self.head = 0
def update_priority(self, tree_index, priority):
delta = priority - self.tree[tree_index]
self.tree[tree_index] = priority
while tree_index != 0:
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += delta
def get_leaf(self, value):
parent = 0
while True:
left = 2 * parent + 1
right = left + 1
if left >= len(self.tree):
leaf = parent
break
else:
if value <= self.tree[left]:
parent = left
else:
value -= self.tree[left]
parent = right
data_index = self._tree_to_data_index(leaf)
return leaf, self.tree[leaf], self.data[data_index]
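
# A minimal usage sketch of SumTree (illustrative values, not part of the original
# module): priorities live in the leaves, so drawing a value uniformly from
# [0, total_priority) and descending with get_leaf() samples data proportionally
# to its priority.
#   tree = SumTree(capacity=4)
#   tree.add(1.0, 'a')
#   tree.add(3.0, 'b')
#   leaf, priority, data = tree.get_leaf(2.5)   # lands on 'b' because 2.5 > 1.0
#   tree.update_priority(leaf, 0.5)             # total priority drops from 4.0 to 1.5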
class PrioritizedExperienceReplay:
def __init__(self, capacity, initial_size, epsilon, alpha, beta, beta_annealing_rate, max_td_error, ckpt_dir):
self.tree = SumTree(capacity)
self.capacity = capacity
self.epsilon = epsilon
self.initial_size = initial_size
self.alpha = alpha
self.beta = beta
self.beta_annealing_rate = beta_annealing_rate
self.max_td_error = max_td_error
self.ckpt_dir = ckpt_dir
def add(self, transition):
max_priority = self.tree.max_priority
if max_priority == 0:
max_priority = self.max_td_error
self.tree.add(max_priority, transition)
def sample(self, batch_size):
self.beta = np.min([1., self.beta + self.beta_annealing_rate])
priority_segment = self.tree.total_priority / batch_size
min_probability = self.tree.min_priority / self.tree.total_priority
max_weight = (min_probability * batch_size) ** (-self.beta)
samples, sample_indices, importance_sampling_weights = [], [], []
for i in range(batch_size):
value = np.random.uniform(priority_segment * i, priority_segment * (i + 1))
index, priority, transition = self.tree.get_leaf(value)
sample_probability = priority / self.tree.total_priority
importance_sampling_weights.append(((batch_size * sample_probability) ** -self.beta) / max_weight)
sample_indices.append(index)
samples.append(transition)
return sample_indices, samples, importance_sampling_weights
def update_priorities(self, tree_indices, td_errors):
td_errors += self.epsilon
clipped_errors = np.minimum(td_errors, self.max_td_error)
priorities = clipped_errors ** self.alpha
for tree_index, priority in zip(tree_indices, priorities):
self.tree.update_priority(tree_index, priority)
def load_or_instantiate(self, env):
if os.path.exists(os.path.join(self.ckpt_dir, "memory.pkl")):
self.load()
return
state = env.reset()
for _ in tqdm(range(self.initial_size), desc="Initializing replay memory", unit="transition"):
action = env.action_space.sample()
next_state, reward, done, info = env.step(action)
transition = (state, action, reward, next_state, done)
self.add(transition)
state = next_state
if done:
state = env.reset()
def load(self):
with open(os.path.join(self.ckpt_dir, "memory.pkl"), "rb") as f:
self.tree = pickle.load(f)
def save(self):
with open(os.path.join(self.ckpt_dir, "memory.pkl"), "wb") as f:
pickle.dump(self.tree, f)
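
# Illustrative wiring (hyperparameter values and the checkpoint directory are
# assumptions for this sketch, not taken from the original training script):
#   memory = PrioritizedExperienceReplay(
#       capacity=100000, initial_size=10000, epsilon=0.01, alpha=0.6,
#       beta=0.4, beta_annealing_rate=1e-6, max_td_error=1.0, ckpt_dir='ckpt')
#   memory.load_or_instantiate(env)                       # prefill with random-policy transitions
#   indices, batch, weights = memory.sample(batch_size=32)
#   memory.update_priorities(indices, td_errors)          # after computing TD errors for the batch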
| mit | -778,120,707,693,490,200 | 35.02381 | 114 | 0.584931 | false |
team-vigir/flexbe_behavior_engine | flexbe_onboard/src/flexbe_onboard/flexbe_onboard.py | 1 | 15565 | #!/usr/bin/env python
import rospy
import os
import sys
import inspect
import tempfile
import threading
import time
import zlib
import contextlib
from ast import literal_eval as cast
from flexbe_core import Logger, BehaviorLibrary
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached
from flexbe_msgs.msg import BehaviorSelection, BEStatus, CommandFeedback
from std_msgs.msg import Empty
class FlexbeOnboard(object):
"""
Controls the execution of robot behaviors.
"""
def __init__(self):
self.be = None
Logger.initialize()
self._tracked_imports = list()
# prepare temp folder
self._tmp_folder = tempfile.mkdtemp()
sys.path.append(self._tmp_folder)
rospy.on_shutdown(self._cleanup_tempdir)
# prepare manifest folder access
self._behavior_lib = BehaviorLibrary()
# prepare communication
self.status_topic = 'flexbe/status'
self.feedback_topic = 'flexbe/command_feedback'
self._pub = ProxyPublisher({
self.feedback_topic: CommandFeedback,
'flexbe/heartbeat': Empty
})
self._pub.createPublisher(self.status_topic, BEStatus, _latch=True)
self._execute_heartbeat()
# listen for new behavior to start
self._enable_clear_imports = rospy.get_param('~enable_clear_imports', False)
self._running = False
self._run_lock = threading.Lock()
self._switching = False
self._switch_lock = threading.Lock()
self._sub = ProxySubscriberCached()
self._sub.subscribe('flexbe/start_behavior', BehaviorSelection, self._behavior_callback)
rospy.sleep(0.5) # wait for publishers etc to really be set up
self._pub.publish(self.status_topic, BEStatus(code=BEStatus.READY))
rospy.loginfo('\033[92m--- Behavior Engine ready! ---\033[0m')
def _behavior_callback(self, msg):
thread = threading.Thread(target=self._behavior_execution, args=[msg])
thread.daemon = True
thread.start()
# =================== #
# Main execution loop #
# ------------------- #
def _behavior_execution(self, msg):
# sending a behavior while one is already running is considered as switching
if self._running:
Logger.loginfo('--> Initiating behavior switch...')
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['received']))
else:
Logger.loginfo('--> Starting new behavior...')
# construct the behavior that should be executed
be = self._prepare_behavior(msg)
if be is None:
Logger.logerr('Dropped behavior start request because preparation failed.')
if self._running:
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['failed']))
else:
rospy.loginfo('\033[92m--- Behavior Engine ready! ---\033[0m')
return
# perform the behavior switch if required
with self._switch_lock:
self._switching = True
if self._running:
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['start']))
# ensure that switching is possible
if not self._is_switchable(be):
Logger.logerr('Dropped behavior start request because switching is not possible.')
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['not_switchable']))
return
# wait if running behavior is currently starting or stopping
rate = rospy.Rate(100)
while not rospy.is_shutdown():
active_state = self.be.get_current_state()
if active_state is not None or not self._running:
break
rate.sleep()
# extract the active state if any
if active_state is not None:
rospy.loginfo("Current state %s is kept active.", active_state.name)
try:
be.prepare_for_switch(active_state)
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['prepared']))
except Exception as e:
Logger.logerr('Failed to prepare behavior switch:\n%s' % str(e))
self._pub.publish(self.feedback_topic, CommandFeedback(command="switch", args=['failed']))
return
# stop the rest
rospy.loginfo('Preempting current behavior version...')
self.be.preempt()
# execute the behavior
with self._run_lock:
self._switching = False
self.be = be
self._running = True
result = None
try:
rospy.loginfo('Behavior ready, execution starts now.')
rospy.loginfo('[%s : %s]', be.name, msg.behavior_checksum)
self.be.confirm()
args = [self.be.requested_state_path] if self.be.requested_state_path is not None else []
self._pub.publish(self.status_topic,
BEStatus(behavior_id=self.be.id, code=BEStatus.STARTED, args=args))
result = self.be.execute()
if self._switching:
self._pub.publish(self.status_topic,
BEStatus(behavior_id=self.be.id, code=BEStatus.SWITCHING))
else:
self._pub.publish(self.status_topic,
BEStatus(behavior_id=self.be.id, code=BEStatus.FINISHED, args=[str(result)]))
except Exception as e:
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.FAILED))
Logger.logerr('Behavior execution failed!\n%s' % str(e))
import traceback
Logger.loginfo(traceback.format_exc())
result = result or "exception" # only set result if not executed
# done, remove left-overs like the temporary behavior file
try:
# do not clear imports for now, not working correctly (e.g., flexbe/flexbe_app#66)
# only if specifically enabled
if not self._switching and self._enable_clear_imports:
self._clear_imports()
self._cleanup_behavior(msg.behavior_checksum)
except Exception as e:
rospy.logerr('Failed to clean up behavior:\n%s' % str(e))
if not self._switching:
Logger.loginfo('Behavior execution finished with result %s.', str(result))
rospy.loginfo('\033[92m--- Behavior Engine ready! ---\033[0m')
self._running = False
self.be = None
# ==================================== #
# Preparation of new behavior requests #
# ------------------------------------ #
def _prepare_behavior(self, msg):
# get sourcecode from ros package
try:
behavior = self._behavior_lib.get_behavior(msg.behavior_id)
if behavior is None:
raise ValueError(msg.behavior_id)
be_filepath = self._behavior_lib.get_sourcecode_filepath(msg.behavior_id, add_tmp=True)
if os.path.isfile(be_filepath):
be_file = open(be_filepath, "r")
rospy.logwarn("Found a tmp version of the referred behavior! Assuming local test run.")
else:
be_filepath = self._behavior_lib.get_sourcecode_filepath(msg.behavior_id)
be_file = open(be_filepath, "r")
try:
be_content = be_file.read()
finally:
be_file.close()
except Exception as e:
Logger.logerr('Failed to retrieve behavior from library:\n%s' % str(e))
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
return
# apply modifications if any
try:
file_content = ""
last_index = 0
for mod in msg.modifications:
file_content += be_content[last_index:mod.index_begin] + mod.new_content
last_index = mod.index_end
file_content += be_content[last_index:]
if zlib.adler32(file_content.encode()) & 0x7fffffff != msg.behavior_checksum:
mismatch_msg = ("Checksum mismatch of behavior versions! \n"
"Attempted to load behavior: %s\n"
"Make sure that all computers are on the same version a.\n"
"Also try: rosrun flexbe_widget clear_cache" % str(be_filepath))
raise Exception(mismatch_msg)
else:
rospy.loginfo("Successfully applied %d modifications." % len(msg.modifications))
except Exception as e:
Logger.logerr('Failed to apply behavior modifications:\n%s' % str(e))
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
return
# create temp file for behavior class
try:
file_path = os.path.join(self._tmp_folder, 'tmp_%d.py' % msg.behavior_checksum)
with open(file_path, "w") as sc_file:
sc_file.write(file_content)
except Exception as e:
Logger.logerr('Failed to create temporary file for behavior class:\n%s' % str(e))
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
return
# import temp class file and initialize behavior
try:
with self._track_imports():
package = __import__("tmp_%d" % msg.behavior_checksum, fromlist=["tmp_%d" % msg.behavior_checksum])
clsmembers = inspect.getmembers(package, lambda member: (inspect.isclass(member) and
member.__module__ == package.__name__))
beclass = clsmembers[0][1]
be = beclass()
rospy.loginfo('Behavior ' + be.name + ' created.')
except Exception as e:
Logger.logerr('Exception caught in behavior definition:\n%s\n'
'See onboard terminal for more information.' % str(e))
import traceback
traceback.print_exc()
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
if self._enable_clear_imports:
self._clear_imports()
return
# initialize behavior parameters
if len(msg.arg_keys) > 0:
rospy.loginfo('The following parameters will be used:')
try:
for i in range(len(msg.arg_keys)):
# action call has empty string as default, not a valid param key
if msg.arg_keys[i] == '':
continue
found = be.set_parameter(msg.arg_keys[i], msg.arg_values[i])
if found:
name_split = msg.arg_keys[i].rsplit('/', 1)
behavior = name_split[0] if len(name_split) == 2 else ''
key = name_split[-1]
suffix = ' (' + behavior + ')' if behavior != '' else ''
rospy.loginfo(key + ' = ' + msg.arg_values[i] + suffix)
else:
rospy.logwarn('Parameter ' + msg.arg_keys[i] + ' (set to ' + msg.arg_values[i] + ') not defined')
except Exception as e:
Logger.logerr('Failed to initialize parameters:\n%s' % str(e))
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
return
# build state machine
try:
be.set_up(id=msg.behavior_checksum, autonomy_level=msg.autonomy_level, debug=False)
be.prepare_for_execution(self._convert_input_data(msg.input_keys, msg.input_values))
rospy.loginfo('State machine built.')
except Exception as e:
Logger.logerr('Behavior construction failed!\n%s\n'
'See onboard terminal for more information.' % str(e))
import traceback
traceback.print_exc()
self._pub.publish(self.status_topic, BEStatus(behavior_id=msg.behavior_checksum, code=BEStatus.ERROR))
if self._enable_clear_imports:
self._clear_imports()
return
return be
# ================ #
# Helper functions #
# ---------------- #
def _is_switchable(self, be):
if self.be.name != be.name:
Logger.logerr('Unable to switch behavior, names do not match:\ncurrent: %s <--> new: %s' %
(self.be.name, be.name))
return False
# locked inside
# locked state exists in new behavior
# ok, can switch
return True
def _cleanup_behavior(self, behavior_checksum):
file_path = os.path.join(self._tmp_folder, 'tmp_%d.pyc' % behavior_checksum)
try:
os.remove(file_path)
except OSError:
pass
try:
os.remove(file_path + 'c')
except OSError:
pass
def _clear_imports(self):
for module in self._tracked_imports:
if module in sys.modules:
del sys.modules[module]
self._tracked_imports = list()
def _cleanup_tempdir(self):
try:
os.remove(self._tmp_folder)
except OSError:
pass
def _convert_input_data(self, keys, values):
result = dict()
for k, v in zip(keys, values):
# action call has empty string as default, not a valid input key
if k == '':
continue
try:
result[k] = self._convert_dict(cast(v))
except ValueError:
# unquoted strings will raise a ValueError, so leave it as string in this case
result[k] = str(v)
except SyntaxError as se:
Logger.loginfo('Unable to parse input value for key "%s", assuming string:\n%s\n%s' %
(k, str(v), str(se)))
result[k] = str(v)
return result
def _execute_heartbeat(self):
thread = threading.Thread(target=self._heartbeat_worker)
thread.daemon = True
thread.start()
def _heartbeat_worker(self):
while True:
self._pub.publish('flexbe/heartbeat', Empty())
time.sleep(1)
def _convert_dict(self, o):
if isinstance(o, list):
return [self._convert_dict(e) for e in o]
elif isinstance(o, dict):
return self._attr_dict((k, self._convert_dict(v)) for k, v in list(o.items()))
else:
return o
class _attr_dict(dict):
__getattr__ = dict.__getitem__
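    # e.g. _convert_dict({'pose': {'x': 1.0}}) yields a dict whose values can be read
    # either as result['pose']['x'] or as result.pose.x thanks to _attr_dict above.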
@contextlib.contextmanager
def _track_imports(self):
previous_modules = set(sys.modules.keys())
try:
yield
finally:
self._tracked_imports.extend(set(sys.modules.keys()) - previous_modules)
| bsd-3-clause | 7,022,887,655,904,633,000 | 42.477654 | 119 | 0.555284 | false |
guokr/asynx | asynx/tests/test_tqclient.py | 1 | 2273 | # -*- coding: utf-8 -*-
from unittest import TestCase
from datetime import datetime, timedelta
from pytz import utc
from asynx import TaskQueueClient
class TQClientTestCase(TestCase):
def test_add_task(self):
tqc = TaskQueueClient('http://localhost:17969', 'test')
# a simple task
task = tqc.add_task(url='http://httpbin.org/get')
self.assertEqual(task['status'], 'new')
self.assertEqual(task['eta'], None)
self.assertEqual(task['request']['url'], 'http://httpbin.org/get')
self.assertEqual(task['countdown'], None)
# a delayed POST task
task = tqc.add_task(url='http://httpbin.org/post',
method='POST',
countdown=200)
self.assertEqual(task['status'], 'delayed')
self.assertTrue(195 < task['countdown'] <= 200)
utcnow = utc.localize(datetime.utcnow())
delta = timedelta(seconds=205)
self.assertTrue(utcnow < task['eta'] < utcnow + delta)
def test_scheduled_task(self):
tqc = TaskQueueClient('http://localhost:17969', 'test')
kw = {'url': 'http://httpbin.org/get',
'schedule': '*/10 * * * *'}
self.assertRaises(tqc.ResponseError, tqc.add_task, **kw)
kw['cname'] = 'test the crontab'
task = tqc.add_task(**kw)
self.assertEqual(task['schedule'], '*/10 * * * *')
tqc.delete_task(task['id'])
def test_list_tasks(self):
tqc = TaskQueueClient('http://localhost:17969', 'test')
for i in range(10):
tqc.add_task(url='http://httpbin.org/get')
result = tqc.list_tasks()
self.assertTrue(len(result['items']) > 10)
self.assertTrue(result['total'] > 10)
def test_get_task(self):
tqc = TaskQueueClient('http://localhost:17969', 'test')
task = tqc.add_task(url='http://httpbin.org/get')
task_get = tqc.get_task(task['id'])
self.assertEqual(task, task_get)
def test_delete_task(self):
tqc = TaskQueueClient('http://localhost:17969', 'test')
task = tqc.add_task(url='http://httpbin.org/get')
self.assertEqual(tqc.delete_task(task['id']), None)
self.assertRaises(tqc.ResponseError, tqc.get_task, task['id'])
| mit | -8,823,692,504,600,444,000 | 37.525424 | 74 | 0.588649 | false |
MikeDacre/LDDirection | LD_Direction/_run.py | 1 | 3957 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple functions used by the rest of the module.
Functions
---------
output_json
Return a string rendition of a dictionary
run
Run a system command and return stdout, stderr, exit_code
run_cmnd
Run a system command with run() and return result split by newline
open_zipped
Open a regular, gzipped, bz2zipped, or open filehandle agnostically
get_length
Convert kb and mb lengths into integers
chrom_sort_key
For use with sorted(), allows correct ordering of chromosomes.
"""
import re as _re
import sys as _sys
import bz2 as _bz2
import gzip as _gzip
import subprocess as _sub
import json
__all__ = ["output_json", "run", "run_cmnd", "open_zipped", "get_length",
"chrom_sort_key"]
def output_json(output):
"""Return JSON formated string."""
return json.dumps(output, sort_keys=True, indent=2)
def run(command, raise_on_error=False):
"""Run a command with subprocess the way it should be.
Parameters
----------
command : str
A command to execute, piping is fine.
raise_on_error : bool
Raise a subprocess.CalledProcessError on exit_code != 0
Returns
-------
stdout : str
stderr : str
exit_code : int
"""
pp = _sub.Popen(command, shell=True, universal_newlines=True,
stdout=_sub.PIPE, stderr=_sub.PIPE)
out, err = pp.communicate()
code = pp.returncode
if raise_on_error and code != 0:
_sys.stderr.write(
'{}\nfailed with:\nCODE: {}\nSTDOUT:\n{}\nSTDERR:\n{}\n'
.format(command, code, out, err)
)
raise _sub.CalledProcessError(
returncode=code, cmd=command, output=out, stderr=err
)
return out, err, code
def run_cmnd(cmnd):
"""Run a command and return the output split into a list by newline."""
output, _, _ = run(cmnd, raise_on_error=True)
return output.strip().split('\n')
def open_zipped(infile, mode='r'):
"""Return file handle of file regardless of compressed or not.
Also returns already opened files unchanged, text mode automatic for
compatibility with python2.
"""
# Return already open files
if hasattr(infile, 'write'):
return infile
# Make text mode automatic
if len(mode) == 1:
mode = mode + 't'
if not isinstance(infile, str):
raise ValueError("I cannot open a filename that isn't a string.")
if infile.endswith('.gz'):
return _gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(_bz2, 'open'):
return _bz2.open(infile, mode)
return _bz2.BZ2File(infile, mode)
return open(infile, mode)
def get_length(length_string):
"""Convert a string length like 50kb to an integer length."""
# Calculate length_string
if isinstance(length_string, str):
if length_string.isdigit():
return int(length_string)
dist, mod = _re.match(r'(\d+)(\D+)', length_string).groups()
dist = int(dist)
mod = mod.lower()
if mod:
if not mod in ['kb', 'mb']:
raise ValueError('Cannot parse {}, must be in kb or mb only'
.format(length_string))
if mod == 'kb':
dist = dist * 1000
elif mod == 'mb':
dist = dist * 1000000
elif isinstance(length_string, int):
return length_string
else:
raise ValueError('length_string must be either a string or an integer, is '
'{}'.format(type(length_string)))
return dist
def chrom_sort_key(x):
"""Return an integer for sorting from a chromosome."""
if x.startswith('chr'):
x = x[3:]
if x.upper() == 'X':
return 100
elif x.upper() == 'Y':
return 101
elif x.upper().startswith('M'):
return 150
elif x.isdigit():
return int(x)
return x
| mit | 8,642,716,844,234,234,000 | 28.311111 | 83 | 0.59717 | false |
drdangersimon/EZgal | ezgal/src/sfhs.py | 1 | 1785 | import numpy as np
import collections,dusts
__ver__ = '1.0'
class sfh_wrapper(object):
""" sfh_wrapper class. EzGal wraps this class around the sfh function. It takes care of the
details of passing or not passing parameters """
func = '' # sfh function
args = () # extra arguments to pass on call
has_args = False # whether or not there are actually any extra arguments
def __init__( self, function, args ):
""" wrapper_obj = ezgal.sfhs.wrapper( function, args )
wrapper class. EzGal wraps this class around the sfh function. It takes care of the
details of passing or not passing parameters """
self.func = function
if type( args ) == type( () ) and len( args ) > 0:
self.has_args = True
self.args = args
def __call__( self, val ):
if self.has_args:
return self.func( val, *self.args )
else:
return self.func( val )
class numeric(object):
ages = np.array( [] )
sfr = np.array( [] )
def __init__( self, ages, sfr ):
""" numeric_obj = ezgal.sfhs.numeric( ages, sfrs )
wrapper class for making a numeric star formation history callable.
Pass a list of ages and relative star formation rates. Ages should be in gyrs. """
self.ages = ages
self.sfr = sfr
def __call__( self, val ):
return np.interp( val, self.ages, self.sfr )
def exponential( t, tau ):
""" ezgal.sfhs.exponential( ages, tau )
exponentially decaying star formation history with
e-folding time scale of tau gyrs """
return np.exp( -1.0*t/tau )
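
# e.g. exponential( np.array( [0.0, 1.0, 2.0] ), tau=1.0 ) is roughly
# [1.0, 0.368, 0.135]: the SFR falls by a factor of e every tau Gyr
# (illustrative numbers only).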
def constant( t, length ):
""" ezgal.sfhs.constant( ages, length )
	Burst of constant star formation from t=0 to t=length """
if type( t ) == type( np.array( [] ) ):
sfr = np.zeros( t.size )
m = t < length
if m.sum(): sfr[m] = 1.0
return sfr
else:
		return 0.0 if t > length else 1.0
| gpl-2.0 | -7,483,396,873,485,222,000 | 25.656716 | 95 | 0.654342 | false |
HydrelioxGitHub/home-assistant | homeassistant/components/media_player/anthemav.py | 1 | 5234 | """
Support for Anthem Network Receivers and Processors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.anthemav/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF,
STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['anthemav==1.1.9']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'anthemav'
DEFAULT_PORT = 14999
SUPPORT_ANTHEMAV = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
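
# An illustrative configuration.yaml entry for this platform (the host is a
# placeholder; port falls back to DEFAULT_PORT and name to the reported model
# when omitted):
#
#   media_player:
#     - platform: anthemav
#       host: 192.168.1.50
#       port: 14999
#       name: Anthem AVR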
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up our socket to the AVR."""
import anthemav
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
device = None
_LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port)
def async_anthemav_update_callback(message):
"""Receive notification from transport that new data exists."""
_LOGGER.info("Received update callback from AVR: %s", message)
hass.async_create_task(device.async_update_ha_state())
avr = await anthemav.Connection.create(
host=host, port=port, loop=hass.loop,
update_callback=async_anthemav_update_callback)
device = AnthemAVR(avr, name)
_LOGGER.debug("dump_devicedata: %s", device.dump_avrdata)
_LOGGER.debug("dump_conndata: %s", avr.dump_conndata)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close)
async_add_entities([device])
class AnthemAVR(MediaPlayerDevice):
"""Entity reading values from Anthem AVR protocol."""
def __init__(self, avr, name):
"""Initialize entity with transport."""
super().__init__()
self.avr = avr
self._name = name
def _lookup(self, propname, dval=None):
return getattr(self.avr.protocol, propname, dval)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ANTHEMAV
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return name of device."""
return self._name or self._lookup('model')
@property
def state(self):
"""Return state of power on/off."""
pwrstate = self._lookup('power')
if pwrstate is True:
return STATE_ON
if pwrstate is False:
return STATE_OFF
return None
@property
def is_volume_muted(self):
"""Return boolean reflecting mute state on device."""
return self._lookup('mute', False)
@property
def volume_level(self):
"""Return volume level from 0 to 1."""
return self._lookup('volume_as_percentage', 0.0)
@property
def media_title(self):
"""Return current input name (closest we have to media title)."""
return self._lookup('input_name', 'No Source')
@property
def app_name(self):
"""Return details about current video and audio stream."""
return self._lookup('video_input_resolution_text', '') + ' ' \
+ self._lookup('audio_input_name', '')
@property
def source(self):
"""Return currently selected input."""
return self._lookup('input_name', "Unknown")
@property
def source_list(self):
"""Return all active, configured inputs."""
return self._lookup('input_list', ["Unknown"])
async def async_select_source(self, source):
"""Change AVR to the designated source (by name)."""
self._update_avr('input_name', source)
async def async_turn_off(self):
"""Turn AVR power off."""
self._update_avr('power', False)
async def async_turn_on(self):
"""Turn AVR power on."""
self._update_avr('power', True)
async def async_set_volume_level(self, volume):
"""Set AVR volume (0 to 1)."""
self._update_avr('volume_as_percentage', volume)
async def async_mute_volume(self, mute):
"""Engage AVR mute."""
self._update_avr('mute', mute)
def _update_avr(self, propname, value):
"""Update a property in the AVR."""
_LOGGER.info(
"Sending command to AVR: set %s to %s", propname, str(value))
setattr(self.avr.protocol, propname, value)
@property
def dump_avrdata(self):
"""Return state of avr object for debugging forensics."""
attrs = vars(self)
return(
'dump_avrdata: '
+ ', '.join('%s: %s' % item for item in attrs.items()))
| apache-2.0 | -3,984,011,923,076,869,600 | 29.788235 | 74 | 0.632595 | false |
bendres/cdmi_shell | cdmi_shell.py | 1 | 8205 | #!/usr/bin/env python
#
##############################
# CDMI shell version: 0.1.0 #
##############################
#
# Copyright 2012 Mezeo Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cmd
import sys
import requests
import json
import getpass
import optparse
import ConfigParser
import os
CONFIG_FILE = '~/.cdmirc'
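
# The config file is standard INI syntax; each section is a connection profile
# chosen with -i/--id. An illustrative ~/.cdmirc (all values are placeholders):
#
#   [mycloud]
#   URL = https://cdmi.example.com
#   USER = alice
#   PASSWORD = secret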
print
print '/#`|#\|\/|^|^ (#|_ _ ||'
print '\_,|_/| |_|_ _)| |(/_||'
print '==========================='
class cdmishell(cmd.Cmd):
"""CDMI CLI... """
def __init__(self):
self.intro = 'Welcome to CDMI shell... For help, type help or ? <ENTER>\n'
self.dpath = '/cdmi'
self.pathlist = {}
self.pathlist['cpath'] = self.dpath
self.prompt = 'cdmishell=> '
parser = optparse.OptionParser("Usage: cdmi_shell")
parser.add_option('-c', '--config', help="specify alternate config file (default='%s')" % CONFIG_FILE)
parser.add_option('-i', '--id', help="specify the configuration to use")
parser.set_defaults(config=CONFIG_FILE)
opts, args = parser.parse_args()
config = ConfigParser.ConfigParser()
if opts.config.startswith('~'):
opts.config = os.path.expanduser(opts.config)
if os.path.isfile(opts.config) and opts.id:
print "Using config file: %s" % opts.config
print "Using ID: %s" % opts.id
config.read(opts.config)
try:
self.url = config.get(opts.id, 'URL')
self.username = config.get(opts.id, 'USER')
self.password = config.get(opts.id, 'PASSWORD')
self.__currentpath(self.dpath)
except:
print "Failed to parse config file. Aborting."
sys.exit(1)
else:
self.url = ''
self.username = ''
self.password = ''
self.logged_in = False
if self.logged_in is False:
self.do_login()
self.__currentpath(self.dpath)
cmd.Cmd.__init__(self)
def __makerequest(self, method, path, content_type=''):
headers = {}
headers['X-CDMI-Specification-Version'] = '1.0.1'
        if content_type:
            headers['Content-Type'] = content_type
if self.url:
path = self.url + path
if method == 'GET':
request = requests.get(path, headers=headers, auth=(self.username, self.password), verify=False)
if method == 'DELETE':
request = requests.delete(path, headers=headers, auth=(self.username, self.password), verify=False)
return (request)
def __currentpath(self, path=''):
if path:
self.pathlist['opath'] = self.pathlist['cpath']
self.pathlist['cpath'] = path
return self.pathlist
def __showobject(self, path, arg):
request = self.__makerequest('GET', path)
if request.status_code == 200:
keys = json.loads(request.content)
if not arg:
value = json.dumps(json.loads(request.content), sort_keys=True, indent=4)
elif arg not in keys:
value = "Key not found..."
elif arg == 'metadata':
value = json.dumps(keys[arg], sort_keys=True, indent=4)
elif arg == 'exports':
value = json.dumps(keys[arg], sort_keys=True, indent=4)
elif arg == 'value':
value = json.dumps(json.loads(keys[arg]), sort_keys=True, indent=4)
else:
value = keys[arg]
        else:
            value = "An HTTP/1.1 %s error occurred while retrieving the object..." % request.status_code
        return value
def do_login(self, arg=None):
"Login and get /cdmi"
print
self.url = raw_input("Enter URL: ")
self.username = raw_input("Enter username: ")
self.password = getpass.getpass("Enter password: ")
print
request = self.__makerequest('GET', self.dpath)
if request.status_code >= 400:
print "An HTTP/1.1 %s error occured during the login process..." % request.status_code
self.logged_in = False
else:
print "Login succsessful...\n"
self.logged_in = True
self.__currentpath(self.dpath)
self.prompt = "cdmi=> "
def do_whoami(self,arg):
"Print user and location..."
print
if self.logged_in == True:
print "Logged in as..."
print "User: %s " % self.username
print "URL: %s " % self.url
else:
print "You are not logged in..."
print
def do_ls(self, arg):
"Perform a unix style listing on the current path..."
if self.url:
cpath = self.__currentpath()
if not arg:
path = cpath['cpath']
else:
path = cpath['cpath'] + '/' + arg
print
print 'Listing: %s' % path
request = self.__makerequest('GET', path)
if request.status_code != 404:
children = json.loads(request.content)
if 'children' in children:
for value in children['children']:
print value
else:
print "No child objects found. Try show..."
else:
print "No objects found..."
print
def do_cd(self, arg):
"Change current path location..."
if self.url:
cpath = self.__currentpath()
if '..' == arg:
path = self.__showobject(cpath['cpath'], 'parentURI')
npath = self.__currentpath(path)
shellprompt = self.__showobject(path, 'objectName')
self.prompt = '%s=> ' % shellprompt
else:
path = cpath['cpath'] + '/' + arg
request = self.__makerequest('GET', path)
if request.status_code == 200:
self.__currentpath(path)
self.prompt = '%s=> ' % arg
else:
print "Path does not exist..."
print path
def do_show(self, arg):
"Pretty print the current path or an individual key..."
print
if self.url:
args = arg.split()
cpath = self.__currentpath()
if len(args) > 1:
path = cpath['cpath'] + '/' + args[0]
results = self.__showobject(path, args[1])
print results
else:
path = cpath['cpath']
results = self.__showobject(path, arg)
print results
print
def do_pwd(self, arg):
"List current path location..."
print
if self.url:
path = self.__currentpath()
print path['cpath']
print
    def do_tree(self, arg):
        "Display a tree-like hierarchy..."
if not arg:
pwd = self.__currentpath()['cpath']
else:
pwd = arg
request = self.__makerequest('GET', pwd)
children = json.loads(request.content)['children']
for child in children:
print pwd + '/' + child
if child.endswith('/'):
self.do_tree(pwd + '/' + child[:-1])
def do_quit(self, arg):
"Exit the CDMI shell..."
sys.exit(1)
def do_EOF(self, line):
"Accept ^D to exit..."
return True
# shortcuts
do_q = do_quit
do_sh = do_show
do_who = do_whoami
do_w = do_whoami
if __name__ == '__main__':
cdmishell().cmdloop()
| apache-2.0 | -574,787,395,904,441,200 | 30.92607 | 115 | 0.509689 | false |
pepetreshere/odoo | odoo/addons/base/models/res_partner.py | 1 | 46813 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import collections
import datetime
import hashlib
import pytz
import threading
import re
import requests
from lxml import etree
from random import randint
from werkzeug import urls
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.modules import get_module_resource
from odoo.osv.expression import get_unaccent_wrapper
from odoo.exceptions import UserError, ValidationError
# Global variables used for the warning fields declared on the res.partner
# in the following modules : sale, purchase, account, stock
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = 'Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.'
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
@api.model
def _lang_get(self):
return self.env['res.lang'].get_installed()
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
_tzs = [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
def _tz_get(self):
return _tzs
class FormatAddressMixin(models.AbstractModel):
_name = "format.address.mixin"
_description = 'Address Format'
def _fields_view_get_address(self, arch):
# consider the country of the user, not the country of the partner we want to display
address_view_id = self.env.company.country_id.address_view_id.sudo()
if address_view_id and not self._context.get('no_address_format') and (not address_view_id.model or address_view_id.model == self._name):
#render the partner address accordingly to address_view_id
doc = etree.fromstring(arch)
for address_node in doc.xpath("//div[hasclass('o_address_format')]"):
Partner = self.env['res.partner'].with_context(no_address_format=True)
sub_view = Partner.fields_view_get(
view_id=address_view_id.id, view_type='form', toolbar=False, submenu=False)
sub_view_node = etree.fromstring(sub_view['arch'])
#if the model is different than res.partner, there are chances that the view won't work
#(e.g fields not present on the model). In that case we just return arch
if self._name != 'res.partner':
try:
self.env['ir.ui.view'].postprocess_and_fields(sub_view_node, model=self._name)
except ValueError:
return arch
address_node.getparent().replace(address_node, sub_view_node)
arch = etree.tostring(doc, encoding='unicode')
return arch
class PartnerCategory(models.Model):
_description = 'Partner Tags'
_name = 'res.partner.category'
_order = 'name'
_parent_store = True
def _get_default_color(self):
return randint(1, 11)
name = fields.Char(string='Tag Name', required=True, translate=True)
color = fields.Integer(string='Color Index', default=_get_default_color)
parent_id = fields.Many2one('res.partner.category', string='Parent Category', index=True, ondelete='cascade')
child_ids = fields.One2many('res.partner.category', 'parent_id', string='Child Tags')
active = fields.Boolean(default=True, help="The active field allows you to hide the category without removing it.")
parent_path = fields.Char(index=True)
partner_ids = fields.Many2many('res.partner', column1='category_id', column2='partner_id', string='Partners')
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('You can not create recursive tags.'))
def name_get(self):
""" Return the categories' display name, including their direct
parent by default.
If ``context['partner_category_display']`` is ``'short'``, the short
version of the category name (without the direct parent) is used.
The default is the long version.
"""
if self._context.get('partner_category_display') == 'short':
return super(PartnerCategory, self).name_get()
res = []
for category in self:
names = []
current = category
while current:
names.append(current.name)
current = current.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
args = args or []
if name:
# Be sure name_search is symetric to name_get
name = name.split(' / ')[-1]
args = [('name', operator, name)] + args
return self._search(args, limit=limit, access_rights_uid=name_get_uid)
class PartnerTitle(models.Model):
_name = 'res.partner.title'
_order = 'name'
_description = 'Partner Title'
name = fields.Char(string='Title', required=True, translate=True)
shortcut = fields.Char(string='Abbreviation', translate=True)
class Partner(models.Model):
_description = 'Contact'
_inherit = ['format.address.mixin', 'image.mixin']
_name = "res.partner"
_order = "display_name"
def _default_category(self):
return self.env['res.partner.category'].browse(self._context.get('category_id'))
@api.model
def default_get(self, default_fields):
"""Add the company of the parent as default if we are creating a child partner.
Also take the parent lang by default if any, otherwise, fallback to default DB lang."""
values = super().default_get(default_fields)
parent = self.env["res.partner"]
if 'parent_id' in default_fields and values.get('parent_id'):
parent = self.browse(values.get('parent_id'))
values['company_id'] = parent.company_id.id
if 'lang' in default_fields:
values['lang'] = values.get('lang') or parent.lang or self.env.lang
return values
name = fields.Char(index=True)
display_name = fields.Char(compute='_compute_display_name', store=True, index=True)
date = fields.Date(index=True)
title = fields.Many2one('res.partner.title')
parent_id = fields.Many2one('res.partner', string='Related Company', index=True)
parent_name = fields.Char(related='parent_id.name', readonly=True, string='Parent name')
child_ids = fields.One2many('res.partner', 'parent_id', string='Contact', domain=[('active', '=', True)]) # force "active_test" domain to bypass _search() override
ref = fields.Char(string='Reference', index=True)
lang = fields.Selection(_lang_get, string='Language',
help="All the emails and documents sent to this contact will be translated in this language.")
active_lang_count = fields.Integer(compute='_compute_active_lang_count')
tz = fields.Selection(_tz_get, string='Timezone', default=lambda self: self._context.get('tz'),
help="When printing documents and exporting/importing data, time values are computed according to this timezone.\n"
"If the timezone is not set, UTC (Coordinated Universal Time) is used.\n"
"Anywhere else, time values are computed according to the time offset of your web client.")
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
user_id = fields.Many2one('res.users', string='Salesperson',
help='The internal user in charge of this contact.')
vat = fields.Char(string='Tax ID', index=True, help="The Tax Identification Number. Complete it if the contact is subjected to government taxes. Used in some legal statements.")
same_vat_partner_id = fields.Many2one('res.partner', string='Partner with same Tax ID', compute='_compute_same_vat_partner_id', store=False)
bank_ids = fields.One2many('res.partner.bank', 'partner_id', string='Banks')
website = fields.Char('Website Link')
comment = fields.Text(string='Notes')
category_id = fields.Many2many('res.partner.category', column1='partner_id',
column2='category_id', string='Tags', default=_default_category)
credit_limit = fields.Float(string='Credit Limit')
active = fields.Boolean(default=True)
employee = fields.Boolean(help="Check this box if this contact is an Employee.")
function = fields.Char(string='Job Position')
type = fields.Selection(
[('contact', 'Contact'),
('invoice', 'Invoice Address'),
('delivery', 'Delivery Address'),
('other', 'Other Address'),
("private", "Private Address"),
], string='Address Type',
default='contact',
help="Invoice & Delivery addresses are used in sales orders. Private addresses are only visible by authorized users.")
# address fields
street = fields.Char()
street2 = fields.Char()
zip = fields.Char(change_default=True)
city = fields.Char()
state_id = fields.Many2one("res.country.state", string='State', ondelete='restrict', domain="[('country_id', '=?', country_id)]")
country_id = fields.Many2one('res.country', string='Country', ondelete='restrict')
partner_latitude = fields.Float(string='Geo Latitude', digits=(16, 5))
partner_longitude = fields.Float(string='Geo Longitude', digits=(16, 5))
email = fields.Char()
email_formatted = fields.Char(
'Formatted Email', compute='_compute_email_formatted',
help='Format email address "Name <email@domain>"')
phone = fields.Char()
mobile = fields.Char()
is_company = fields.Boolean(string='Is a Company', default=False,
help="Check if the contact is a company, otherwise it is a person")
industry_id = fields.Many2one('res.partner.industry', 'Industry')
# company_type is only an interface field, do not use it in business logic
company_type = fields.Selection(string='Company Type',
selection=[('person', 'Individual'), ('company', 'Company')],
compute='_compute_company_type', inverse='_write_company_type')
company_id = fields.Many2one('res.company', 'Company', index=True)
color = fields.Integer(string='Color Index', default=0)
user_ids = fields.One2many('res.users', 'partner_id', string='Users', auto_join=True)
partner_share = fields.Boolean(
        'Share Partner', compute='_compute_partner_share', store=True,
        help="Either a customer (not a user) or a shared user. Indicates the current partner is a customer without "
             "access or with limited access created for sharing data.")
contact_address = fields.Char(compute='_compute_contact_address', string='Complete Address')
# technical field used for managing commercial fields
commercial_partner_id = fields.Many2one('res.partner', compute='_compute_commercial_partner',
string='Commercial Entity', store=True, index=True)
commercial_company_name = fields.Char('Company Name Entity', compute='_compute_commercial_company_name',
store=True)
company_name = fields.Char('Company Name')
barcode = fields.Char(help="Use a barcode to identify this contact.", copy=False, company_dependent=True)
# hack to allow using plain browse record in qweb views, and used in ir.qweb.field.contact
self = fields.Many2one(comodel_name=_name, compute='_compute_get_ids')
_sql_constraints = [
('check_name', "CHECK( (type='contact' AND name IS NOT NULL) or (type!='contact') )", 'Contacts require a name'),
]
@api.depends('is_company', 'name', 'parent_id.display_name', 'type', 'company_name')
def _compute_display_name(self):
diff = dict(show_address=None, show_address_only=None, show_email=None, html_format=None, show_vat=None)
names = dict(self.with_context(**diff).name_get())
for partner in self:
partner.display_name = names.get(partner.id)
@api.depends('lang')
def _compute_active_lang_count(self):
lang_count = len(self.env['res.lang'].get_installed())
for partner in self:
partner.active_lang_count = lang_count
@api.depends('tz')
def _compute_tz_offset(self):
for partner in self:
partner.tz_offset = datetime.datetime.now(pytz.timezone(partner.tz or 'GMT')).strftime('%z')
@api.depends('user_ids.share', 'user_ids.active')
def _compute_partner_share(self):
super_partner = self.env['res.users'].browse(SUPERUSER_ID).partner_id
if super_partner in self:
super_partner.partner_share = False
for partner in self - super_partner:
partner.partner_share = not partner.user_ids or not any(not user.share for user in partner.user_ids)
@api.depends('vat', 'company_id')
def _compute_same_vat_partner_id(self):
for partner in self:
# use _origin to deal with onchange()
partner_id = partner._origin.id
#active_test = False because if a partner has been deactivated you still want to raise the error,
            #so that you can reactivate it instead of creating a new one, which would lose its history.
Partner = self.with_context(active_test=False).sudo()
domain = [
('vat', '=', partner.vat),
('company_id', 'in', [False, partner.company_id.id]),
]
if partner_id:
domain += [('id', '!=', partner_id), '!', ('id', 'child_of', partner_id)]
partner.same_vat_partner_id = bool(partner.vat) and not partner.parent_id and Partner.search(domain, limit=1)
@api.depends(lambda self: self._display_address_depends())
def _compute_contact_address(self):
for partner in self:
partner.contact_address = partner._display_address()
def _compute_get_ids(self):
for partner in self:
partner.self = partner.id
@api.depends('is_company', 'parent_id.commercial_partner_id')
def _compute_commercial_partner(self):
for partner in self:
if partner.is_company or not partner.parent_id:
partner.commercial_partner_id = partner
else:
partner.commercial_partner_id = partner.parent_id.commercial_partner_id
@api.depends('company_name', 'parent_id.is_company', 'commercial_partner_id.name')
def _compute_commercial_company_name(self):
for partner in self:
p = partner.commercial_partner_id
partner.commercial_company_name = p.is_company and p.name or partner.company_name
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
if (not view_id) and (view_type == 'form') and self._context.get('force_email'):
view_id = self.env.ref('base.view_partner_simple_form').id
res = super(Partner, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self._fields_view_get_address(res['arch'])
return res
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('You cannot create recursive Partner hierarchies.'))
def copy(self, default=None):
self.ensure_one()
chosen_name = default.get('name') if default else ''
new_name = chosen_name or _('%s (copy)', self.name)
default = dict(default or {}, name=new_name)
return super(Partner, self).copy(default)
@api.onchange('parent_id')
def onchange_parent_id(self):
# return values in result, as this method is used by _fields_sync()
if not self.parent_id:
return
result = {}
partner = self._origin
if partner.parent_id and partner.parent_id != self.parent_id:
result['warning'] = {
'title': _('Warning'),
'message': _('Changing the company of a contact should only be done if it '
'was never correctly set. If an existing contact starts working for a new '
'company then a new contact should be created under that new '
'company. You can use the "Discard" button to abandon this change.')}
if partner.type == 'contact' or self.type == 'contact':
# for contacts: copy the parent address, if set (aka, at least one
# value is set in the address: otherwise, keep the one from the
# contact)
address_fields = self._address_fields()
if any(self.parent_id[key] for key in address_fields):
def convert(value):
return value.id if isinstance(value, models.BaseModel) else value
result['value'] = {key: convert(self.parent_id[key]) for key in address_fields}
return result
@api.onchange('country_id')
def _onchange_country_id(self):
if self.country_id and self.country_id != self.state_id.country_id:
self.state_id = False
@api.onchange('state_id')
def _onchange_state(self):
if self.state_id.country_id:
self.country_id = self.state_id.country_id
@api.onchange('email')
def onchange_email(self):
if not self.image_1920 and self._context.get('gravatar_image') and self.email:
self.image_1920 = self._get_gravatar_image(self.email)
@api.onchange('parent_id', 'company_id')
def _onchange_company_id(self):
if self.parent_id:
self.company_id = self.parent_id.company_id.id
@api.depends('name', 'email')
def _compute_email_formatted(self):
for partner in self:
if partner.email:
partner.email_formatted = tools.formataddr((partner.name or u"False", partner.email or u"False"))
else:
partner.email_formatted = ''
@api.depends('is_company')
def _compute_company_type(self):
for partner in self:
partner.company_type = 'company' if partner.is_company else 'person'
def _write_company_type(self):
for partner in self:
partner.is_company = partner.company_type == 'company'
@api.onchange('company_type')
def onchange_company_type(self):
self.is_company = (self.company_type == 'company')
@api.constrains('barcode')
def _check_barcode_unicity(self):
if self.env['res.partner'].search_count([('barcode', '=', self.barcode)]) > 1:
            raise ValidationError('Another user already has this barcode')
def _update_fields_values(self, fields):
""" Returns dict of write() values for synchronizing ``fields`` """
values = {}
for fname in fields:
field = self._fields[fname]
if field.type == 'many2one':
values[fname] = self[fname].id
elif field.type == 'one2many':
raise AssertionError(_('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`'))
elif field.type == 'many2many':
values[fname] = [(6, 0, self[fname].ids)]
else:
values[fname] = self[fname]
return values
@api.model
def _address_fields(self):
"""Returns the list of address fields that are synced from the parent."""
return list(ADDRESS_FIELDS)
@api.model
def _formatting_address_fields(self):
"""Returns the list of address fields usable to format addresses."""
return self._address_fields()
def update_address(self, vals):
addr_vals = {key: vals[key] for key in self._address_fields() if key in vals}
if addr_vals:
return super(Partner, self).write(addr_vals)
@api.model
def _commercial_fields(self):
""" Returns the list of fields that are managed by the commercial entity
to which a partner belongs. These fields are meant to be hidden on
partners that aren't `commercial entities` themselves, and will be
delegated to the parent `commercial entity`. The list is meant to be
extended by inheriting classes. """
return ['vat', 'credit_limit']
def _commercial_sync_from_company(self):
""" Handle sync of commercial fields when a new parent commercial entity is set,
as if they were related fields """
commercial_partner = self.commercial_partner_id
if commercial_partner != self:
sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
self.write(sync_vals)
def _commercial_sync_to_children(self):
""" Handle sync of commercial fields to descendants """
commercial_partner = self.commercial_partner_id
sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
sync_children = self.child_ids.filtered(lambda c: not c.is_company)
for child in sync_children:
child._commercial_sync_to_children()
res = sync_children.write(sync_vals)
sync_children._compute_commercial_partner()
return res
def _fields_sync(self, values):
""" Sync commercial fields and address fields from company and to children after create/update,
just as if those were all modeled as fields.related to the parent """
# 1. From UPSTREAM: sync from parent
if values.get('parent_id') or values.get('type') == 'contact':
# 1a. Commercial fields: sync if parent changed
if values.get('parent_id'):
self._commercial_sync_from_company()
# 1b. Address fields: sync if parent or use_parent changed *and* both are now set
if self.parent_id and self.type == 'contact':
onchange_vals = self.onchange_parent_id().get('value', {})
self.update_address(onchange_vals)
# 2. To DOWNSTREAM: sync children
self._children_sync(values)
def _children_sync(self, values):
if not self.child_ids:
return
# 2a. Commercial Fields: sync if commercial entity
if self.commercial_partner_id == self:
commercial_fields = self._commercial_fields()
if any(field in values for field in commercial_fields):
self._commercial_sync_to_children()
for child in self.child_ids.filtered(lambda c: not c.is_company):
if child.commercial_partner_id != self.commercial_partner_id:
self._commercial_sync_to_children()
break
# 2b. Address fields: sync if address changed
address_fields = self._address_fields()
if any(field in values for field in address_fields):
contacts = self.child_ids.filtered(lambda c: c.type == 'contact')
contacts.update_address(values)
def _handle_first_contact_creation(self):
""" On creation of first contact for a company (or root) that has no address, assume contact address
was meant to be company address """
parent = self.parent_id
address_fields = self._address_fields()
if (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
any(self[f] for f in address_fields) and not any(parent[f] for f in address_fields):
addr_vals = self._update_fields_values(address_fields)
parent.update_address(addr_vals)
def _clean_website(self, website):
url = urls.url_parse(website)
if not url.scheme:
if not url.netloc:
url = url.replace(netloc=url.path, path='')
website = url.replace(scheme='http').to_url()
return website
def write(self, vals):
if vals.get('active') is False:
# DLE: It should not be necessary to modify this to make work the ORM. The problem was just the recompute
# of partner.user_ids when you create a new user for this partner, see test test_70_archive_internal_partners
# You modified it in a previous commit, see original commit of this:
# https://github.com/odoo/odoo/commit/9d7226371730e73c296bcc68eb1f856f82b0b4ed
#
# RCO: when creating a user for partner, the user is automatically added in partner.user_ids.
# This is wrong if the user is not active, as partner.user_ids only returns active users.
# Hence this temporary hack until the ORM updates inverse fields correctly.
self.invalidate_cache(['user_ids'], self._ids)
for partner in self:
if partner.active and partner.user_ids:
raise ValidationError(_('You cannot archive a contact linked to an internal user.'))
# res.partner must only allow to set the company_id of a partner if it
# is the same as the company of all users that inherit from this partner
# (this is to allow the code from res_users to write to the partner!) or
# if setting the company_id to False (this is compatible with any user
# company)
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
if vals.get('parent_id'):
vals['company_name'] = False
if 'company_id' in vals:
company_id = vals['company_id']
for partner in self:
if company_id and partner.user_ids:
company = self.env['res.company'].browse(company_id)
companies = set(user.company_id for user in partner.user_ids)
if len(companies) > 1 or company not in companies:
                        raise UserError(
                            _("The selected company is not compatible with the companies of the related user(s)"))
if partner.child_ids:
partner.child_ids.write({'company_id': company_id})
result = True
# To write in SUPERUSER on field is_company and avoid access rights problems.
if 'is_company' in vals and self.user_has_groups('base.group_partner_manager') and not self.env.su:
result = super(Partner, self.sudo()).write({'is_company': vals.get('is_company')})
del vals['is_company']
result = result and super(Partner, self).write(vals)
for partner in self:
if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
self.env['res.users'].check_access_rights('write')
partner._fields_sync(vals)
return result
@api.model_create_multi
def create(self, vals_list):
if self.env.context.get('import_file'):
self._check_import_consistency(vals_list)
for vals in vals_list:
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
if vals.get('parent_id'):
vals['company_name'] = False
partners = super(Partner, self).create(vals_list)
if self.env.context.get('_partners_skip_fields_sync'):
return partners
for partner, vals in zip(partners, vals_list):
partner._fields_sync(vals)
partner._handle_first_contact_creation()
return partners
def _load_records_create(self, vals_list):
partners = super(Partner, self.with_context(_partners_skip_fields_sync=True))._load_records_create(vals_list)
# batch up first part of _fields_sync
# group partners by commercial_partner_id (if not self) and parent_id (if type == contact)
groups = collections.defaultdict(list)
for partner, vals in zip(partners, vals_list):
cp_id = None
if vals.get('parent_id') and partner.commercial_partner_id != partner:
cp_id = partner.commercial_partner_id.id
add_id = None
if partner.parent_id and partner.type == 'contact':
add_id = partner.parent_id.id
groups[(cp_id, add_id)].append(partner.id)
for (cp_id, add_id), children in groups.items():
# values from parents (commercial, regular) written to their common children
to_write = {}
# commercial fields from commercial partner
if cp_id:
to_write = self.browse(cp_id)._update_fields_values(self._commercial_fields())
# address fields from parent
if add_id:
parent = self.browse(add_id)
for f in self._address_fields():
v = parent[f]
if v:
to_write[f] = v.id if isinstance(v, models.BaseModel) else v
if to_write:
self.browse(children).write(to_write)
# do the second half of _fields_sync the "normal" way
for partner, vals in zip(partners, vals_list):
partner._children_sync(vals)
partner._handle_first_contact_creation()
return partners
def create_company(self):
self.ensure_one()
if self.company_name:
# Create parent company
values = dict(name=self.company_name, is_company=True, vat=self.vat)
values.update(self._update_fields_values(self._address_fields()))
new_company = self.create(values)
# Set new company as my parent
self.write({
'parent_id': new_company.id,
'child_ids': [(1, partner_id, dict(parent_id=new_company.id)) for partner_id in self.child_ids.ids]
})
return True
def open_commercial_entity(self):
""" Utility method used to add an "Open Company" button in partner views """
self.ensure_one()
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'res_id': self.commercial_partner_id.id,
'target': 'current',
'flags': {'form': {'action_buttons': True}}}
def open_parent(self):
""" Utility method used to add an "Open Parent" button in partner views """
self.ensure_one()
address_form_id = self.env.ref('base.view_partner_address_form').id
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'views': [(address_form_id, 'form')],
'res_id': self.parent_id.id,
'target': 'new',
'flags': {'form': {'action_buttons': True}}}
def _get_contact_name(self, partner, name):
return "%s, %s" % (partner.commercial_company_name or partner.sudo().parent_id.name, name)
    def _get_name(self):
        """ Utility method to allow name_get to be overridden without re-browsing the partner """
partner = self
name = partner.name or ''
if partner.company_name or partner.parent_id:
if not name and partner.type in ['invoice', 'delivery', 'other']:
name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]
if not partner.is_company:
name = self._get_contact_name(partner, name)
if self._context.get('show_address_only'):
name = partner._display_address(without_company=True)
if self._context.get('show_address'):
name = name + "\n" + partner._display_address(without_company=True)
name = name.replace('\n\n', '\n')
name = name.replace('\n\n', '\n')
if self._context.get('address_inline'):
name = name.replace('\n', ', ')
if self._context.get('show_email') and partner.email:
name = "%s <%s>" % (name, partner.email)
if self._context.get('html_format'):
name = name.replace('\n', '<br/>')
if self._context.get('show_vat') and partner.vat:
name = "%s ‒ %s" % (name, partner.vat)
return name
def name_get(self):
res = []
for partner in self:
name = partner._get_name()
res.append((partner.id, name))
return res
def _parse_partner_name(self, text):
""" Parse partner name (given by text) in order to find a name and an
email. Supported syntax:
* Raoul <[email protected]>
* "Raoul le Grand" <[email protected]>
* Raoul [email protected] (strange fault tolerant support from df40926d2a57c101a3e2d221ecfd08fbb4fea30e)
Otherwise: default, everything is set as the name. Starting from 13.3
returned email will be normalized to have a coherent encoding.
"""
name, email = '', ''
split_results = tools.email_split_tuples(text)
if split_results:
name, email = split_results[0]
if email and not name:
fallback_emails = tools.email_split(text.replace(' ', ','))
if fallback_emails:
email = fallback_emails[0]
name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
if email:
email = tools.email_normalize(email)
else:
name, email = text, ''
return name, email
@api.model
def name_create(self, name):
""" Override of orm's name_create method for partners. The purpose is
to handle some basic formats to create partners using the
name_create.
        If only an email address is received and the regex cannot find
        a name, the name will be set to the email value.
If 'force_email' key in context: must find the email address. """
default_type = self._context.get('default_type')
if default_type and default_type not in self._fields['type'].get_values(self.env):
context = dict(self._context)
context.pop('default_type')
self = self.with_context(context)
name, email = self._parse_partner_name(name)
if self._context.get('force_email') and not email:
raise UserError(_("Couldn't create contact without email address!"))
create_values = {self._rec_name: name or email}
if email: # keep default_email in context
create_values['email'] = email
partner = self.create(create_values)
return partner.name_get()[0]
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
# a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
and args[0][2] != [False]:
self = self.with_context(active_test=False)
return super(Partner, self)._search(args, offset=offset, limit=limit, order=order,
count=count, access_rights_uid=access_rights_uid)
def _get_name_search_order_by_fields(self):
return ''
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
self = self.with_user(name_get_uid or self.env.uid)
# as the implementation is in SQL, we force the recompute of fields if necessary
self.recompute(['display_name'])
self.flush()
if args is None:
args = []
order_by_rank = self.env.context.get('res_partner_search_mode')
if (name or order_by_rank) and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
self.check_access_rights('read')
where_query = self._where_calc(args)
self._apply_ir_rules(where_query, 'read')
from_clause, where_clause, where_clause_params = where_query.get_sql()
from_str = from_clause if from_clause else 'res_partner'
where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
unaccent = get_unaccent_wrapper(self.env.cr)
fields = self._get_name_search_order_by_fields()
query = """SELECT res_partner.id
FROM {from_str}
{where} ({email} {operator} {percent}
OR {display_name} {operator} {percent}
OR {reference} {operator} {percent}
OR {vat} {operator} {percent})
-- don't panic, trust postgres bitmap
ORDER BY {fields} {display_name} {operator} {percent} desc,
{display_name}
""".format(from_str=from_str,
fields=fields,
where=where_str,
operator=operator,
email=unaccent('res_partner.email'),
display_name=unaccent('res_partner.display_name'),
reference=unaccent('res_partner.ref'),
percent=unaccent('%s'),
vat=unaccent('res_partner.vat'),)
where_clause_params += [search_name]*3 # for email / display_name, reference
where_clause_params += [re.sub('[^a-zA-Z0-9]+', '', search_name) or None] # for vat
where_clause_params += [search_name] # for order by
if limit:
query += ' limit %s'
where_clause_params.append(limit)
self.env.cr.execute(query, where_clause_params)
return [row[0] for row in self.env.cr.fetchall()]
return super(Partner, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
@api.model
@api.returns('self', lambda value: value.id)
def find_or_create(self, email, assert_valid_email=False):
""" Find a partner with the given ``email`` or use :py:method:`~.name_create`
to create a new one.
:param str email: email-like string, which should contain at least one email,
e.g. ``"Raoul Grosbedon <[email protected]>"``
:param boolean assert_valid_email: raise if no valid email is found
:return: newly created record
"""
if not email:
raise ValueError(_('An email is required for find_or_create to work'))
parsed_name, parsed_email = self._parse_partner_name(email)
if not parsed_email and assert_valid_email:
raise ValueError(_('A valid email is required for find_or_create to work properly.'))
partners = self.search([('email', '=ilike', parsed_email)], limit=1)
if partners:
return partners
create_values = {self._rec_name: parsed_name or parsed_email}
if parsed_email: # keep default_email in context
create_values['email'] = parsed_email
return self.create(create_values)
def _get_gravatar_image(self, email):
email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
url = "https://www.gravatar.com/avatar/" + email_hash
try:
res = requests.get(url, params={'d': '404', 's': '128'}, timeout=5)
if res.status_code != requests.codes.ok:
return False
except requests.exceptions.ConnectionError as e:
return False
except requests.exceptions.Timeout as e:
return False
return base64.b64encode(res.content)
def _email_send(self, email_from, subject, body, on_error=None):
for partner in self.filtered('email'):
tools.email_send(email_from, [partner.email], subject, body, on_error)
return True
def address_get(self, adr_pref=None):
""" Find contacts/addresses of the right type(s) by doing a depth-first-search
through descendants within company boundaries (stop at entities flagged ``is_company``)
then continuing the search at the ancestors that are within the same company boundaries.
        Defaults to partners of type ``'contact'`` when the exact type is not found, or to the
        provided partner itself if no type ``'contact'`` is found either. """
adr_pref = set(adr_pref or [])
if 'contact' not in adr_pref:
adr_pref.add('contact')
result = {}
visited = set()
for partner in self:
current_partner = partner
while current_partner:
to_scan = [current_partner]
# Scan descendants, DFS
while to_scan:
record = to_scan.pop(0)
visited.add(record)
if record.type in adr_pref and not result.get(record.type):
result[record.type] = record.id
if len(result) == len(adr_pref):
return result
to_scan = [c for c in record.child_ids
if c not in visited
if not c.is_company] + to_scan
# Continue scanning at ancestor if current_partner is not a commercial entity
if current_partner.is_company or not current_partner.parent_id:
break
current_partner = current_partner.parent_id
# default to type 'contact' or the partner itself
default = result.get('contact', self.id or False)
for adr_type in adr_pref:
result[adr_type] = result.get(adr_type) or default
return result
@api.model
def view_header_get(self, view_id, view_type):
if self.env.context.get('category_id'):
return _(
'Partners: %(category)s',
category=self.env['res.partner.category'].browse(self.env.context['category_id']).name,
)
return super().view_header_get(view_id, view_type)
@api.model
@api.returns('self')
def main_partner(self):
''' Return the main partner '''
return self.env.ref('base.main_partner')
@api.model
def _get_default_address_format(self):
return "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
@api.model
def _get_address_format(self):
return self.country_id.address_format or self._get_default_address_format()
def _display_address(self, without_company=False):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param address: browse record of the res.partner to format
:returns: the address formatted in a display that fit its country habits (or the default ones
if not country is specified)
:rtype: string
'''
# get the information that will be injected into the display format
# get the address format
address_format = self._get_address_format()
args = {
'state_code': self.state_id.code or '',
'state_name': self.state_id.name or '',
'country_code': self.country_id.code or '',
'country_name': self._get_country_name(),
'company_name': self.commercial_company_name or '',
}
for field in self._formatting_address_fields():
args[field] = getattr(self, field) or ''
if without_company:
args['company_name'] = ''
elif self.commercial_company_name:
address_format = '%(company_name)s\n' + address_format
return address_format % args
def _display_address_depends(self):
# field dependencies of method _display_address()
return self._formatting_address_fields() + [
'country_id.address_format', 'country_id.code', 'country_id.name',
'company_name', 'state_id.code', 'state_id.name',
]
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Customers'),
'template': '/base/static/xls/res_partner.xls'
}]
@api.model
def _check_import_consistency(self, vals_list):
"""
The values created by an import are generated by a name search, field by field.
        As a result there is no check that the field values are consistent with each other.
We check that if the state is given a value, it does belong to the given country, or we remove it.
"""
States = self.env['res.country.state']
states_ids = {vals['state_id'] for vals in vals_list if vals.get('state_id')}
state_to_country = States.search([('id', 'in', list(states_ids))]).read(['country_id'])
for vals in vals_list:
if vals.get('state_id'):
country_id = next(c['country_id'][0] for c in state_to_country if c['id'] == vals.get('state_id'))
state = States.browse(vals['state_id'])
if state.country_id.id != country_id:
state_domain = [('code', '=', state.code),
('country_id', '=', country_id)]
state = States.search(state_domain, limit=1)
vals['state_id'] = state.id # replace state or remove it if not found
def _get_country_name(self):
return self.country_id.name or ''
class ResPartnerIndustry(models.Model):
_description = 'Industry'
_name = "res.partner.industry"
_order = "name"
name = fields.Char('Name', translate=True)
full_name = fields.Char('Full Name', translate=True)
active = fields.Boolean('Active', default=True)
| agpl-3.0 | 7,808,116,250,079,923,000 | 46.475659 | 221 | 0.602529 | false |
cartoonist/pystream-protobuf | stream/varint.py | 1 | 1366 | # coding=utf-8
"""
stream.varint
~~~~~~~~~~~~~
Encode and decode an integer up to 64 bit to/from 'Varint'. See Google
Protobuf library documentation for more details about Varints.
:copyright: (c) 2017 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
import sys
import click
from google.protobuf.internal.decoder import _DecodeVarint as decodeVarint
from google.protobuf.internal.encoder import _EncodeVarint as encodeVarint
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
def cli():
"""Varint encoder/decoder."""
pass
@cli.command('encode')
@click.argument('integer', nargs=1, type=int)
def cmd_encode(integer):
"""Encode an integer up to 64 bit to Varint."""
encode(integer)
@cli.command('decode')
@click.argument('input_file', nargs=1, type=click.File('rb'))
def cmd_decode(input_file):
"""Decode an integer up to 64 bit from Varint."""
decode(input_file)
def encode(value):
"""Output the encoded value to the standard output.
Args:
value (int): the integer to be encoded.
"""
encodeVarint(sys.stdout.buffer.write, value, True)
def decode(input_file):
"""Output the decoded value to the standard output.
Args:
input_file (file handler): input file handler.
"""
print(decodeVarint(input_file.read(), 0)[0])
| mit | -3,871,336,480,277,629,000 | 23.392857 | 74 | 0.674231 | false |
polyaxon/polyaxon | core/polyaxon/polyflow/matrix/hyperopt.py | 1 | 7008 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.matrix.base import BaseSearchConfig
from polyaxon.polyflow.matrix.kinds import V1MatrixKind
from polyaxon.polyflow.matrix.params import HpParamSchema
from polyaxon.polyflow.matrix.tuner import TunerSchema
from polyaxon.schemas.base import BaseCamelSchema
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class HyperoptSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal(V1MatrixKind.HYPEROPT))
max_iterations = RefOrObject(fields.Int(allow_none=True))
algorithm = fields.Str(
allow_none=True, validate=validate.OneOf(["tpe", "rand", "anneal"])
)
params = fields.Dict(
keys=fields.Str(), values=fields.Nested(HpParamSchema), required=True
)
num_runs = RefOrObject(
fields.Int(required=True, validate=validate.Range(min=1)), required=True
)
seed = RefOrObject(fields.Int(allow_none=True))
concurrency = RefOrObject(fields.Int(allow_none=True))
tuner = fields.Nested(TunerSchema, allow_none=True)
early_stopping = fields.List(fields.Nested(EarlyStoppingSchema), allow_none=True)
@staticmethod
def schema_config():
return V1Hyperopt
class V1Hyperopt(BaseSearchConfig, polyaxon_sdk.V1Hyperopt):
"""Hyperopt is a search algorithm that is backed by the
[Hyperopt](http://hyperopt.github.io/hyperopt/) library
to perform sequential model-based hyperparameter optimization.
the Hyperopt integration exposes 3 algorithms: `tpe`, `rand`, `anneal`.
Args:
kind: hyperopt
algorithm: str, one of tpe, rand, anneal
params: List[Dict[str, [params](/docs/automation/optimization-engine/params/#discrete-values)]] # noqa
maxIterations: int, optional
concurrency: int, optional
num_runs: int, optional
seed: int, optional
tuner: [V1Tuner](/docs/automation/optimization-engine/tuner/), optional
early_stopping: List[[EarlyStopping](/docs/automation/helpers/early-stopping)], optional
## YAML usage
```yaml
>>> matrix:
>>> kind: hyperopt
>>> algorithm:
>>> maxIterations:
>>> concurrency:
>>> params:
>>> numRuns:
>>> seed:
>>> tuner:
>>> earlyStopping:
```
## Python usage
```python
>>> from polyaxon.polyflow import (
>>> V1Hyperopt, V1HpLogSpace, V1HpUniform, V1FailureEarlyStopping, V1MetricEarlyStopping
>>> )
>>> matrix = V1Hyperopt(
>>> algorithm="tpe",
>>> num_runs=20,
>>> concurrency=2,
>>> seed=23,
>>> params={"param1": V1HpLogSpace(...), "param2": V1HpUniform(...), ... },
>>> early_stopping=[V1FailureEarlyStopping(...), V1MetricEarlyStopping(...)]
>>> )
```
## Fields
### kind
The kind signals to the CLI, client, and other tools that this matrix is hyperopt.
    If you are using the python client to create the matrix,
this field is not required and is set by default.
```yaml
>>> matrix:
>>> kind: hyperopt
```
### algorithm
The algorithm to use from the hyperopt library, the supported
algorithms: `tpe`, `rand`, `anneal`.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> algorithm: anneal
```
### concurrency
An optional value to set the number of concurrent operations.
<blockquote class="light">
This value only makes sense if less or equal to the total number of possible runs.
</blockquote>
```yaml
>>> matrix:
>>> kind: hyperopt
>>> concurrency: 2
```
For more details about concurrency management,
please check the [concurrency section](/docs/automation/helpers/concurrency/).
### params
A dictionary of `key -> value generator`
to generate the parameters.
To learn about all possible
[params generators](/docs/automation/optimization-engine/params/).
> The parameters generated will be validated against
> the component's inputs/outputs definition to check that the values
> can be passed and have valid types.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> params:
>>> param1:
>>> kind: ...
>>> value: ...
>>> param2:
>>> kind: ...
>>> value: ...
```
### numRuns
Maximum number of runs to start based on the search space defined.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> numRuns: 5
```
### maxIterations
Maximum number of iterations to run the process of \\-> suggestions -> training ->\\
```yaml
>>> matrix:
>>> kind: hyperopt
>>> maxIterations: 5
```
### seed
Since this algorithm uses random generators,
if you want to control the seed for the random generator, you can pass a seed.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> seed: 523
```
### earlyStopping
A list of early stopping conditions to check for terminating
all operations managed by the pipeline.
If one of the early stopping conditions is met,
a signal will be sent to terminate all running and pending operations.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> earlyStopping: ...
```
### tuner
The tuner reference (w/o component hub reference) to use.
The component contains the logic for creating new suggestions based on hyperopt library,
users can override this section to provide a different tuner component.
```yaml
>>> matrix:
>>> kind: hyperopt
>>> tuner:
>>> hubRef: 'acme/my-hyperopt-tuner:version'
```
"""
SCHEMA = HyperoptSchema
IDENTIFIER = V1MatrixKind.HYPEROPT
REDUCED_ATTRIBUTES = [
"maxIterations",
"algorithm",
"numRuns",
"seed",
"concurrency",
"earlyStopping",
"tuner",
]
def create_iteration(self, iteration: int = None) -> int:
if iteration is None:
return 0
return iteration + 1
def should_reschedule(self, iteration):
"""Return a boolean to indicate if we need to reschedule another iteration."""
if not self.max_iterations:
return True
return iteration < self.max_iterations
| apache-2.0 | -8,689,166,251,913,191,000 | 27.37247 | 111 | 0.640982 | false |
kieranjol/IFIscripts | legacy_scripts/dpxonly.py | 1 | 11222 | #!/usr/bin/env python
import subprocess
import sys
import os
import shutil
import argparse
import datetime
import time
import csv
import uuid
from glob import glob
from ififuncs import create_csv
from ififuncs import append_csv
from ififuncs import send_gmail
from ififuncs import hashlib_manifest
from ififuncs import diff_textfiles
from ififuncs import make_manifest
from ififuncs import generate_log
'''
Events:
md5 manifest created of source
framemd5 of source
tiff2dpx
framemd5 output
manifest of output
'''
def set_environment(logfile):
env_dict = os.environ.copy()
# https://github.com/imdn/scripts/blob/0dd89a002d38d1ff6c938d6f70764e6dd8815fdd/ffmpy.py#L272
env_dict['FFREPORT'] = 'file={}:level=48'.format(logfile)
return env_dict
def make_framemd5(directory, container, log_filename_alteration):
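    '''
    Create the output directory tree for an image sequence and generate a
    framemd5 manifest of it with ffmpeg.
    Returns [output_dirname, framemd5_path, sequence_name_prefix, output_parent_directory].
    '''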
os.chdir(directory)
images = glob('*.%s' % container)
batch_dir = os.path.basename(os.path.dirname(os.path.dirname(root_dir)))
output_parent_directory = os.path.join(args.o, batch_dir)
if not os.path.isdir(output_parent_directory):
os.makedirs(output_parent_directory)
numberless_filename = images[0].split("_")[0:-1]
ffmpeg_friendly_name = ''
counter = 0
    while counter < len(numberless_filename):
ffmpeg_friendly_name += numberless_filename[counter] + '_'
counter += 1
output_dirname = output_parent_directory + '/' + ffmpeg_friendly_name + 'dpx_transcodes'
try:
os.makedirs(output_dirname + '/image')
os.makedirs(output_dirname + '/image/logs')
os.makedirs(output_dirname + '/image/md5')
os.makedirs(output_dirname + '/image/dpx_files')
os.makedirs(output_dirname + '/image/xml_files')
    except OSError:
        pass
output = output_dirname + '/image/md5/%s%s.framemd5' % (ffmpeg_friendly_name,container)
logfile = output_dirname + '/image/logs/%s%s.log' % (ffmpeg_friendly_name, log_filename_alteration)
env_dict = set_environment(logfile)
image_seq_without_container = ffmpeg_friendly_name
ffmpeg_friendly_name += "%06d." + '%s' % container
framemd5 = ['ffmpeg','-report','-f','image2', '-i', ffmpeg_friendly_name,'-f','framemd5',output]
print framemd5
subprocess.call(framemd5, env=env_dict)
info = [output_dirname, output, image_seq_without_container, output_parent_directory]
return info
def file_check(dir2check):
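    '''Identify whether the directory holds a TIFF or a DPX sequence; returns 'TIFF', 'DPX' or 'none'.'''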
os.chdir(dir2check)
tiff_check = glob('*.tiff')
dpx_check = glob('*.dpx')
if len(dpx_check) > 0:
print 'DPX sequence, not TIFF. Not processing'
return 'DPX'
elif len(tiff_check) > 0:
return 'TIFF'
else:
print 'no images found'
return 'none'
def remove_bad_files(root_dir):
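    '''Delete OS cruft such as .DS_Store, Thumbs.db and desktop.ini from the tree.'''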
rm_these = ['.DS_Store', 'Thumbs.db', 'desktop.ini']
for root, dirs, files in os.walk(root_dir):
for name in files:
path = os.path.join(root, name)
for i in rm_these:
if name == i:
print '***********************' + 'removing: ' + path
os.remove(path)
def premis_log(source_parent_dir, source_directory):
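    '''
    Record the whole-package manifest and image framemd5 checksums as PREMIS
    events in the package metadata. (Depends on PREMIS helper functions such
    as setup_xml and make_event, which are not imported in this script.)
    '''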
split_list = os.path.basename(os.path.dirname(source_parent_dir)).split('_')
premisxml, premis_namespace, doc, premis = setup_xml(source_directory)
items = {"workflow":"scanning","oe":split_list[0], "filmographic":split_list[1], "sourceAccession":split_list[2], "interventions":['placeholder'], "prepList":['placeholder'], "user":user}
premis = doc.getroot()
framemd5_uuid = str(uuid.uuid4())
final_sip_manifest_uuid = str(uuid.uuid4())
a = doc.xpath('//ns:agentIdentifierValue',namespaces={'ns': premis_namespace})
for i in a:
if i.text == '9430725d-7523-4071-9063-e8a6ac4f84c4':
linkingEventIdentifier = create_unit(-1,i.getparent().getparent(),'linkingEventIdentifier')
linkingEventIdentifierType = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierType')
linkingEventIdentifierValue = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierValue')
linkingEventIdentifierValue.text = final_sip_manifest_uuid
linkingEventIdentifierType.text = 'UUID'
elif i.text == 'ee83e19e-cdb1-4d83-91fb-7faf7eff738e':
linkingEventIdentifier = create_unit(-1,i.getparent().getparent(),'linkingEventIdentifier')
linkingEventIdentifierType = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierType')
linkingEventIdentifierValue = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierValue')
linkingEventIdentifierValue.text = framemd5_uuid
linkingEventIdentifierType.text = 'UUID'
representation_uuid = doc.findall('//ns:objectIdentifierValue',namespaces={'ns': premis_namespace})[0].text
#ffmpegAgent = make_agent(premis,[framemd5_uuid ], 'ee83e19e-cdb1-4d83-91fb-7faf7eff738e')
make_event(premis, 'message digest calculation', 'Checksum manifest for whole package created', [['UUID','9430725d-7523-4071-9063-e8a6ac4f84c4' ]],final_sip_manifest_uuid,[representation_uuid], 'source', 'now')
make_event(premis, 'message digest calculation', 'Frame level checksums of images', [['UUID','ee83e19e-cdb1-4d83-91fb-7faf7eff738e' ]], framemd5_uuid, [representation_uuid], 'source', 'now' )
write_premis(doc, premisxml)
parser = argparse.ArgumentParser(description='DPX2TIFF specific workflow for IFI'
' Written by Kieran O\'Leary.')
parser.add_argument(
'input', nargs='+',
help='full path of input directory'
)
parser.add_argument(
'-o',
help='full path of output directory', required=True)
args = parser.parse_args()
print args
csv_report_filename = os.path.expanduser("~/Desktop/") + 'dpx_transcode_report' + time.strftime("_%Y_%m_%dT%H_%M_%S") + '.csv'
#permission for correct directories sought from user
permission = ''
all_files = args.input
if permission not in ('y', 'Y'):
print '\n\n**** All TIFF sequences within these directories will be converted to DPX.\n'
for i in all_files:
print i
    permission = raw_input('\n**** These are the directories that will be turned into DPX. \n**** If this looks ok, please press Y, otherwise, type N\n')
while permission not in ('Y','y','N','n'):
        permission = raw_input('\n**** These are the directories that will be turned into DPX. \n**** If this looks ok, please press Y, otherwise, type N\n')
if permission == 'n' or permission == 'N':
print 'Exiting at your command- Cheerio for now'
sys.exit()
elif permission =='y' or permission == 'Y':
print 'Ok so!'
#user identity sought for accurate premis documentation
user = ''
if user not in ('1', '2', '3'):
user = raw_input('\n\n**** Who are you?\nPress 1 or 2 or 3\n\n1. Brian Cash\n2. Gavin Martin\n3. Raelene Casey\n' )
while user not in ('1','2','3'):
user = raw_input('\n\n**** Who are you?\nPress 1 or 2 or 3\n\n1. Brian Cash\n2. Gavin Martin\n3. Raelene Casey\n')
if user == '1':
user = 'Brian Cash'
print 'Hi Brian, Congratulations on becoming a father!!!'
elif user == '2':
user = 'Gavin Martin'
print 'Hi Gavin, Have you renewed your subscription to American Cinematographer?'
elif user == '3':
user = 'Raelene Casey'
print 'Hi Raelene, Brian must be out of the office'
time.sleep(1)
create_csv(csv_report_filename, ('Sequence Name', 'Lossless?', 'Start time', 'Finish Time'))
space_counter = 0
for source_directory in all_files:
for root,dirnames,filenames in os.walk(source_directory):
for folders in dirnames:
if ' ' in folders:
print 'Space found in %s - DELETE IT PLEASE' % os.path.join(root,folders)
space_counter += 1
if space_counter > 0:
sys.exit()
for root,dirnames,filenames in os.walk(source_directory):
source_directory = root
if not file_check(source_directory) == 'TIFF':
append_csv(csv_report_filename, (source_directory,'EMPTY DIRECTORY - SKIPPED', 'n/a', 'n/a'))
continue
root_dir = os.path.dirname(os.path.dirname(root))
general_log = root_dir + '/logs/image/%s__second_pass_image_log.log' % os.path.basename(root_dir)
generate_log(general_log, 'Input = %s' % root)
source_parent_dir = os.path.dirname(source_directory)
normpath = os.path.normpath(source_directory)
relative_path = normpath.split(os.sep)[-1]
split_path = os.path.split(os.path.basename(source_directory))[1]
start = datetime.datetime.now()
source_manifest = root_dir + '/%s_manifest.md5' % relative_path
info = make_framemd5(source_directory, 'tiff', 'tiff_framemd5')
output_dirname = info[0]
source_textfile = info[1]
fmd5copy = root_dir + '/metadata/image'
shutil.copy(source_textfile,fmd5copy )
image_seq_without_container = info[2]
output_parent_directory = info[3]
tiff_filename = image_seq_without_container + "%06d.tiff"
dpx_filename = image_seq_without_container + "%06d.dpx"
logfile = output_dirname + '/image/logs/%sdpx_transcode.log' % image_seq_without_container
env_dict = set_environment(logfile)
generate_log(general_log, 'Starting TIFF to DPX transcode')
tiff2dpx = ['ffmpegnometadata','-report','-f','image2','-framerate','24', '-i', tiff_filename ,output_dirname + '/image/dpx_files' '/' + dpx_filename]
print tiff2dpx
subprocess.call(tiff2dpx,env=env_dict)
generate_log(general_log, 'TIFF to DPX transcode complete')
parent_basename = os.path.basename(output_dirname)
manifest_textfile = os.path.dirname(output_dirname) + '/' + parent_basename + '_manifest.md5'
generate_log(general_log, 'Generating destination manifest via md5deep and storing as %s' % manifest_textfile)
other = make_framemd5(output_dirname + '/image/dpx_files', 'dpx', 'dpx_framemd5')
other_textfile = other[1]
judgement = diff_textfiles(source_textfile, other_textfile)
generate_log(general_log, 'Outcome of transcode was: %s' % judgement)
make_manifest(output_parent_directory, os.path.basename(output_dirname), manifest_textfile)
source_metadata_dir = root_dir + '/metadata/image'
shutil.copy(source_textfile, source_metadata_dir + '/%s' % os.path.basename(source_textfile))
finish = datetime.datetime.now()
append_csv(csv_report_filename, (parent_basename,judgement, start, finish))
| mit | -829,233,088,578,808,400 | 49.32287 | 214 | 0.627517 | false |
janusnic/django-admin-twitter | admin_twitter/admin.py | 1 | 5122 | from django.contrib import admin
from django.conf import settings
from .models import Tweet
from .models import Author
from .models import Message
from twitter import *
from django.shortcuts import get_object_or_404, render
from django.conf.urls import patterns, url
from django.http import HttpResponse, HttpResponseRedirect
# Register your models here.
def replytweet(modeladmin, request, queryset):
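    """Admin action: reply to each selected, not-yet-replied tweet with a random active Message and mark it as replied."""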
twitter = Twitter(auth=OAuth(settings.OAUTH_TOKEN, settings.OAUTH_SECRET, settings.CONSUMER_KEY, settings.CONSUMER_SECRET))
qs = queryset.filter(replied=False)
for tweet in qs:
retrievedmessage=Message.objects.filter(active=True).order_by('?').first()
if retrievedmessage is not None:
message = "@"+tweet.author+" " + retrievedmessage.message
print (message)
try:
twitter.statuses.update(status=message, in_reply_to_status_id=tweet.id)
tweet.replied = True
tweet.save()
except TwitterHTTPError as api_error:
print("Error: %s" % (str(api_error)))
replytweet.short_description = "Reply tweet"
def favouritetweet(modeladmin, request, queryset):
twitter = Twitter(auth=OAuth(settings.OAUTH_TOKEN, settings.OAUTH_SECRET, settings.CONSUMER_KEY, settings.CONSUMER_SECRET))
qs = queryset.filter(favourited=False)
for tweet in qs:
try:
twitter.favorites.create(_id=tweet.id)
tweet.favourited = True
tweet.save()
except TwitterHTTPError as api_error:
print("Error: %s" % (str(api_error)))
favouritetweet.short_description = "Favourite tweet"
def followauthor(modeladmin, request, queryset):
twitter = Twitter(auth=OAuth(settings.OAUTH_TOKEN, settings.OAUTH_SECRET, settings.CONSUMER_KEY, settings.CONSUMER_SECRET))
qs = queryset.filter(followed=False)
for author in qs:
try:
twitter.friendships.create(screen_name=author.id, follow=False)
author.followed = True
author.save()
except TwitterHTTPError as api_error:
print("Error: %s" % (str(api_error)))
followauthor.short_description = "Follow"
class TweetAdmin(admin.ModelAdmin):
def get_urls(self):
urls = super(TweetAdmin, self).get_urls()
my_urls = patterns('',
(r'^update/$', self.update)
)
return my_urls + urls
def update(self, request):
# custom view which should return an HttpResponse
if request.method == 'GET':
return render(request, 'admin/updateform.html',)
else:
twitter = Twitter(auth=OAuth(settings.OAUTH_TOKEN, settings.OAUTH_SECRET, settings.CONSUMER_KEY, settings.CONSUMER_SECRET))
result = twitter.search.tweets(q=request.POST.get('search', ''), count=request.POST.get('number', '50'), result_type=request.POST.get('type', 'recent'))
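            # upsert each returned tweet and its author into the local database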
for tweet in result["statuses"]:
try:
t = Tweet.objects.get(pk=tweet["id"])
except Tweet.DoesNotExist:
t = Tweet()
t.id = tweet["id"]
t.author = tweet["user"]["screen_name"]
t.title = tweet["text"].encode('utf-8')
t.search = request.POST.get('search', '')
t.save()
try:
a = Author.objects.get(pk=tweet["user"]["screen_name"])
except Author.DoesNotExist:
a = Author()
a.id = tweet["user"]["screen_name"]
a.save()
return HttpResponseRedirect("..")
#return render(request, 'admin/updateform.html',)
list_display = ['id','date','title','author','replied','favourited','search',]
ordering = ['-date']
actions = [replytweet,favouritetweet,]
list_filter = ('replied', 'favourited')
search_fields = ['title','author','search',]
class Media:
js = ('updatebutton.js', )
def get_actions(self, request):
#Disable delete
actions = super(TweetAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
def get_readonly_fields(self, request, obj=None):
if obj: # editing an existing object
return self.readonly_fields + ('id','date','title','author',)
return self.readonly_fields
class AuthorAdmin(admin.ModelAdmin):
list_display = ['id','date','followed',]
list_filter = ('followed',)
actions = [followauthor,]
ordering = ['-date']
search_fields = ['id']
def get_actions(self, request):
#Disable delete
actions = super(AuthorAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
class MessageAdmin(admin.ModelAdmin):
list_display = ['date','message','active',]
list_filter = ('active',)
ordering = ['-date']
search_fields = ['message']
admin.site.register(Tweet, TweetAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Message, MessageAdmin)
| gpl-3.0 | -2,779,515,468,671,141,400 | 35.585714 | 164 | 0.613823 | false |
fabaff/mqtt-panel | test-messages.py | 1 | 1054 | #!/usr/bin/python3
#
# test-messages.py - This script publishes a random MQTT message every 2 s.
#
# Copyright (c) 2013-2020, Fabian Affolter <[email protected]>
# Released under the MIT license. See LICENSE file for details.
#
import random
import time
import paho.mqtt.client as mqtt
timestamp = int(time.time())
broker = "127.0.0.1"
port = 1883
element = "home"
areas = ["front", "back", "kitchen", "basement", "living"]
entrances = ["door", "window"]
states = ["true", "false"]
print(f"Messages are published on topic {element}/#... -> CTRL + C to shutdown")
while True:
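    # basement and living publish temperature readings; the other areas publish door/window open-close states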
area = random.choice(areas)
if area in ["basement", "living"]:
topic = element + "/" + area + "/temp"
message = random.randrange(0, 30, 1)
else:
topic = element + "/" + area + "/" + random.choice(entrances)
message = random.choice(states)
mqtt_client = mqtt.Client("mqtt-panel-test", protocol=mqtt.MQTTv311)
mqtt_client.connect(broker, port=int(port))
mqtt_client.publish(topic, message)
time.sleep(2)
| mit | 9,109,013,109,760,228,000 | 29.114286 | 80 | 0.656546 | false |
DanteOnline/free-art | venv/bin/enhancer.py | 1 | 1612 | #!/home/dante/Projects/free-art/venv/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = float(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
if len(sys.argv) != 2:
print("Usage: enhancer file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| gpl-3.0 | -3,770,789,276,532,839,400 | 24.587302 | 80 | 0.640819 | false |
stefsmeets/scatter | scatter/scatter.py | 1 | 18719 | #!/usr/bin/env python
# Scatter - A python tool to plot and output atomic scattering factors
# Copyright (C) 2018 Stef Smeets
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
try:
range = xrange
except NameError:
pass
from random import random as r
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
import argparse
from .it_table_4322 import it_table_4322
from .it_table_4323 import it_table_4323
from .peng1998 import peng1998
from .wk1995 import wk1995
from .dt1969 import dt1969
from .atomic_radii import atomic_radii
XRS = { # a1 b1 a2 b2 a3 b3 a4 b4 c
'Al': "FORMGN Al 6.420203.038701.900200.742601.5936031.54721.9646085.08861.11510",
'C': "FORMGN C 2.3100020.84391.0200010.20751.588600.568700.8650051.65120.21560",
'Ca': "FORMGN Ca 8.6266010.44217.387300.659901.5899085.74841.02110178.4371.37510",
'F': "FORMGN F 3.5392010.28252.641204.294401.517000.261501.0243026.14760.27760",
'Ge': "FORMGN Ge 16.08162.850906.374700.251603.7068011.44683.6830054.76252.13130",
'H': "FORMGN H 0.4899220.65930.262007.740390.1967749.55190.049882.201590.00131",
'N': "FORMGN N 12.21260.005703.132209.893302.0125028.99751.166300.58260-11.529",
'Na': "FORMGN Na 4.762603.285003.173608.842201.267400.313601.11280129.4240.67600",
'O': "FORMGN O 3.0448513.22772.286805.701101.546300.323900.8670032.90890.25080",
'P': "FORMGN P 6.434501.906704.1791027.15701.780000.526001.4908068.16451.11490",
'Si': "FORMGN Si 6.291502.438603.0353032.33371.989100.678501.5410081.69371.14070",
'Zn': "FORMGN Zn 14.07433.265507.031800.233305.1652010.31632.4100058.70971.30410",
'Zr': "FORMGN Zr 17.87651.2761810.948011.91605.417320.117623.6572187.66272.06929"
}
elements = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F',
'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe',
'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I',
'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm',
'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg',
'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf']
ions = ['H1-', 'Li1+', 'Be2+', 'Cval', 'O1-', 'O2-', 'F1-', 'Na1+', 'Mg2+',
'Al3+', 'Sival', 'Si4+', 'Cl1-', 'K1+', 'Ca2+', 'Sc3+', 'Ti2+',
'Ti3+', 'Ti4+', 'V2+', 'V3+', 'V5+', 'Cr2+', 'Cr3+', 'Mn2+',
'Mn3+', 'Mn4+', 'Fe2+', 'Fe3+', 'Co2+', 'Co3+', 'Ni2+', 'Ni3+',
'Cu1+', 'Cu2+', 'Zn2+', 'Ga3+', 'Ge4+', 'Br1-', 'Rb1+', 'Sr2+',
'Y3+', 'Zr4+', 'Nb3+', 'Nb5+', 'Mo3+', 'Mo5+', 'Mo6+', 'Ru3+',
'Ru4+', 'Rh3+', 'Rh4+', 'Pd2+', 'Pd4+', 'Ag1+', 'Ag2+', 'Cd2+',
'In3+', 'Sn2+', 'Sn4+', 'Sb3+', 'Sb5+', 'I1-', 'Cs1+', 'Ba2+',
'La3+', 'Ce3+', 'Ce4+', 'Pr3+', 'Pr4+', 'Nd3+', 'Pm3+', 'Sm3+',
'Eu2+', 'Eu3+', 'Gd3+', 'Tb3+', 'Dy3+', 'Ho3+', 'Er3+', 'Tm3+',
'Yb2+', 'Yb3+', 'Lu3+', 'Hf4+', 'Ta5+', 'W6+', 'Os4+', 'Ir3+',
'Ir4+', 'Pt2+', 'Pt4+', 'Au1+', 'Au3+', 'Hg1+', 'Hg2+', 'Tl1+',
'Tl3+', 'Pb2+', 'Pb4+', 'Bi3+', 'Bi5+', 'Ra2+', 'Ac3+', 'Th4+',
'U3+', 'U4+', 'U6+', 'Np3+', 'Np4+', 'Np6+', 'Pu3+', 'Pu4+',
'Pu6+']
other = ['D', 'Es', 'Fm', 'Md', 'No', 'Lr', 'NULL']
# n = r()*len(it_table_4322.keys()) // 1
# key = it_table_4322.keys()[int(n)]
# print it_table_4322[key]
def calc_s(ld, r):
return 4 * np.pi * (1/ld) * r
def gaussian(a, b, s):
"""General Gaussian"""
return a * np.exp(-b * s**2)
def plot_sf_atoms(atoms, s, kind="xray"):
for atom in atoms:
data = tables[atom]
y = calc_sf(atom, data, s, kind)
plt.plot(s, y, label=atom)
plt.ylabel("f")
plt.xlabel(r"$\sin(\theta)/\lambda (1/\mathrm{\AA})$")
plt.legend()
plt.show()
def calc_sf(atom, data, s, kind="xray"):
if kind == 'xray':
return calc_sf_xray(atom, data, s)
elif kind == "electron":
return calc_sf_electron(atom, data, s)
else:
raise NameError
def calc_sf_electron(atom, data, s):
"""scattering factor function for electron/it432x table"""
total = None
for i in range(5):
a, b, dZ = data[2+i], data[7+i], data[1]
y = gaussian(a, b, s)
if total is None:
total = y
else:
total += y
return total
def calc_sf_xray(atom, data, s):
"""Scattering factor function for xray/wk1995 table"""
total = None
for i in range(5):
a, b, c = data[0+i], data[5+i], data[10] # wk95
y = gaussian(a, b, s)
if total is None:
total = y + c
else:
total += y
return total
def print_xy_atoms(atoms, s, kind="xray"):
ys = []
for atom in atoms:
data = tables[atom]
ys.append(calc_sf(atom, data, s, kind))
print_xy(atoms, s, ys)
def print_xy(atoms, s, ys):
print("\n ", end=' ')
for atom in atoms:
print("{:>10s}".format(atom), end=' ')
for i, val in enumerate(s):
print("\n{:6.2f}".format(val), end=' ')
for j in range(len(atoms)):
print("{:10.5f}".format(ys[j][i]), end=' ')
def check_consistency(atoms, s, plot=False, show=False, threshold=0):
for atom in atoms:
total1 = None
data1 = it_table_4322[atom]
for i in range(5):
a, b, dZ = data1[2+i], data1[7+i], data1[1]
y = gaussian(a, b, s)
            if total1 is None:
total1 = y
else:
total1 += y
data2 = it_table_4323[atom]
total2 = None
for i in range(5):
a, b, dZ = data2[2+i], data2[7+i], data2[1]
y = gaussian(a, b, s)
            if total2 is None:
total2 = y
else:
total2 += y
r = sum((total1-total2)**2)
print("%4s %7.3f" % (atom, r))
if r > threshold:
if show:
                print_table([atom], "electron", data1)
                print_table([atom], "electron", data2)
if plot:
plt.plot(s, total1)
plt.plot(s, total2)
plt.show()
def print_combine_tables(atoms):
for atom in atoms:
if atom in wk1995:
data = wk1995[atom]
print("{ \"%s\",\n" % atom, end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[0], data[1], data[2], data[3], data[4]), end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[5], data[6], data[7], data[8], data[9]), end=' ')
print(" %f, \n" % data[10], end=' ')
else:
print('atom not found ?', atom)
exit()
if atom in it_table_4322:
data = it_table_4322[atom]
print(" { %f, %f, %f, %f, %f }, \n" % (data[2], data[3], data[4], data[5], data[6]), end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[7], data[8], data[9], data[10], data[11]), end=' ')
else:
print(" { 0.0, 0.0, 0.0, 0.0, 0.0 }, \n", end=' ')
print(" { 0.0, 0.0, 0.0, 0.0, 0.0 }, \n", end=' ')
if atom in it_table_4323:
data = it_table_4323[atom]
print(" { %f, %f, %f, %f, %f }, \n" % (data[2], data[3], data[4], data[5], data[6]), end=' ')
print(" { %f, %f, %f, %f, %f } }, \n" % (data[7], data[8], data[9], data[10], data[11]), end=' ')
else:
print(" { 0.0, 0.0, 0.0, 0.0, 0.0 }, \n", end=' ')
print(" { 0.0, 0.0, 0.0, 0.0, 0.0 } }, \n", end=' ')
def xrs2table(d):
e = {}
a5 = 0.0
b5 = 0.0
for key, line in d.items():
label = line[0:11].strip()
a1, b1, a2, b2, a3, b3, a4, b4, c = [
float(line[13+7*i:13+7+7*i]) for i in range(9)]
e['xrs'+key] = [a1, a2, a3, a4, a5, b1, b2, b3, b4, b5, c]
return e
def combine_scfacts(atom1, atom2, ratio):
ratio = float(ratio)
data1 = tables[atom1]
data2 = tables[atom2]
sfact = []
for n in range(12):
item = data1[n]*ratio + data2[n]*(1-ratio)
sfact.append(item)
atoms = [atom1, atom2, 'custom']
return atoms, sfact
def print_table(atoms, kind, custom_data=None):
if kind == "xray":
print("""name
a1 a1 a3 a4 a5
b1 b2 b3 b4 b5
c""")
if kind == "electron":
print("""name
a1 a1 a3 a4 a5
b1 b2 b3 b4 b5""")
for atom in atoms:
if custom_data:
data = custom_data
else:
data = tables[atom]
if kind == "xray":
print("{ \"%s\",\n" % atom, end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[0], data[1], data[2], data[3], data[4]), end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[5], data[6], data[7], data[8], data[9]), end=' ')
print(" %f, \n" % data[10], end=' ')
if kind == "electron":
print("{ \"%s\",\n" % atom, end=' ')
print(" { %f, %f, %f, %f, %f }, \n" % (data[2], data[3], data[4], data[5], data[6]), end=' ')
print(" { %f, %f, %f, %f, %f } },\n" % (data[7], data[8], data[9], data[10], data[11]), end=' ')
def add_us():
atoms, tables['custom'] = combine_scfacts(*atoms)
check_consistency(atoms, s, True, True, 0.01)
def four_gaussians(x, h1, w1, h2, w2, h3, w3, h4, w4):
return (gaussian(h1, w1, x) +
gaussian(h2, w2, x) +
gaussian(h3, w3, x) +
gaussian(h4, w4, x))
def five_gaussians(x, h1, w1, h2, w2, h3, w3, h4, w4, h5, w5):
return (gaussian(h1, w1, x) +
gaussian(h2, w2, x) +
gaussian(h3, w3, x) +
gaussian(h4, w4, x) +
gaussian(h5, w5, x))
def peng_formula(f0, dZ, s):
"""
from http://scripts.iucr.org/cgi-bin/paper?S0108767398001901
Based on the Mott-Bethe formulation
"""
raise NotImplementedError("Not working as expected")
pi = np.pi
me = 9.1093818e-31 # kg
h = 6.62606809633e-34 # J s
c = 1.60217646e-19 # C
constant = (me * c**2) / (2 * h**2)
f1 = f0 + constant * (dZ/s**2)
return f1
def print_table_shelx(atoms, s, kind):
# from IPython import embed
# embed()
sfacs = []
from scipy import optimize
for atom in atoms:
if atom not in tables:
continue
data = tables[atom]
if kind == "electron":
values = data[2], data[7], data[3], data[8], data[4], data[9], data[5], data[10], data[6], data[11]
args = s, five_gaussians(s, *values)
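            # SHELX SFAC cards take four Gaussian terms, so refit the tabulated five-Gaussian curve with four Gaussians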
def errfunc(p, x, y):
# print(p.shape, x.shape, y.shape)
ret = (four_gaussians(x, *p) - y)**2
# print(ret.shape, np.sum(ret))
return ret
x_init = [0.3352, 0.4001, 0.8630, 3.0532, 2.5725, 17.1148, 2.0490, 59.0519] # Si
result = optimize.least_squares(errfunc, x_init[:], args=args)
x_values = result.x
if result.success:
print("{name} minimized -> nfev: {nfev}, njev: {njev}, optimality: {optimality:.3g}, cost: {cost:.3g}".format(name=atom, **result))
else:
raise ValueError(result.message)
element = ''.join([i for i in atom if i.isalpha()])
covrad = atomic_radii[element][2]
wght = atomic_radii[element][1]
h1, w1, h2, w2, h3, w3, h4, w4 = x_values
sfac = "SFAC {atom:4s} {h1:7.4f} {w1:7.4f} {h2:7.4f} {w2:7.4f} {h3:7.4f} {w3:7.4f} {h4:7.4f} {w4:7.4f} =\n {no_data:7.4f} {no_data:7.4f} {no_data:7.4f} {no_data:7.4f} {covrad:7.4f} {wght:7.4f}".format(
atom=atom, h1=h1, w1=w1, h2=h2, w2=w2, h3=h3, w3=w3, h4=h4, w4=w4, no_data=0.0, covrad=covrad, wght=wght)
sfacs.append(sfac)
plt.plot(s, four_gaussians(s, *x_values), label="{} fitted (4 params)".format(atom))
plt.plot(s, five_gaussians(s, *values), label="{} tabulated (5 params)".format(atom))
else:
raise NotImplementedError
plt.title("Fitting results")
plt.ylabel("f")
plt.xlabel(r"$\sin(\theta)/\lambda (1/\mathrm{\AA})$")
plt.legend()
plt.show()
print()
for sfac in sfacs:
print(sfac)
def print_table_topas(atoms):
print("x-ray {")
for atom in atoms:
if atom not in tables:
continue
data = tables[atom]
if len(data) == 12:
print('%8s' % atom, end=' ')
print('%f %f %f %f %f' % (data[2], data[3], data[4], data[5], data[6]), end=' ')
print('%f' % 0.0, end=' ')
print('%f %f %f %f %f' % (data[7], data[8], data[9], data[10], data[11]))
else:
print('%8s' % atom, end=' ')
print('%f %f %f %f %f' % (data[0], data[1], data[2], data[3], data[4]), end=' ')
print('%f' % 0.0, end=' ')
print('%f %f %f %f %f' % (data[5], data[6], data[7], data[8], data[9]))
print('}')
def main():
description = """Notes:
- Passing 'all' as an argument adds all atoms.
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("args",
type=str, metavar="atoms", nargs='*',
help="List of atoms")
parser.add_argument("-t", "--table",
action="store", type=str, dest="table",
help="Scattering factor table to use (xray,electron,wk1995,it4322,it4323,peng1998,dt1969). Defaults: xray")
parser.add_argument("-r", "--range", metavar="val",
action="store", type=float, nargs=3, dest="s_range",
help="sin(th)/lambda range. Requires 3 values: start finish step")
parser.add_argument("-w", "--raw",
action="store_true", dest="print_raw_data",
help="Outputs raw data used for plotting graphs")
parser.add_argument("-n", "--noplot",
action="store_false", dest="plot",
help="Skips plotting procedure")
parser.add_argument("-m", "--merged",
action="store_true", dest="merged",
help="Combines all scattering factor tables. Used for updating the tables in focus/atoms.h")
parser.add_argument("-c", "--xrs",
action="store_true", dest="xrs",
help="Plots scattering factors for XRS, including FORMGN lines for drcard.dat")
parser.add_argument("--topas",
action="store_true", dest="topas",
help="Print table compatible with topas (save as atmscat.cpp in topas4 root dir)")
parser.add_argument("--shelx",
action="store_true", dest="shelx",
help="Print SFAC cards for SHELX")
parser.set_defaults(table="xray",
plot=True,
print_raw_data=False,
merged=False,
combine=False,
s_range=[0, 2, 0.01],
xrs=False,
print_table=True,
topas=False)
options = parser.parse_args()
args = options.args
global tables
s = np.arange(*options.s_range)
atoms = []
if 'elements' in args:
args.remove('elements')
atoms += elements
if 'ions' in args:
args.remove('ions')
atoms += ions
if 'other' in args:
args.remove('other')
atoms += other
if 'all' in args:
args.remove('all')
atoms += elements + ions + other
if 'xrsall' in args:
args.remove('xrsall')
atoms += XRS.keys()
atoms += args
if options.table in ('xray', 'wk1995'):
kind = "xray"
tables = wk1995
elif options.table == 'electron':
kind = "electron"
tables = dict(list(peng1998.items()) + list(it_table_4322.items()))
elif options.table == 'it_table_4322':
kind = "electron"
tables = it_table_4322
elif options.table == 'it_table_4323':
kind = "electron"
tables = it_table_4323
elif options.table == 'peng1998':
kind = "electron"
tables = peng1998
else:
raise NameError('Unknown scattering factor table: {}'.format(options.table))
if options.xrs:
options.print_table = False
tables = dict(list(tables.items()) + list(xrs2table(XRS).items()))
print('Add these lines to drcard.dat and run datrdn\n')
for atom in atoms:
print(XRS.get(atom, 'Atom {} not in the table!'.format(atom)))
atoms += ['xrs'+atom for atom in atoms if atom in XRS.keys()]
if options.merged:
print_combine_tables(atoms)
elif options.topas:
print_table_topas(atoms)
elif options.shelx:
print_table_shelx(atoms, s, kind)
else:
if options.print_table:
print_table(atoms, kind)
if options.plot:
plot_sf_atoms(atoms, s, kind)
if options.print_raw_data:
print_xy_atoms(atoms, s, kind)
if __name__ == '__main__':
main()
| gpl-2.0 | -6,991,040,486,443,301,000 | 34.519924 | 222 | 0.490411 | false |
MinerKasch/dd-agent | utils/kubeutil.py | 1 | 7617 | # (C) Datadog, Inc. 2015-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import logging
import os
from urlparse import urljoin
# project
from util import check_yaml
from utils.checkfiles import get_conf_path
from utils.http import retrieve_json
from utils.singleton import Singleton
from utils.dockerutil import DockerUtil
import requests
log = logging.getLogger('collector')
KUBERNETES_CHECK_NAME = 'kubernetes'
def is_k8s():
return 'KUBERNETES_PORT' in os.environ
class KubeUtil:
__metaclass__ = Singleton
DEFAULT_METHOD = 'http'
METRICS_PATH = '/api/v1.3/subcontainers/'
PODS_LIST_PATH = '/pods/'
DEFAULT_CADVISOR_PORT = 4194
DEFAULT_KUBELET_PORT = 10255
DEFAULT_MASTER_PORT = 8080
DEFAULT_MASTER_NAME = 'kubernetes' # DNS name to reach the master from a pod.
CA_CRT_PATH = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'
AUTH_TOKEN_PATH = '/run/secrets/kubernetes.io/serviceaccount/token'
POD_NAME_LABEL = "io.kubernetes.pod.name"
NAMESPACE_LABEL = "io.kubernetes.pod.namespace"
def __init__(self, instance=None):
self.docker_util = DockerUtil()
if instance is None:
try:
config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
check_config = check_yaml(config_file_path)
instance = check_config['instances'][0]
# kubernetes.yaml was not found
except IOError as ex:
log.error(ex.message)
instance = {}
except Exception:
log.error('Kubernetes configuration file is invalid. '
'Trying connecting to kubelet with default settings anyway...')
instance = {}
self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
self.host = instance.get("host") or self.docker_util.get_hostname()
self._node_ip = self._node_name = None # lazy evaluation
self.host_name = os.environ.get('HOSTNAME')
self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
self.kubelet_port = instance.get('kubelet_port', KubeUtil.DEFAULT_KUBELET_PORT)
self.kubelet_api_url = '%s://%s:%d' % (self.method, self.host, self.kubelet_port)
self.cadvisor_url = '%s://%s:%d' % (self.method, self.host, self.cadvisor_port)
self.kubernetes_api_url = 'https://%s/api/v1' % self.DEFAULT_MASTER_NAME
self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
self.kube_health_url = urljoin(self.kubelet_api_url, 'healthz')
# keep track of the latest k8s event we collected and posted
# default value is 0 but TTL for k8s events is one hour anyways
self.last_event_collection_ts = defaultdict(int)
def get_kube_labels(self, excluded_keys=None):
pods = self.retrieve_pods_list()
return self.extract_kube_labels(pods, excluded_keys=excluded_keys)
def extract_kube_labels(self, pods_list, excluded_keys=None):
"""
Extract labels from a list of pods coming from
the kubelet API.
"""
excluded_keys = excluded_keys or []
kube_labels = defaultdict(list)
pod_items = pods_list.get("items") or []
for pod in pod_items:
metadata = pod.get("metadata", {})
name = metadata.get("name")
namespace = metadata.get("namespace")
labels = metadata.get("labels")
if name and labels and namespace:
key = "%s/%s" % (namespace, name)
for k, v in labels.iteritems():
if k in excluded_keys:
continue
kube_labels[key].append(u"kube_%s:%s" % (k, v))
return kube_labels
def extract_meta(self, pods_list, field_name):
"""
        Extract fields like `uid` or `name` from the `metadata` section of a
list of pods coming from the kubelet API.
        TODO: currently not in use; it was added to support event filtering, consider removing it.
"""
uids = []
pods = pods_list.get("items") or []
for p in pods:
value = p.get('metadata', {}).get(field_name)
if value is not None:
uids.append(value)
return uids
def retrieve_pods_list(self):
"""
Retrieve the list of pods for this cluster querying the kubelet API.
TODO: the list of pods could be cached with some policy to be decided.
"""
return retrieve_json(self.pods_list_url)
def retrieve_metrics(self):
"""
Retrieve metrics from Cadvisor.
"""
return retrieve_json(self.metrics_url)
def filter_pods_list(self, pods_list, host_ip):
"""
Filter out (in place) pods that are not running on the given host.
        TODO: currently not in use; it was added to support event filtering, consider removing it.
"""
pod_items = pods_list.get('items') or []
log.debug('Found {} pods to filter'.format(len(pod_items)))
filtered_pods = []
for pod in pod_items:
status = pod.get('status', {})
if status.get('hostIP') == host_ip:
filtered_pods.append(pod)
log.debug('Pods after filtering: {}'.format(len(filtered_pods)))
pods_list['items'] = filtered_pods
return pods_list
def retrieve_json_auth(self, url, auth_token, timeout=10):
"""
Kubernetes API requires authentication using a token available in
every pod.
We try to verify ssl certificate if available.
"""
verify = self.CA_CRT_PATH if os.path.exists(self.CA_CRT_PATH) else False
log.debug('ssl validation: {}'.format(verify))
headers = {'Authorization': 'Bearer {}'.format(auth_token)}
r = requests.get(url, timeout=timeout, headers=headers, verify=verify)
r.raise_for_status()
return r.json()
def get_node_info(self):
"""
Return the IP address and the hostname of the node where the pod is running.
"""
if None in (self._node_ip, self._node_name):
self._fetch_host_data()
return self._node_ip, self._node_name
def _fetch_host_data(self):
"""
        Retrieve the host name and IP address from the payload returned by the
        pod-listing endpoint of the kubelet or Kubernetes API.
The host IP address is different from the default router for the pod.
"""
pod_items = self.retrieve_pods_list().get("items") or []
for pod in pod_items:
metadata = pod.get("metadata", {})
name = metadata.get("name")
if name == self.host_name:
status = pod.get('status', {})
spec = pod.get('spec', {})
# if not found, use an empty string - we use None as "not initialized"
self._node_ip = status.get('hostIP', '')
self._node_name = spec.get('nodeName', '')
break
@classmethod
def get_auth_token(cls):
"""
Return a string containing the authorization token for the pod.
"""
try:
with open(cls.AUTH_TOKEN_PATH) as f:
return f.read()
except IOError as e:
log.error('Unable to read token from {}: {}'.format(cls.AUTH_TOKEN_PATH, e))
return None
| bsd-3-clause | -6,266,671,579,742,541,000 | 35.444976 | 97 | 0.598267 | false |
hzlf/openbroadcast | website/apps/atracker/models.py | 1 | 5334 | """Models for the ``object_events`` app."""
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import post_save
from django.template.defaultfilters import date
from django.utils.timesince import timesince
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
class EventType(models.Model):
"""
Masterdata table containing event types.
:title: Unique title of this event type. This will be used to decide which
notification message to display or which partial to render.
"""
title = models.SlugField(
max_length=255,
unique=True,
verbose_name=_('Title'),
help_text=_('Please use a slugified name, e.g. "student-news".'),
)
class Meta:
app_label = 'atracker'
verbose_name = _('Event Type')
verbose_name_plural = _('Event Types')
ordering = ('title',)
def __unicode__(self):
return self.title
class EventManager(models.Manager):
def by_obj(self, obj):
ctype = ContentType.objects.get_for_model(obj)
return self.get_query_set().filter(object_id=obj.pk, content_type=ctype)
class Event(models.Model):
"""
An event created by a user related to any object.
    :user: FK to the user who created this event. Leave this empty if this
      event was created automatically rather than by a user.
:created: Creation date of this event.
:event_type: Type of this event.
:email_sent: True, if user has received this event via email.
:read_by_user: True, if user has noticed this event.
:content_object: Generic foreign key to the object this event is attached
to. Leave this empty if it is a global event.
:event_content_object: Generic foreign key to the object that has been
created by this event. Leave this empty if the event did not create any
object.
"""
user = models.ForeignKey(
'auth.User',
verbose_name=_('User'),
related_name='atracker_events',
null=True, blank=True,
)
created = models.DateTimeField(
auto_now_add=True,
verbose_name=_('Creation date'),
)
event_type = models.ForeignKey(
EventType,
verbose_name=_('Type'),
related_name='events',
)
archived = models.BooleanField(default=False)
distributed = models.BooleanField(default=False)
# Generic FK to the object this event is attached to
content_type = models.ForeignKey(
ContentType,
related_name='event_content_objects',
null=True, blank=True
)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
# Generic FK to the object that created this event
event_content_type = models.ForeignKey(
ContentType,
related_name='event_objects',
null=True, blank=True
)
event_object_id = models.PositiveIntegerField(null=True, blank=True)
event_content_object = generic.GenericForeignKey(
'event_content_type', 'event_object_id')
objects = EventManager()
class Meta:
app_label = 'atracker'
verbose_name = _('Event')
verbose_name_plural = _('Events')
ordering = ('-created',)
@staticmethod
def create_event(user, content_object, event_content_object=None,
event_type=''):
"""
Creates an event for the given user, object and type.
        If the type doesn't exist yet, it will be created, so make sure that
you don't have any typos in your type title.
:param user: The user who created this event.
:param content_object: The object this event is attached to.
:param event_content_object: The object that created this event.
:event_type: String representing the type of this event.
"""
event_type_obj, created = EventType.objects.get_or_create(
title=event_type)
obj = Event(user=user, content_object=content_object,
event_type=event_type_obj)
if event_content_object is not None:
obj.event_content_object = event_content_object
obj.save()
return obj
def __unicode__(self):
return '{0}'.format(self.content_object)
"""
somehow obsolete... use |timesince template-tag
"""
def get_timesince(self):
delta = (now() - self.created)
if delta.days <= 1:
return '{0} ago'.format(timesince(self.created, now()))
if self.created.year != now().year:
return date(self.created, 'd F Y')
return date(self.created, 'd F')
def actstream_link(sender, instance, created, **kwargs):
from actstream import action
print 'actstream_link:'
print instance
try:
action.send(instance.user, verb=instance.event_type.title, target=instance.content_object)
except Exception, e:
print e
print 'DONE!!'
post_save.connect(actstream_link, sender=Event) | gpl-3.0 | -7,528,221,619,261,303,000 | 30.755952 | 98 | 0.648669 | false |
annavonzansen/exams | exams/migrations/0009_auto__add_subject__del_field_test_level__chg_field_test_subject.py | 1 | 11478 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Subject', removes relation that shouldn't exist at all...
db.delete_table(u'exams_subject')
# Adding model 'Subject'
db.create_table(u'exams_subject', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('short', self.gf('django.db.models.fields.CharField')(unique=True, max_length=3)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'exams', ['Subject'])
# Deleting field 'Test.level'
#db.delete_column(u'exams_test', 'level')
# Changing field 'Test.subject'
db.alter_column(u'exams_test', 'subject_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['exams.Subject']))
def backwards(self, orm):
# Deleting model 'Subject'
db.delete_table(u'exams_subject')
# Adding field 'Test.level'
db.add_column(u'exams_test', 'level',
self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True),
keep_default=False)
# Changing field 'Test.subject'
db.alter_column(u'exams_test', 'subject_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['education.Subject']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'exams.answer': {
'Meta': {'object_name': 'Answer'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Assignment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.answeroption': {
'Meta': {'object_name': 'AnswerOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'max_length': '255'})
},
u'exams.assignment': {
'Meta': {'ordering': "('order',)", 'object_name': 'Assignment'},
'answer_options': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.AnswerOption']", 'null': 'True', 'blank': 'True'}),
'assignment_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'attached_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['exams.File']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.examination': {
'Meta': {'object_name': 'Examination'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registration_begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['title']", 'overwrite': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.file': {
'Meta': {'object_name': 'File'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.test': {
'Meta': {'object_name': 'Test'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
}
}
complete_apps = ['exams'] | gpl-2.0 | -3,904,596,853,211,038,000 | 70.74375 | 211 | 0.557763 | false |
k1ltr0/jquery.ajax.uploader | sample/server.py | 1 | 1547 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import os
import base64
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("sample.html")
def post(self):
image_name = self.get_argument("name", "")
image_data = self.get_argument("data", "")
if image_data == "":
image_data = self.request.files['data'][0]['body']
# decode image data
data = image_data.split(",")
body = ""
        # sometimes the base64 body part is missing (blank data)
try:
body = data[1]
except Exception:
pass
try:
os.stat("uploads")
except Exception:
os.mkdir("uploads")
f = open(os.path.join("uploads", image_name), "wb")
f.write(base64.decodestring(body))
f.close()
# return image url
# self.write({ "thumb":"/static/sample/uploads/" + image_name})
self.write("/static/sample/uploads/" + image_name)
class CDNSample(tornado.web.RequestHandler):
"""sample connecting to Loadingplay's CDN"""
def get(self):
self.render("sample_cdn.html")
application = tornado.web.Application([
(r"/", MainHandler),
(r"/cdn", CDNSample)
],
template_path=os.path.dirname(__file__),
static_path=os.path.join('..'),
debug=True
)
if __name__ == "__main__":
application.listen(8888)
logging.info("Running server on port 8888")
tornado.ioloop.IOLoop.current().start()
| mit | 2,569,699,705,626,129,000 | 22.8 | 71 | 0.574014 | false |
sigmavirus24/github-cli | gh/commands/issue/assign.py | 1 | 1327 | from gh.base import Command
from gh.util import get_issue_number
from gh.util import mktmpfile, rmtmpfile
class IssueAssignCommand(Command):
name = 'issue.assign'
usage = '%prog [options] issue.assign [#]number assignee'
summary = 'Assign an issue'
subcommands = {}
def run(self, options, args):
self.get_repo(options)
opts, args = self.parser.parse_args(args)
if len(args) != 2:
print('issue.assign requires 2 arguments.')
self.help()
return self.FAILURE
if opts.help:
self.help()
number = get_issue_number(
            args, self.parser, 'issue.assign requires a valid number'
)
if number is None:
return self.FAILURE
return self.assign_issue(number, args[1])
def assign_issue(self, number, assignee):
self.login()
user, repo = self.repository
issue = self.gh.issue(user, repo, number)
if not issue:
print("Couldn't get an issule {0}#{1}".format(repo, number))
return self.FAILURE
if issue.assign(assignee):
print("Issue assigned successfully.")
return self.SUCCESS
else:
print("Assigning issue failed.")
return self.FAILURE
IssueAssignCommand()
| gpl-3.0 | -1,235,657,331,162,847,000 | 26.645833 | 72 | 0.589299 | false |
mikewesner-wf/glasshouse | glasshouse.indigoPlugin/Contents/Server Plugin/rest.py | 1 | 2064 | from flask import Flask, url_for, Response, json, jsonify, request
app = Flask(__name__)
import indigo
from decorators import requires_apitoken, requires_auth
import requests
import db
import settings
#
# Appspot Account Setup Process
@app.route('/token', methods=['PUT'])
def token():
new_api_token = request.headers.get('token')
# verify the token with the appengine app
verify_resp = requests.put(settings.CONTROL_APP + "/checktoken", headers={'token': new_api_token})
j_resp = verify_resp.json()
if j_resp['valid']:
db.set('token', new_api_token)
resp = jsonify({'success': True})
resp.status_code = 200
else:
resp = jsonify({'success': False})
resp.status_code = 200
return resp
@app.route('/dbtest', methods=['GET'])
def dbtest():
try:
indigo.server.log("db is " + str(db))
indigo.server.log(str(dir(db)))
indigo.server.log(str(type(db.GLOBALSETTINGS)))
indigo.server.log(str(db.get("mykey")))
db.put("mykey", "1")
indigo.server.log(str(db.get("mykey")))
db.put("mykey", "2")
indigo.server.log(str(db.get("mykey")))
except Exception, e:
return str(e) + "::: " + str(db)
return '1'
@app.route('/shutdown', methods=['POST'])
@requires_auth
def shutdown():
from flask import request
func = request.environ.get('werkzeug.server.shutdown')
func()
return 'Shutting Down.'
@app.route('/logs')
@requires_auth
def api_logs():
try:
resp = indigo.server.getEventLogList().replace("\n", "<br/>")
#resp = jsonify({"logs":indigo.server.getEventLogList()})
#resp.status_code = 200
except Exception, e:
indigo.server.log(str(e))
return None
return resp
@app.route('/devices', methods = ['GET'])
@requires_apitoken
def api_devices():
data = dict([(d.address, d.name) for d in indigo.devices])
resp = jsonify(data)
resp.status_code = 200
return resp
@app.route('/devices/<deviceid>')
def api_device_by_id(deviceid):
pass
| apache-2.0 | -8,721,313,835,489,108,000 | 24.8 | 102 | 0.62064 | false |