repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
ZacariasBendeck/youtube-dl | youtube_dl/extractor/hentaistigma.py | 109 | 1201 |
from __future__ import unicode_literals
from .common import InfoExtractor
class HentaiStigmaIE(InfoExtractor):
_VALID_URL = r'^https?://hentai\.animestigma\.com/(?P<id>[^/]+)'
_TEST = {
'url': 'http://hentai.animestigma.com/inyouchuu-etsu-bonus/',
'md5': '4e3d07422a68a4cc363d8f57c8bf0d23',
'info_dict': {
'id': 'inyouchuu-etsu-bonus',
'ext': 'mp4',
"title": "Inyouchuu Etsu Bonus",
"age_limit": 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h2[^>]+class="posttitle"[^>]*><a[^>]*>([^<]+)</a>',
webpage, 'title')
wrap_url = self._html_search_regex(
r'<iframe[^>]+src="([^"]+mp4)"', webpage, 'wrapper url')
wrap_webpage = self._download_webpage(wrap_url, video_id)
video_url = self._html_search_regex(
r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url')
return {
'id': video_id,
'url': video_url,
'title': title,
'age_limit': 18,
}
| unlicense | 5,707,690,199,560,478,000 | 29.794872 | 69 | 0.509575 | false |
SauloAislan/ironic | ironic/tests/unit/drivers/test_utils.py | 3 | 16883 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import fake
from ironic.drivers import utils as driver_utils
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class UtilsTestCase(db_base.DbTestCase):
def setUp(self):
super(UtilsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
self.node = obj_utils.create_test_node(self.context)
def test_vendor_interface_get_properties(self):
expected = {'A1': 'A1 description. Required.',
'A2': 'A2 description. Optional.',
'B1': 'B1 description. Required.',
'B2': 'B2 description. Required.'}
props = self.driver.vendor.get_properties()
self.assertEqual(expected, props)
@mock.patch.object(fake.FakeVendorA, 'validate', autospec=True)
def test_vendor_interface_validate_valid_methods(self,
mock_fakea_validate):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.vendor.validate(task, method='first_method')
mock_fakea_validate.assert_called_once_with(
self.driver.vendor.mapping['first_method'],
task, method='first_method')
def test_vendor_interface_validate_bad_method(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='fake_method')
def test_get_node_mac_addresses(self):
ports = []
ports.append(
obj_utils.create_test_port(
self.context,
address='aa:bb:cc:dd:ee:ff',
uuid='bb43dc0b-03f2-4d2e-ae87-c02d7f33cc53',
node_id=self.node.id)
)
ports.append(
obj_utils.create_test_port(
self.context,
address='dd:ee:ff:aa:bb:cc',
uuid='4fc26c0b-03f2-4d2e-ae87-c02d7f33c234',
node_id=self.node.id)
)
with task_manager.acquire(self.context, self.node.uuid) as task:
node_macs = driver_utils.get_node_mac_addresses(task)
self.assertEqual(sorted([p.address for p in ports]), sorted(node_macs))
def test_get_node_capability(self):
properties = {'capabilities': 'cap1:value1, cap2: value2'}
self.node.properties = properties
expected = 'value1'
expected2 = 'value2'
result = driver_utils.get_node_capability(self.node, 'cap1')
result2 = driver_utils.get_node_capability(self.node, 'cap2')
self.assertEqual(expected, result)
self.assertEqual(expected2, result2)
def test_get_node_capability_returns_none(self):
properties = {'capabilities': 'cap1:value1,cap2:value2'}
self.node.properties = properties
result = driver_utils.get_node_capability(self.node, 'capX')
self.assertIsNone(result)
def test_add_node_capability(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = ''
driver_utils.add_node_capability(task, 'boot_mode', 'bios')
self.assertEqual('boot_mode:bios',
task.node.properties['capabilities'])
def test_add_node_capability_append(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'a:b,c:d'
driver_utils.add_node_capability(task, 'boot_mode', 'bios')
self.assertEqual('a:b,c:d,boot_mode:bios',
task.node.properties['capabilities'])
def test_add_node_capability_append_duplicate(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'a:b,c:d'
driver_utils.add_node_capability(task, 'a', 'b')
self.assertEqual('a:b,c:d,a:b',
task.node.properties['capabilities'])
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
def test_ensure_next_boot_device(self, node_set_boot_device_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['persistent_boot_device'] = 'pxe'
driver_utils.ensure_next_boot_device(
task,
{'force_boot_device': True}
)
node_set_boot_device_mock.assert_called_once_with(task, 'pxe')
def test_ensure_next_boot_device_clears_is_next_boot_persistent(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['persistent_boot_device'] = 'pxe'
task.node.driver_internal_info['is_next_boot_persistent'] = False
driver_utils.ensure_next_boot_device(
task,
{'force_boot_device': True}
)
task.node.refresh()
self.assertNotIn('is_next_boot_persistent',
task.node.driver_internal_info)
def test_force_persistent_boot_true(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ipmi_force_boot_device'] = True
ret = driver_utils.force_persistent_boot(task, 'pxe', True)
self.assertIsNone(ret)
task.node.refresh()
self.assertIn(('persistent_boot_device', 'pxe'),
task.node.driver_internal_info.items())
self.assertNotIn('is_next_boot_persistent',
task.node.driver_internal_info)
def test_force_persistent_boot_false(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = driver_utils.force_persistent_boot(task, 'pxe', False)
self.assertIsNone(ret)
task.node.refresh()
self.assertEqual(
False,
task.node.driver_internal_info['is_next_boot_persistent'])
def test_capabilities_to_dict(self):
capabilities_more_than_one_item = 'a:b,c:d'
capabilities_exactly_one_item = 'e:f'
# Testing empty capabilities
self.assertEqual(
{},
driver_utils.capabilities_to_dict('')
)
self.assertEqual(
{'e': 'f'},
driver_utils.capabilities_to_dict(capabilities_exactly_one_item)
)
self.assertEqual(
{'a': 'b', 'c': 'd'},
driver_utils.capabilities_to_dict(capabilities_more_than_one_item)
)
def test_capabilities_to_dict_with_only_key_or_value_fail(self):
capabilities_only_key_or_value = 'xpto'
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_only_key_or_value
)
self.assertEqual('Malformed capabilities value: xpto', str(exc))
def test_capabilities_to_dict_with_invalid_character_fail(self):
for test_capabilities in ('xpto:a,', ',xpto:a'):
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
test_capabilities
)
self.assertEqual('Malformed capabilities value: ', str(exc))
def test_capabilities_to_dict_with_incorrect_format_fail(self):
for test_capabilities in (':xpto,', 'xpto:,', ':,'):
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
test_capabilities
)
self.assertEqual('Malformed capabilities value: ', str(exc))
def test_capabilities_not_string(self):
capabilities_already_dict = {'a': 'b'}
capabilities_something_else = 42
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_already_dict
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(dict), str(exc))
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_something_else
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(int), str(exc))
def test_normalize_mac_string(self):
mac_raw = "0A:1B-2C-3D:4F"
mac_clean = driver_utils.normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
def test_normalize_mac_unicode(self):
mac_raw = u"0A:1B-2C-3D:4F"
mac_clean = driver_utils.normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
class UtilsRamdiskLogsTestCase(tests_base.TestCase):
def setUp(self):
super(UtilsRamdiskLogsTestCase, self).setUp()
self.node = obj_utils.get_test_node(self.context)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_ramdisk_logs_file_name(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0)
name = driver_utils.get_ramdisk_logs_file_name(self.node)
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_'
'2000-01-01-00:00:00.tar.gz')
self.assertEqual(expected_name, name)
# with instance_info
instance_uuid = '7a5641ba-d264-424a-a9d7-e2a293ca482b'
node2 = obj_utils.get_test_node(
self.context, instance_uuid=instance_uuid)
name = driver_utils.get_ramdisk_logs_file_name(node2)
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_' +
instance_uuid + '_2000-01-01-00:00:00.tar.gz')
self.assertEqual(expected_name, name)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs(self, mock_collect, mock_store):
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
mock_store.assert_called_once_with(self.node, logs)
@mock.patch.object(driver_utils.LOG, 'error', autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs_IPA_command_fail(
self, mock_collect, mock_store, mock_log):
error_str = 'MR. KRABS! I WANNA GO TO BED!'
mock_collect.return_value = {'faultstring': error_str}
driver_utils.collect_ramdisk_logs(self.node)
# assert store was never invoked
self.assertFalse(mock_store.called)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error_str})
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs_storage_command_fail(
self, mock_collect, mock_store):
mock_collect.side_effect = exception.IronicException('boom')
self.assertIsNone(driver_utils.collect_ramdisk_logs(self.node))
self.assertFalse(mock_store.called)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def _collect_ramdisk_logs_storage_fail(
self, expected_exception, mock_collect, mock_store):
mock_store.side_effect = expected_exception
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
mock_store.assert_called_once_with(self.node, logs)
@mock.patch.object(driver_utils.LOG, 'exception', autospec=True)
def test_collect_ramdisk_logs_storage_fail_fs(self, mock_log):
error = IOError('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('file-system', mock_log.call_args[0][0])
@mock.patch.object(driver_utils.LOG, 'error', autospec=True)
def test_collect_ramdisk_logs_storage_fail_swift(self, mock_log):
error = exception.SwiftOperationError('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('Swift', mock_log.call_args[0][0])
@mock.patch.object(driver_utils.LOG, 'exception', autospec=True)
def test_collect_ramdisk_logs_storage_fail_unkown(self, mock_log):
error = Exception('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('Unknown error', mock_log.call_args[0][0])
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
@mock.patch.object(driver_utils,
'get_ramdisk_logs_file_name', autospec=True)
def test_store_ramdisk_logs_swift(self, mock_logs_name, mock_swift):
container_name = 'ironic_test_container'
file_name = 'ironic_test_file.tar.gz'
b64str = 'ZW5jb2RlZHN0cmluZw==\n'
cfg.CONF.set_override('deploy_logs_storage_backend', 'swift', 'agent')
cfg.CONF.set_override(
'deploy_logs_swift_container', container_name, 'agent')
cfg.CONF.set_override('deploy_logs_swift_days_to_expire', 1, 'agent')
mock_logs_name.return_value = file_name
driver_utils.store_ramdisk_logs(self.node, b64str)
mock_swift.return_value.create_object.assert_called_once_with(
container_name, file_name, mock.ANY,
object_headers={'X-Delete-After': '86400'})
mock_logs_name.assert_called_once_with(self.node)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(driver_utils,
'get_ramdisk_logs_file_name', autospec=True)
def test_store_ramdisk_logs_local(self, mock_logs_name, mock_makedirs):
file_name = 'ironic_test_file.tar.gz'
b64str = 'ZW5jb2RlZHN0cmluZw==\n'
log_path = '/foo/bar'
cfg.CONF.set_override('deploy_logs_local_path', log_path, 'agent')
mock_logs_name.return_value = file_name
with mock.patch.object(driver_utils, 'open', new=mock.mock_open(),
create=True) as mock_open:
driver_utils.store_ramdisk_logs(self.node, b64str)
expected_path = os.path.join(log_path, file_name)
mock_open.assert_called_once_with(expected_path, 'wb')
mock_makedirs.assert_called_once_with(log_path)
mock_logs_name.assert_called_once_with(self.node)
| apache-2.0 | -8,670,711,354,964,153,000 | 43.196335 | 79 | 0.616952 | false |
Arcanemagus/plexpy | lib/dns/rdtypes/ANY/NSEC.py | 18 | 4690 |
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
from dns._compat import xrange
class NSEC(dns.rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns.name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
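    # Illustrative example (hypothetical values): an NSEC record covering just
    # type A (1) and type MX (15) would carry a single window 0 whose bitmap
    # has bits 1 and 15 set, i.e. windows == [(0, b'\x40\x01')].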
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 +
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, bitmap[0:octets]))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
windows.append((window, bitmap[0:octets]))
return cls(rdclass, rdtype, next, windows)
def to_wire(self, file, compress=None, origin=None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(struct.pack('!BB', window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC too short")
window = wire[current]
octets = wire[current + 1]
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC bitmap length")
bitmap = bytearray(wire[current: current + octets].unwrap())
current += octets
rdlen -= octets
windows.append((window, bitmap))
if origin is not None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
def choose_relativity(self, origin=None, relativize=True):
self.next = self.next.choose_relativity(origin, relativize)
| gpl-3.0 | 4,216,438,806,497,371,000 | 36.222222 | 76 | 0.575906 | false |
PyGotham/pygotham | pygotham/frontend/profile.py | 2 | 2262 |
"""PyGotham user profiles."""
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from flask_login import current_user
from flask_security import login_required
from pygotham.core import db
from pygotham.frontend import route
from pygotham.models import Talk, Volunteer
__all__ = ('blueprint',)
blueprint = Blueprint(
'profile',
__name__,
subdomain='<event_slug>',
url_prefix='/profile',
)
@route(blueprint, '/dashboard/')
@login_required
def dashboard():
"""Return the user's dashboard."""
# TODO: Optionally, old proposals should be shown in a read-only mode.
talks = Talk.query.current.filter(Talk.user == current_user)
return render_template(
'profile/dashboard.html', talks=talks)
@route(blueprint, '/settings/', methods=('GET', 'POST'))
@login_required
def settings():
"""Return the user's settings."""
# TODO: How should this be handled? Should a speaker's bio be stored
# as a snapshot from event to event? It could be stored as part of a
# talks.models.Presentation.
from pygotham.forms import ProfileForm
form = ProfileForm(request.form, obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
db.session.commit()
flash('Your profile has been updated.', 'success')
return redirect(url_for('profile.settings'))
return render_template('profile/settings.html', form=form)
@route(blueprint, '/unvolunteer/')
@login_required
def unvolunteer():
"""Remove a user from being a volunteer."""
if current_user.is_volunteer:
volunteer = Volunteer.query.current.filter(
Volunteer.user == current_user).first()
db.session.delete(volunteer)
db.session.commit()
flash("We're sorry to see you change your mind!")
return redirect(url_for('profile.dashboard'))
@route(blueprint, '/volunteer/')
@login_required
def volunteer():
"""Sign up a user as a volunteer."""
if not current_user.is_volunteer:
volunteer = Volunteer(user=current_user, event=g.current_event)
db.session.add(volunteer)
db.session.commit()
flash('Thanks for volunteering!')
return redirect(url_for('profile.dashboard'))
| bsd-3-clause | 3,880,330,524,057,142,300 | 28.763158 | 74 | 0.677719 | false |
wcalvert/LPC11U_LPC13U_CodeBase | src/drivers/sensors/testscripts/plot_xyz_plus_mag_sma.py | 2 | 3774 |
#-------------------------------------------------------------------------------
# Name: plot_sensors_event.py
# Purpose: Plots logged sensors_event_t data from logger.c CSV files
#
# Author: K. Townsend
#
# Created: 09/06/2013
# Copyright: (c) K. Townsend 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import math
import numpy as np
import matplotlib.pyplot as plt
import Tkinter, tkFileDialog
from collections import deque
# This program will plot X/Y/Z data logged via drivers/storage/logger.c, and
# assumes we are getting vector data in CSV format generated using the
# 'sensorsLogSensorsEvent' helper function in drivers/sensors/sensors.c
#
# Data should look similar to the this:
#
# 0,1,5714,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5729,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5734,5.883990,-6.590069,-4.746419,0.000000
#
# In addition to the raw X/Y/Z data, vector magnitude is also calculated in
# a fourth data column
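# For example (illustrative, using the first sample row above): the magnitude
# for x=6.001670, y=-6.629296, z=-4.785645 is
# sqrt(6.001670**2 + 6.629296**2 + 4.785645**2) ~= 10.14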
class RingBuffer(deque):
def __init__(self, size_max):
deque.__init__(self)
self.size_max = size_max
def append(self, datum):
deque.append(self, datum)
if len(self) > self.size_max:
self.popleft( )
def tolist(self):
return list(self)
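# Usage sketch for RingBuffer (illustrative): with size_max=3, appending
# 1, 2, 3 and then 4 keeps only the last three samples, so tolist()
# returns [2, 3, 4].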
def main():
# Variables for our moving average filter
current = 0
avg = 0
total = 0
mavals = []
# Get window size (how many 'samples' are averaged together)
    windowsize = int(input("Window size (0..65535): "))
if (windowsize > 65535):
print ('Setting window size to 65535')
windowsize = 65535
if (windowsize < 1):
print ('Setting window size to 1')
windowsize = 1
# Request the data file to process
root = Tkinter.Tk()
root.withdraw()
filename = tkFileDialog.askopenfilename()
# Load the CSV file in 'data'
data = np.genfromtxt(filename,
delimiter=',',
dtype="i32,i32,i32,f32,f32,f32,f32",
names=['id','type','timestamp','x','y','z','a'])
# Create a circular buffer for our moving average filter
window = RingBuffer(size_max=windowsize)
# Calculate magnitude in column a
for x in np.nditer(data, op_flags=['readwrite']):
x['a'] = math.sqrt(
math.pow(x['x'], 2) +
math.pow(x['y'], 2) +
math.pow(x['z'], 2))
# Perform the moving average filter operations
current+=1
# Add magnitude into the ringbuffer
window.append(x['a'])
# Make sure we've reached 'windowlength' samples in the buffer
if (current <= windowsize):
mavals.append(0)
else:
# Get the current average based on the window content
li = window.tolist()
total = 0
for i in li:
total += i
avg = (float)(total/windowsize)
# Append ma output for plotting below
mavals.append(avg);
# Display the results
plt.title("SMA Filtered sensors_event_t Data (X/Y/Z + Magnitude)\nSMA Window Size = %d Samples"
% (windowsize))
plt.xlabel('Timestamp (ms)')
plt.ylabel('Value')
plt.xlim(data['timestamp'].min(), data['timestamp'].max()*1.1)
plt.grid(True)
plt.plot(data['timestamp'], data['x'], color='r', alpha = 0.25, label='x')
plt.plot(data['timestamp'], data['y'], color='g', alpha = 0.25, label='y')
plt.plot(data['timestamp'], data['z'], color='b', alpha = 0.25, label='z')
plt.plot(data['timestamp'], data['a'], color='m', alpha = 0.25, label='mag')
plt.plot(data['timestamp'], mavals, color="black", label="mag filtered")
plt.legend()
plt.show()
pass
if __name__ == '__main__':
main()
| bsd-3-clause | 1,037,113,969,853,388,500 | 32.39823 | 99 | 0.579491 | false |
Openergy/oplus | setup.py | 1 | 1042 |
from setuptools import setup, find_packages
from pkg_resources import parse_requirements
import os
with open(os.path.join("oplus", "version.py")) as f:
version = f.read().split("=")[1].strip().strip("'").strip('"')
with open("requirements.txt", "r") as f:
requirements = [str(r) for r in parse_requirements(f.read())]
setup(
name='oplus',
version=version,
packages=find_packages(),
author="Openergy development team",
author_email="[email protected]",
long_description=open('README.md').read(),
install_requires=requirements,
url='https://github.com/openergy/oplus',
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Natural Language :: French",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Physics",
],
package_data={'oplus': ['*.txt']},
include_package_data=True
)
| mpl-2.0 | -5,161,642,850,197,494,000 | 31.5625 | 66 | 0.637236 | false |
lude-ma/python-ivi | ivi/agilent/agilentMSO6052A.py | 7 | 1687 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent6000 import *
class agilentMSO6052A(agilent6000):
"Agilent InfiniiVision MSO6052A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO6052A')
super(agilentMSO6052A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
| mit | -2,294,360,695,260,446,700 | 37.340909 | 86 | 0.736811 | false |
Alluxio/alluxio | integration/vagrant/bin/spot_request.py | 6 | 7965 |
#!/usr/bin/env python
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
# -*- coding: utf-8 -*-
"""
Submit or Cancel spot instance requests.
When submitting, the process will block until the request is fulfilled
or the process is killed by the user (like CTRL + C);
if the process is killed, the requests will be automatically canceled.
"""
import os
import time
import pickle
import argparse
import subprocess
import yaml
import boto.ec2.blockdevicemapping as bdm
from util import mkdir_p, info, warn, error
from init_aws import get_conn, get_ec2_conf
def get_bdm(ec2_conf):
def device(d):
dev = bdm.BlockDeviceType()
if d['VirtualName'].startswith('ephemeral'):
# Instance Storage
dev.ephemeral_name = d['VirtualName']
else:
# EBS
dev.size = d['Ebs.VolumeSize']
delete = d.get('Ebs.DeleteOnTermination', None)
if delete is not None:
dev.delete_on_termination = delete
return (d['DeviceName'], dev)
devices = map(device, ec2_conf['Block_Device_Mapping'])
device_mapping = bdm.BlockDeviceMapping()
for name, dev in devices:
device_mapping[name] = dev
return device_mapping
def get_init_conf():
return yaml.load(open('conf/init.yml'))
class RequestFailedError(Exception): pass
def all_fulfilled(requests):
fulfilled = True
for r in requests:
if r.status.code != 'fulfilled':
fulfilled = False
if r.state == 'failed':
raise RequestFailedError(r.status.message)
if not fulfilled:
break
return fulfilled
def wait_until_fulfilled(request_ids, conn):
while True:
requests = conn.get_all_spot_instance_requests(request_ids)
if not all_fulfilled(requests):
time.sleep(1)
else:
return requests
def add_tag(host):
return '{}-{}'.format(get_ec2_conf()['Tag'], host)
def get_host(tag):
return tag.split('-')[-1]
# request_id -> tag
def request_id_to_tag(requests, masters):
ret = {}
for i, rid in enumerate([r.id for r in requests]):
# TODO(cc): This naming convention for host may need changes
if i == 0:
host = 'AlluxioMaster'
elif i < masters:
host = 'AlluxioMaster{}'.format(i + 1)
else:
host = 'AlluxioWorker{}'.format(i - masters + 1)
ret[rid] = add_tag(host)
return ret
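# For illustration (with a hypothetical 'Tag' value of "alluxio"): masters=2 and
# four fulfilled requests yield the tags alluxio-AlluxioMaster,
# alluxio-AlluxioMaster2, alluxio-AlluxioWorker1 and alluxio-AlluxioWorker2.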
def save_request_ids(request_ids):
out = open('.request_ids', 'w')
pickle.dump(request_ids, out)
out.close()
def load_request_ids():
return pickle.load(open('.request_ids'))
def submit_request(conn, ec2_conf, masters):
# enable ssh as root without tty
user_data = "#!/bin/bash\n \
echo 'Defaults:root !requiretty' > /etc/sudoers.d/998-vagrant-cloud-init-requiretty && \
echo 'Defaults:ec2-user !requiretty' > /etc/sudoers.d/999-vagrant-cloud-init-requiretty && \
chmod 440 /etc/sudoers.d/998-vagrant-cloud-init-requiretty && chmod 440 /etc/sudoers.d/999-vagrant-cloud-init-requiretty"
requests = conn.request_spot_instances(
price = ec2_conf['Spot_Price'],
image_id = ec2_conf['AMI'],
count = get_init_conf()['MachineNumber'],
availability_zone_group = ec2_conf['Availability_Zone'],
placement = ec2_conf['Availability_Zone'], # where to put instance
key_name = ec2_conf['Keypair'],
security_groups = [ec2_conf['Security_Group']],
user_data = user_data,
instance_type = ec2_conf['Instance_Type'],
block_device_map = get_bdm(ec2_conf))
request_ids = [r.id for r in requests]
save_request_ids(request_ids)
# sleep before waiting for spot instances to be fulfilled.
time.sleep(5)
# block, waiting for all requests to be fulfilled
requests = wait_until_fulfilled(request_ids, conn)
# tag the requests and instances
rid_tag = request_id_to_tag(requests, masters)
for r in requests:
tag = rid_tag[r.id]
r.add_tag('Name', tag)
conn.create_tags([r.instance_id], {'Name': tag})
return rid_tag, requests
def cancel_request(conn):
warn('canceling spot instance requests and terminating instances...')
requests = conn.get_all_spot_instance_requests(load_request_ids())
for r in requests:
r.cancel()
instance_ids = [r.instance_id for r in requests if r.instance_id is not None]
if len(instance_ids) > 0:
conn.terminate_instances(instance_ids)
# mock the inventory file and machine id files that should have
# been generated by vagrant, so that we can keep the vagrant work flow.
def mock_vagrant_info(instance_id_to_tag_ip):
inventory_dir = '.vagrant/provisioners/ansible/inventory'
mkdir_p(inventory_dir)
inventory = open(os.path.join(inventory_dir, 'vagrant_ansible_inventory'), 'w')
for instance_id, tag_ip in instance_id_to_tag_ip.iteritems():
tag, ip = tag_ip
host = get_host(tag)
inventory.write("{} ansible_ssh_host={} ansible_ssh_port=22\n".format(host, ip))
id_dir = os.path.join('.vagrant', 'machines', host, 'aws')
mkdir_p(id_dir)
with open(os.path.join(id_dir, 'id'), 'w') as f:
f.write(instance_id)
inventory.close()
def is_ssh_ready(host):
s = subprocess.Popen(['ssh',
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'ConnectTimeout=30',
'-i', os.path.expanduser(get_ec2_conf()['Key_Path']),
'%s@%s' % ('ec2-user', host),
'true'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
s.communicate()
return s.returncode == 0
def wait_for_ssh(hosts):
while len(hosts):
hosts = [h for h in hosts if not is_ssh_ready(h)]
def parse():
parser = argparse.ArgumentParser()
grp = parser.add_mutually_exclusive_group(required=True)
grp.add_argument('-s', '--submit', action='store_true')
grp.add_argument('-c', '--cancel', action='store_true')
parser.add_argument('--masters', type=int, default=1, help='number of Alluxio masters')
return parser.parse_args()
def main(args):
ec2_conf = get_ec2_conf()
conn = get_conn()
if args.submit:
info('waiting for spot instance requests to be fulfilled, you can cancel by ctrl+c ...')
try:
rid_tag, requests = submit_request(conn, ec2_conf, args.masters)
except (KeyboardInterrupt, RequestFailedError) as e:
error(e)
exit(1)
info('spot instance requests fulfilled')
instance_id_to_tag_ip = {}
info('getting instance IPs...')
for r in requests:
instance_id = r.instance_id
info('waiting for ip to be allocated to the machine')
ip = conn.get_only_instances([instance_id])[0].ip_address
while ip is None:
time.sleep(1)
ip = conn.get_only_instances([instance_id])[0].ip_address
instance_id_to_tag_ip[instance_id] = (rid_tag[r.id], ip)
info('mocking vagrant info under .vagrant...')
mock_vagrant_info(instance_id_to_tag_ip)
info('creation of spot instances done')
info('waiting for ssh to be available...')
wait_for_ssh([ip for tag, ip in instance_id_to_tag_ip.values()])
info('ssh for all instances are ready')
elif args.cancel:
cancel_request(conn)
if __name__ == '__main__':
main(parse())
| apache-2.0 | -5,987,553,202,681,328,000 | 31.913223 | 129 | 0.629253 | false |
brandon-rhodes/numpy | numpy/polynomial/tests/test_printing.py | 208 | 2004 |
from __future__ import division, absolute_import, print_function
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0, 1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0, 1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0, 1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0, 1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0, 1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0, 1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 8,789,086,979,464,637,000 | 26.081081 | 64 | 0.504491 | false |
CauldronDevelopmentLLC/buildbot | buildbot/test/test_svnpoller.py | 2 | 16276 |
# -*- test-case-name: buildbot.test.test_svnpoller -*-
import time
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.changes.svnpoller import SVNPoller
# this is the output of "svn info --xml
# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
prefix_output = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="trunk"
revision="18354">
<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
<repository>
<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
</repository>
<commit
revision="18352">
<author>jml</author>
<date>2006-10-01T02:37:34.063255Z</date>
</commit>
</entry>
</info>
"""
# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
# think this is kind of a degenerate case.. it might even be a form of error.
prefix_output_2 = """\
<?xml version="1.0"?>
<info>
</info>
"""
# this is the svn info output for a local repository, svn info --xml
# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
prefix_output_3 = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="SVN-Repository"
revision="3">
<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
<repository>
<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
</repository>
<commit
revision="3">
<author>warner</author>
<date>2006-10-01T07:37:04.182499Z</date>
</commit>
</entry>
</info>
"""
# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
prefix_output_4 = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="trunk"
revision="3">
<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
<repository>
<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
</repository>
<commit
revision="1">
<author>warner</author>
<date>2006-10-01T07:37:02.286440Z</date>
</commit>
</entry>
</info>
"""
class ComputePrefix(unittest.TestCase):
def test1(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
s = SVNPoller(base + "/")
self.failUnlessEqual(s.svnurl, base) # certify slash-stripping
prefix = s.determine_prefix(prefix_output)
self.failUnlessEqual(prefix, "trunk")
self.failUnlessEqual(s._prefix, prefix)
def test2(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_2)
self.failUnlessEqual(prefix, "")
def test3(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_3)
self.failUnlessEqual(prefix, "")
def test4(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_4)
self.failUnlessEqual(prefix, "sample/trunk")
# output from svn log on .../SVN-Repository/sample
# (so it includes trunk and branches)
sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
sample_logentries = [None] * 6
sample_logentries[5] = """\
<logentry
revision="6">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="D">/sample/branch/version.c</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[4] = """\
<logentry
revision="5">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="D">/sample/branch</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[3] = """\
<logentry
revision="4">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="M">/sample/trunk/version.c</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[2] = """\
<logentry
revision="3">
<author>warner</author>
<date>2006-10-01T19:35:10.215692Z</date>
<paths>
<path
action="M">/sample/branch/main.c</path>
</paths>
<msg>commit_on_branch</msg>
</logentry>
"""
sample_logentries[1] = """\
<logentry
revision="2">
<author>warner</author>
<date>2006-10-01T19:35:09.154973Z</date>
<paths>
<path
copyfrom-path="/sample/trunk"
copyfrom-rev="1"
action="A">/sample/branch</path>
</paths>
<msg>make_branch</msg>
</logentry>
"""
sample_logentries[0] = """\
<logentry
revision="1">
<author>warner</author>
<date>2006-10-01T19:35:08.642045Z</date>
<paths>
<path
action="A">/sample</path>
<path
action="A">/sample/trunk</path>
<path
action="A">/sample/trunk/subdir/subdir.c</path>
<path
action="A">/sample/trunk/main.c</path>
<path
action="A">/sample/trunk/version.c</path>
<path
action="A">/sample/trunk/subdir</path>
</paths>
<msg>sample_project_files</msg>
</logentry>
"""
sample_info_output = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="sample"
revision="4">
<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
<repository>
<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
</repository>
<commit
revision="4">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
</commit>
</entry>
</info>
"""
changes_output_template = """\
<?xml version="1.0"?>
<log>
%s</log>
"""
def make_changes_output(maxrevision):
# return what 'svn log' would have just after the given revision was
# committed
logs = sample_logentries[0:maxrevision]
assert len(logs) == maxrevision
logs.reverse()
output = changes_output_template % ("".join(logs))
return output
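# For example, make_changes_output(2) returns a <log> document containing
# revisions 2 and 1, newest first, mimicking real 'svn log' output.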
def split_file(path):
pieces = path.split("/")
if pieces[0] == "branch":
return "branch", "/".join(pieces[1:])
if pieces[0] == "trunk":
return None, "/".join(pieces[1:])
raise RuntimeError("there shouldn't be any files like %s" % path)
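# e.g. split_file("branch/main.c") -> ("branch", "main.c") and
# split_file("trunk/version.c") -> (None, "version.c")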
class MySVNPoller(SVNPoller):
def __init__(self, *args, **kwargs):
SVNPoller.__init__(self, *args, **kwargs)
self.pending_commands = []
self.finished_changes = []
def getProcessOutput(self, args):
d = defer.Deferred()
self.pending_commands.append((args, d))
return d
def submit_changes(self, changes):
self.finished_changes.extend(changes)
class ComputeChanges(unittest.TestCase):
def test1(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base)
s._prefix = "sample"
output = make_changes_output(4)
doc = s.parse_logs(output)
newlast, logentries = s._filter_new_logentries(doc, 4)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 0)
newlast, logentries = s._filter_new_logentries(doc, 3)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 1)
newlast, logentries = s._filter_new_logentries(doc, 1)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 3)
newlast, logentries = s._filter_new_logentries(doc, None)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 0)
def testChanges(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base, split_file=split_file)
s._prefix = "sample"
doc = s.parse_logs(make_changes_output(3))
newlast, logentries = s._filter_new_logentries(doc, 1)
# so we see revisions 2 and 3 as being new
self.failUnlessEqual(newlast, 3)
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 2)
self.failUnlessEqual(changes[0].branch, "branch")
self.failUnlessEqual(changes[0].revision, '2')
self.failUnlessEqual(changes[1].branch, "branch")
self.failUnlessEqual(changes[1].files, ["main.c"])
self.failUnlessEqual(changes[1].revision, '3')
# and now pull in r4
doc = s.parse_logs(make_changes_output(4))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 4)
# so we see revision 4 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 1)
self.failUnlessEqual(changes[0].branch, None)
self.failUnlessEqual(changes[0].revision, '4')
self.failUnlessEqual(changes[0].files, ["version.c"])
# and now pull in r5 (should *not* create a change as it's a
# branch deletion
doc = s.parse_logs(make_changes_output(5))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 5)
# so we see revision 5 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 0)
# and now pull in r6 (should create a change as it's not
# deleting an entire branch
doc = s.parse_logs(make_changes_output(6))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 6)
# so we see revision 6 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 1)
self.failUnlessEqual(changes[0].branch, 'branch')
self.failUnlessEqual(changes[0].revision, '6')
self.failUnlessEqual(changes[0].files, ["version.c"])
def testFirstTime(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base, split_file=split_file)
s._prefix = "sample"
doc = s.parse_logs(make_changes_output(4))
logentries = s.get_new_logentries(doc)
# SVNPoller ignores all changes that happened before it was started
self.failUnlessEqual(len(logentries), 0)
self.failUnlessEqual(s.last_change, 4)
class Misc(unittest.TestCase):
def testAlreadyWorking(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = MySVNPoller(base)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(s.overrun_counter, 0)
d2 = s.checksvn()
self.failUnlessEqual(s.overrun_counter, 1)
self.failUnlessEqual(len(s.pending_commands), 1)
def testGetRoot(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
s = MySVNPoller(base)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["info", "--xml", "--non-interactive", base])
def makeTime(timestring):
datefmt = '%Y/%m/%d %H:%M:%S'
when = time.mktime(time.strptime(timestring, datefmt))
return when
class Everything(unittest.TestCase):
def test1(self):
s = MySVNPoller(sample_base, split_file=split_file)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["info", "--xml", "--non-interactive",
sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(sample_info_output)
# now it should be waiting for the 'svn log' command
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(1))
# the command ignores the first batch of changes
self.failUnlessEqual(len(s.finished_changes), 0)
self.failUnlessEqual(s.last_change, 1)
# now fire it again, nothing changing
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(1))
# nothing has changed
self.failUnlessEqual(len(s.finished_changes), 0)
self.failUnlessEqual(s.last_change, 1)
# and again, with r2 this time
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(2))
# r2 should appear
self.failUnlessEqual(len(s.finished_changes), 1)
self.failUnlessEqual(s.last_change, 2)
c = s.finished_changes[0]
self.failUnlessEqual(c.branch, "branch")
self.failUnlessEqual(c.revision, '2')
self.failUnlessEqual(c.files, [''])
# TODO: this is what creating the branch looks like: a Change with a
# zero-length file. We should decide if we want filenames like this
# in the Change (and make sure nobody else gets confused by it) or if
# we want to strip them out.
self.failUnlessEqual(c.comments, "make_branch")
# and again at r2, so nothing should change
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(2))
# nothing has changed
self.failUnlessEqual(len(s.finished_changes), 1)
self.failUnlessEqual(s.last_change, 2)
# and again with both r3 and r4 appearing together
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(4))
self.failUnlessEqual(len(s.finished_changes), 3)
self.failUnlessEqual(s.last_change, 4)
c3 = s.finished_changes[1]
self.failUnlessEqual(c3.branch, "branch")
self.failUnlessEqual(c3.revision, '3')
self.failUnlessEqual(c3.files, ["main.c"])
self.failUnlessEqual(c3.comments, "commit_on_branch")
c4 = s.finished_changes[2]
self.failUnlessEqual(c4.branch, None)
self.failUnlessEqual(c4.revision, '4')
self.failUnlessEqual(c4.files, ["version.c"])
self.failUnlessEqual(c4.comments, "revised_to_2")
self.failUnless(abs(c4.when - time.time()) < 60)
# TODO:
# get coverage of split_file returning None
# point at a live SVN server for a little while
| gpl-2.0 | 979,710,681,418,245,200 | 33.193277 | 139 | 0.642173 | false |
greg-hellings/ansible-modules-extras | monitoring/sensu_check.py | 42 | 11565 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
- All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
- they are simply specified for your convenience.
options:
name:
description:
- The name of the check
- This is the key that is used to determine whether a check exists
required: true
state:
description: Whether the check should be present or not
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the json file of the check to be added/removed.
- Will be created if it does not exist (unless I(state=absent)).
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
required: false
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so
- you can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
command:
description:
- Path to the sensu check to run (not required when I(state=absent))
required: true
handlers:
description:
- List of handlers to notify when the check fails
required: false
default: []
subscribers:
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
required: false
default: []
interval:
description:
- Check interval in seconds
required: false
default: null
timeout:
description:
- Timeout for the check
required: false
default: 10
handle:
description:
- Whether the check should be handled or not
choices: [ 'yes', 'no' ]
required: false
default: yes
subdue_begin:
description:
- When to disable handling of check failures
required: false
default: null
subdue_end:
description:
- When to enable handling of check failures
required: false
default: null
dependencies:
description:
- Other checks this check depends on, if dependencies fail,
- handling of this check will be disabled
required: false
default: []
metric:
description: Whether the check is a metric
choices: [ 'yes', 'no' ]
required: false
default: no
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- This option obviates the need for specifying the I(subscribers) option
choices: [ 'yes', 'no' ]
required: false
default: no
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
choices: [ 'yes', 'no' ]
required: false
default: yes
occurrences:
description:
- Number of event occurrences before the handler should take action
required: false
default: 1
refresh:
description:
- Number of seconds handlers should wait before taking second action
required: false
default: null
aggregate:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
choices: [ 'yes', 'no' ]
required: false
default: no
low_flap_threshold:
description:
      - The low threshold for flap detection
required: false
default: null
high_flap_threshold:
description:
      - The high threshold for flap detection
required: false
default: null
requirements: [ ]
author: Anders Ingemann
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
sensu_check: name=cpu_load
command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric=yes handlers=relay subscribers=common interval=60
# Check whether nginx is running
- name: check nginx process
sensu_check: name=nginx_running
command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
handlers=default subscribers=nginx interval=60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
sensu_check: name=check_disk_capacity
'''
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
try:
import json
except ImportError:
import simplejson as json
    stream = None
    try:
        try:
            stream = open(path, 'r')
            config = json.load(stream)
        except IOError, e:
            if e.errno == 2:  # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
finally:
if stream:
stream.close()
if 'checks' not in config:
if state == 'absent':
reasons.append('`checks\' section did not exist and state is `absent\'')
return changed, reasons
config['checks'] = {}
changed = True
reasons.append('`checks\' section did not exist')
if state == 'absent':
if name in config['checks']:
del config['checks'][name]
changed = True
reasons.append('check was present and state is `absent\'')
if state == 'present':
if name not in config['checks']:
check = {}
config['checks'][name] = check
changed = True
reasons.append('check was absent and state is `present\'')
else:
check = config['checks'][name]
simple_opts = ['command',
'handlers',
'subscribers',
'interval',
'timeout',
'handle',
'dependencies',
'standalone',
'publish',
'occurrences',
'refresh',
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
]
for opt in simple_opts:
if module.params[opt] is not None:
if opt not in check or check[opt] != module.params[opt]:
check[opt] = module.params[opt]
changed = True
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
else:
if opt in check:
del check[opt]
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
changed = True
reasons.append('`type\' was not defined or not `metric\'')
if not module.params['metric'] and 'type' in check:
del check['type']
changed = True
reasons.append('`type\' was defined')
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
subdue = {'begin': module.params['subdue_begin'],
'end': module.params['subdue_end'],
}
if 'subdue' not in check or check['subdue'] != subdue:
check['subdue'] = subdue
changed = True
reasons.append('`subdue\' did not exist or was different')
else:
if 'subdue' in check:
del check['subdue']
changed = True
reasons.append('`subdue\' was removed')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
except IOError, e:
module.fail_json(msg=str(e))
finally:
if stream:
stream.close()
return changed, reasons
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
'command': {'type': 'str'},
'handlers': {'type': 'list'},
'subscribers': {'type': 'list'},
'interval': {'type': 'int'},
'timeout': {'type': 'int'},
'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
'metric': {'type': 'bool', 'default': 'no'},
'standalone': {'type': 'bool'},
'publish': {'type': 'bool'},
'occurrences': {'type': 'int'},
'refresh': {'type': 'int'},
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
}
required_together = [['subdue_begin', 'subdue_end']]
module = AnsibleModule(argument_spec=arg_spec,
required_together=required_together,
supports_check_mode=True)
if module.params['state'] != 'absent' and module.params['command'] is None:
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_check(module, path, name, state, backup)
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 6,896,624,655,275,960,000 | 33.419643 | 104 | 0.570255 | false |
kushal124/gensim | gensim/test/test_utils.py | 53 | 2863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions.
"""
import logging
import unittest
from gensim import utils
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
result = utils.is_corpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
        # these are not valid corpora, because they do not consist of
        # 2-tuples of the form (int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.is_corpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
class TestUtils(unittest.TestCase):
def test_decode_entities(self):
# create a string that fails to decode with unichr on narrow python builds
body = u'It’s the Year of the Horse. YES VIN DIESEL 🙌 💯'
expected = u'It\x92s the Year of the Horse. YES VIN DIESEL \U0001f64c \U0001f4af'
        self.assertEqual(utils.decode_htmlentities(body), expected)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
| gpl-3.0 | -8,537,000,927,375,444,000 | 31.908046 | 89 | 0.613343 | false |
randynobx/ansible | test/runner/lib/changes.py | 57 | 5755 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason):
"""
:type branch: str
:type reason: str
"""
message = 'Invalid branch: %s\n%s' % (branch, reason)
super(InvalidBranch, self).__init__(message)
self.branch = branch
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
pass
class ShippableChanges(object):
"""Change information for Shippable build."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
try:
self.branch = os.environ['BRANCH']
self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
self.commit = os.environ['COMMIT']
self.project_id = os.environ['PROJECT_ID']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.is_tag:
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
if self.is_pr:
self.paths = sorted(git.get_diff_names(['origin/%s' % self.branch, '--']))
self.diff = git.get_diff(['origin/%s' % self.branch, '--'])
else:
merge_runs = self.get_merge_runs(self.project_id, self.branch)
last_successful_commit = self.get_last_successful_commit(git, merge_runs)
if last_successful_commit:
self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
self.diff = git.get_diff([last_successful_commit, self.commit])
else:
# tracked files (including unchanged)
self.paths = sorted(git.get_file_names(['--cached']))
self.diff = []
def get_merge_runs(self, project_id, branch):
"""
:type project_id: str
:type branch: str
:rtype: list[dict]
"""
params = dict(
isPullRequest='false',
projectIds=project_id,
branch=branch,
)
client = HttpClient(self.args, always=True)
response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
return response.json()
@staticmethod
def get_last_successful_commit(git, merge_runs):
"""
:type git: Git
:type merge_runs: dict | list[dict]
:rtype: str
"""
if 'id' in merge_runs and merge_runs['id'] == 4004:
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return None
merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
known_commits = set()
last_successful_commit = None
for merge_run in merge_runs:
commit_sha = merge_run['commitSha']
if commit_sha not in known_commits:
known_commits.add(commit_sha)
if merge_run['statusCode'] == 30:
if git.is_valid_ref(commit_sha):
last_successful_commit = commit_sha
return last_successful_commit
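# Illustrative usage sketch (not part of the original file). It assumes a
# Shippable CI environment exporting BRANCH, IS_PULL_REQUEST, IS_GIT_TAG,
# COMMIT and PROJECT_ID; the CommonConfig/Git construction below is hypothetical.
#
# args = CommonConfig(...)          # hypothetical; see lib.util
# git = Git(args)                   # hypothetical; see lib.git
# changes = ShippableChanges(args, git)
# print(changes.paths)              # paths changed since the last successful merge run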
class LocalChanges(object):
"""Change information for local work."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
self.current_branch = git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = git.get_diff([self.fork_point])
@staticmethod
def is_official_branch(name):
"""
:type name: str
:rtype: bool
"""
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
| gpl-3.0 | -4,119,838,988,702,826,500 | 31.331461 | 108 | 0.57967 | false |
suninsky/ReceiptOCR | Python/server/lib/python2.7/site-packages/werkzeug/http.py | 85 | 36658 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib.request import parse_http_list as _parse_list_header
from urllib.parse import unquote_to_bytes as _unquote
except ImportError: # pragma: no cover
from urllib2 import parse_http_list as _parse_list_header, \
unquote as _unquote
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
_cookie_charset = 'latin1'
# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
r'''( # media-range capturing-parenthesis
[^\s;,]+ # type/subtype
(?:[ \t]*;[ \t]* # ";"
(?: # parameter non-capturing-parenthesis
[^\s;,q][^\s;,]* # token that doesn't start with "q"
| # or
q[^\s;,=][^\s;,]* # token that is more than just "q"
)
)* # zero or more parameters
) # end of media-range
(?:[ \t]*;[ \t]*q= # weight is a "q" parameter
(\d*(?:\.\d+)?) # qvalue capturing-parentheses
[^,]* # "extension" accept params: who cares?
)? # accept params are optional
''', re.VERBOSE)
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*(%s|[^\s;,=\*]+)\s*'
r'(?:\*?=\s*(?:([^\s]+?)\'([^\s]*?)\')?(%s|[^;,]+)?)?\s*' %
(_quoted_string_re, _quoted_string_re)
)
_option_header_start_mime_type = re.compile(r',\s*([^;,\s]+)([;,]\s*.+)?')
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
451: 'Unavailable For Legal Reasons',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones
"""
if isinstance(data, bytes):
return data
return data.encode('latin1') # XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
    the type with a dict-like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
# XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value, multiple=False):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True
"""
if not value:
return '', {}
result = []
value = "," + value.replace("\n", ",")
while value:
match = _option_header_start_mime_type.match(value)
if not match:
break
result.append(match.group(1)) # mimetype
options = {}
# Parse options
rest = match.group(2)
while rest:
optmatch = _option_header_piece_re.match(rest)
if not optmatch:
break
option, encoding, _, option_value = optmatch.groups()
option = unquote_header_value(option)
if option_value is not None:
option_value = unquote_header_value(
option_value,
option == 'filename')
if encoding is not None:
option_value = _unquote(option_value).decode(encoding)
options[option] = option_value
rest = rest[optmatch.end():]
result.append(options)
if multiple is False:
return tuple(result)
value = rest
return tuple(result) if result else ('', {})
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
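# Illustrative example (assumes the Accept helpers from werkzeug.datastructures):
#
# >>> accept = parse_accept_header('text/html, application/json;q=0.8')
# >>> accept.best
# 'text/html'
# >>> accept.quality('application/json')
# 0.8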
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b'basic':
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception:
return
return Authorization('basic', {'username': bytes_to_wsgi(username),
'password': bytes_to_wsgi(password)})
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'response':
if key not in auth_map:
return
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return
return Authorization('digest', auth_map)
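# Illustrative example (the credential string is the base64 of "user:pass"):
#
# >>> auth = parse_authorization_header('Basic dXNlcjpwYXNz')
# >>> auth.type, auth.username, auth.password
# ('basic', 'user', 'pass')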
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
try:
begin = int(item)
except ValueError:
return None
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = begin.strip()
end = end.strip()
if not begin.isdigit():
return None
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
if not end.isdigit():
return None
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
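# Illustrative examples (note the exclusive end of each range tuple):
#
# >>> parse_range_header('bytes=0-499').ranges
# [(0, 500)]
# >>> parse_range_header('bytes=500-').ranges
# [(500, None)]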
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'W/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('W/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag.startswith(('W/', 'w/')):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
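# Illustrative example:
#
# >>> etags = parse_etags('"abc", W/"def"')
# >>> etags.contains('abc'), etags.contains_weak('def')
# (True, True)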
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
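# Illustrative example:
#
# >>> parse_date('Sun, 06 Nov 1994 08:49:37 GMT')
# datetime.datetime(1994, 11, 6, 8, 49, 37)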
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
    Accepts a floating point number expressed in seconds since the epoch, a
    datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
    Accepts a floating point number expressed in seconds since the epoch, a
    datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None,
ignore_if_range=True):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:param ignore_if_range: If `False`, `If-Range` header will be taken into
account.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, string_types):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
if_range = None
if not ignore_if_range and 'HTTP_RANGE' in environ:
# http://tools.ietf.org/html/rfc7233#section-3.2
# A server MUST ignore an If-Range header field received in a request
# that does not contain a Range header field.
if_range = parse_if_range_header(environ.get('HTTP_IF_RANGE'))
if if_range is not None and if_range.date is not None:
modified_since = if_range.date
else:
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
etag, _ = unquote_etag(etag)
if if_range is not None and if_range.etag is not None:
unmodified = parse_etags(if_range.etag).contains(etag)
else:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
# http://tools.ietf.org/html/rfc7232#section-3.2
# "A recipient MUST use the weak comparison function when comparing
# entity-tags for If-None-Match"
unmodified = if_none_match.contains_weak(etag)
return not unmodified
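# Illustrative example:
#
# >>> env = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
# >>> is_resource_modified(env, etag='abc')
# False                      # the client's cached copy is still current
# >>> is_resource_modified({'REQUEST_METHOD': 'GET'}, etag='abc')
# True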
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
"""
return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
elif header is None:
header = ''
    # If the value is a unicode string it's mangled through latin1. This
    # is done because under PEP 3333 on Python 3 all headers are assumed to be
    # latin1, which however is incorrect for cookies, which are sent in the
    # page encoding. As a result we re-encode the header back to latin1 here
    # so the cookie values can be decoded with the requested charset below.
if isinstance(header, text_type):
header = header.encode('latin1', 'replace')
if cls is None:
cls = TypeConversionDict
def _parse_pairs():
for key, val in _cookie_parse_impl(header):
key = to_unicode(key, charset, errors, allow_none_charset=True)
val = to_unicode(val, charset, errors, allow_none_charset=True)
yield try_coerce_native(key), val
return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False,
charset='utf-8', sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in ((b'Domain', domain, True),
(b'Expires', expires, False,),
(b'Max-Age', max_age, False),
(b'Secure', secure, None),
(b'HttpOnly', httponly, None),
(b'Path', path, False)):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b'=' + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1')
return rv
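# Illustrative example (the Expires value depends on the current time):
#
# >>> dump_cookie('session', 'abc123', max_age=3600, httponly=True)
# 'session=abc123; Expires=<one hour from now>; Max-Age=3600; HttpOnly; Path=/'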
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import ( # noqa
MIMEAccept, CharsetAccept, LanguageAccept, Headers
)
from werkzeug.urls import iri_to_uri
| mit | -1,931,132,630,411,118,000 | 33.777989 | 83 | 0.598783 | false |
UManPychron/pychron | pychron/file_defaults.py | 2 | 11951 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
"""
This file defines the text for various default files.
Values are used in pychron.paths when building directory structure
"""
from __future__ import absolute_import
import yaml
from pychron.core.helpers.strtools import to_bool
# PIPELINE_TEMPLATES = '''- Isotope Evolutions
# - Blanks
# - IC Factor
# - Flux
# - Ideogram
# - Spectrum
# - Inverse Isochron
# - Series
# - Regression Series
# - Radial
# - Analysis Table
# - Interpreted Age Table
# - Interpreted Age Ideogram
# - Auto Ideogram
# - Auto Series
# - Auto Report
# - Report
# - Diff
# - Vertical Flux
# - Xy Scatter
# - Geochron
# - Yield
# - CSV Analyses Export
# - CSV Ideogram
# - Correction Factors
# - Monitor Chain
# - Analysis Metadata
# '''
IDENTIFIERS_DEFAULT = """
- name: Blank
shortname: b
extractable: False
special: True
- name: Blank Air
shortname: ba
extractable: False
special: True
- name: Blank Cocktail
shortname: bc
extractable: False
special: True
- name: Blank Unknown
shortname: bu
extractable: False
special: True
- name: Blank ExtractionLine
shortname: be
extractable: False
special: True
- name: Background
shortname: bg
extractable: False
special: True
- name: Unknown
shortname: u
extractable: True
special: False
- name: Cocktail
shortname: c
extractable: False
special: True
- name: Air
shortname: a
extractable: False
special: True
- name: Pause
shortname: pa
extractable: False
special: True
- name: Degas
shortname: dg
extractable: True
special: True
- name: Detector IC
shortname: ic
extractable: False
special: True
"""
EDIT_UI_DEFAULT = """
predefined: Simple
"""
TASK_EXTENSION_DEFAULT = """
-
plugin_id: pychron.update.plugin
actions:
- pychron.update.check_for_updates, True
- pychron.update.manage_version, False
- pychron.update.manage_branch, False
- pychron.update.build_app, False
-
plugin_id: pychron.processing.plugin.recall
actions:
- pychron.recall.recall, True
- pychron.recall.configure, True
- pychron.recall.time_view, True
-
plugin_id: pychron.processing.plugin.figures
actions:
- pychron.figure.spectrum, True
- pychron.figure.ideogram, True
- pychron.figure.inv_isochron, True
- pychron.figure.series, True
- pychron.figure.composite, True
- pychron.figure.xyscatter, True
- pychron.figure.file_ideogram, True
- pychron.figure.file_spectrum, True
- pychron.figure.ideogram_file_template, True
- pychron.figure.spectrum_file_template, True
- pychron.figure.refresh, True
-
plugin_id: pychron.processing.plugin.reduction
actions:
- pychron.reduction.iso_evo, True
- pychron.reduction.blanks, True
- pychron.reduction.ic_factor, True
- pychron.reduction.discrimination, False
- pychron.reduction.flux, True
-
plugin_id: pychron.processing.plugin.dataset
actions:
- pychron.reduction.sqlite_dataset, True
- pychron.reduction.xml_dataset, True
-
plugin_id: pychron.processing.plugin.grouping
actions:
- pychron.grouping.selected, True
- pychron.grouping.aliquot, True
- pychron.grouping.lnumber, True
- pychron.grouping.sample, True
- pychron.grouping.clear, True
- pychron.grouping.gselected, True
- pychron.grouping.gsample, True
-
plugin_id: pychron.processing.plugin.misc
actions:
- pychron.misc.tag, True
- pychron.misc.drtag, False
- pychron.misc.select_drtag, False
- pychron.misc.db_save, True
- pychron.misc.clear_cache, True
- pychron.misc.modify_k, False
- pychron.misc.modify_identifier, False
-
plugin_id: pychron.processing.plugin.agroup
actions:
- pychron.agroup.make, False
- pychron.agroup.delete, False
-
plugin_id: pychron.experiment.plugin.edit
task_id: pychron.experiment.task
actions:
- pychron.experiment.edit.deselect, False
- pychron.experiment.edit.reset, True
- pychron.experiment.edit.sync, True
- pychron.experiment.edit.undo, False
- pychron.experiment.edit.configure, False
-
plugin_id: pychron.experiment.plugin
actions:
- pychron.experiment.open_system_conditionals, True
- pychron.experiment.open_queue_conditionals, True
- pychron.experiment.open_experiment, True
- pychron.experiment.open_last_experiment, True
- pychron.experiment.launch_history, True
- pychron.experiment.run_history_view, True
- pychron.experiment.test_notify, False
- pychron.experiment.new_experiment, True
- pychron.experiment.signal_calculator, False
- pychron.experiment.new_pattern, False
- pychron.experiment.open_pattern, False
-
plugin_id: pychron.entry.plugin
task_id: pychron.entry.irradiation.task
actions:
- pychron.entry2.transfer_j, True
- pychron.entry2.import_irradiation, True
- pychron.entry2.export_irradiation, False
- pychron.entry2.import_samples_from_file, False
- pychron.entry2.generate_tray, False
- pychron.entry2.save_labbook, False
- pychron.entry2.make_template, False
-
plugin_id: pychron.entry.plugin
actions:
- pychron.entry1.labnumber_entry, True
- pychron.entry1.sample_entry, True
- pychron.entry1.sample_prep, True
- pychron.entry1.generate_irradiation_table, False
- pychron.entry1.import_irradiation_holder, False
- pychron.entry1.sensitivity_entry, True
- pychron.entry1.flux_monitor, False
"""
actions = []
for line in TASK_EXTENSION_DEFAULT.split('\n'):
line = line.strip()
if line.startswith('- pychron.'):
a, b = line.split(',')
if to_bool(b):
actions.append(a)
SIMPLE_UI_DEFAULT = '\n'.join(actions)
DEFAULT_INITIALIZATION = '''<root>
<globals>
</globals>
<plugins>
<general>
<plugin enabled="false">Processing</plugin>
<plugin enabled="false">MediaStorage</plugin>
<plugin enabled="false">PyScript</plugin>
<plugin enabled="false">Video</plugin>
<plugin enabled="false">Database</plugin>
<plugin enabled="false">Entry</plugin>
<plugin enabled="false">ArArConstants</plugin>
<plugin enabled="false">Loading</plugin>
<plugin enabled="false">LabBook</plugin>
<plugin enabled="false">DashboardServer</plugin>
<plugin enabled="false">DashboardClient</plugin>
</general>
<hardware>
</hardware>
<social>
</social>
</plugins>
</root>
'''
DEFAULT_STARTUP_TESTS = '''
- plugin: Database
tests:
- test_pychron
- test_pychron_version
- plugin: MassSpec
tests:
- test_database
- plugin: LabBook
tests:
- plugin: ArArConstants
tests:
- plugin: ArgusSpectrometer
tests:
- test_communication
- test_intensity
- plugin: ExtractionLine
tests:
- test_valve_communication
- test_gauge_communication
'''
EXPERIMENT_DEFAULTS = '''
columns:
- Labnumber
- Aliquot
- Sample
- Position
- Extract
- Units
- Duration (s)
- Cleanup (s)
- Beam (mm)
- Pattern
- Extraction
- Measurement
- Conditionals
- Comment
'''
RATIO_CHANGE_DETECTION = '''
# - ratio: Ar40/Ar36
# nanalyses: 5
# threshold: 1
## percent_threshold: 1
## nominal_ratio: 295
## nsigma: 3
# analysis_type: air
# failure_count: 2
# consecutive_failure: True
# - ratio: Ar40/Ar39
# nanalyses: 5
# threshold: 1
## percent_threshold: 1
## nominal_ratio: 10
## nsigma: 3
# analysis_type: cocktail
# failure_count: 2
# consecutive_failure: True
'''
def make_screen(**kw):
obj = {'padding_left': 100,
'padding_right': 100,
'padding_top': 100,
'padding_bottom': 100,
'bgcolor': 'white',
'plot_bgcolor': 'white',
'xtick_in': 1,
'xtick_out': 5,
'ytick_in': 1,
'ytick_out': 5,
'use_xgrid': True,
'use_ygrid': True,
}
obj.update(kw)
return yaml.dump(obj, default_flow_style=False)
def make_presentation(**kw):
obj = {'padding_left': 40,
'padding_right': 40,
'padding_top': 40,
'padding_bottom': 40,
'bgcolor': (239, 238, 185),
'plot_bgcolor': (208, 243, 241),
'xtick_in': 1,
'xtick_out': 5,
'ytick_in': 1,
'ytick_out': 5,
'use_xgrid': True,
'use_ygrid': True, }
obj.update(kw)
return yaml.dump(obj, default_flow_style=False)
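# For illustration only (not part of the original file): make_screen and
# make_presentation return the option dicts serialized as YAML, e.g.
#
# >>> print(make_screen(bgcolor='lightyellow'))   # doctest-style sketch
# bgcolor: lightyellow
# ...
# padding_left: 100
# ...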
DEFINE_EQUILIBRATION_SCREEN = make_screen()
ISO_EVO_SCREEN = make_screen()
SERIES_SCREEN = make_screen()
BLANKS_SCREEN = make_screen()
ICFACTOR_SCREEN = make_screen()
BLANKS_PRESENTATION = make_presentation()
iso_d = dict(use_xgrid=False, use_ygrid=False)
inv_iso_d = dict(use_xgrid=False, use_ygrid=False,
nominal_intercept_label='Atm',
nominal_intercept_value=295.5,
show_nominal_intercept=True,
invert_nominal_intercept=True,
inset_marker_size=2.5,
inset_marker_color='black')
ISOCHRON_SCREEN = make_screen(**iso_d)
ISOCHRON_PRESENTATION = make_presentation(**iso_d)
INVERSE_ISOCHRON_SCREEN = make_screen(**inv_iso_d)
INVERSE_ISOCHRON_PRESENTATION = make_presentation(**inv_iso_d)
ideo_d = dict(probability_curve_kind='cumulative',
mean_calculation_kind='weighted mean',
mean_sig_figs=2,
index_attr='uage')
IDEOGRAM_SCREEN = make_screen(mean_indicator_fontsize=12,
**ideo_d)
IDEOGRAM_PRESENTATION = make_presentation(mean_indicator_fontsize=24,
**ideo_d)
spec_d = dict(plateau_line_width=1,
plateau_line_color='black',
plateau_sig_figs=2,
# calculate_fixed_plateau= False,
# calculate_fixed_plateau_start= '',
# calculate_fixed_plateau_end= '',
pc_nsteps=3,
pc_gas_fraction=50,
integrated_sig_figs=2,
legend_location='Upper Right',
include_legend=False,
include_sample_in_legend=False,
display_step=True,
display_extract_value=False)
SPECTRUM_PRESENTATION = make_presentation(**spec_d)
SPECTRUM_SCREEN = make_screen(**spec_d)
radial_d = dict()
RADIAL_SCREEN = make_screen(**radial_d)
regression_series_d = dict()
REGRESSION_SERIES_SCREEN = make_screen(**regression_series_d)
FLUX_CONSTANTS_DEFAULT = """
# This is an example flux file. Add additional decay_constant and monitor_age pairs here
"FC MIN":
lambda_ec: [5.80e-11, 0]
lambda_b: [4.884e-10, 0]
monitor_age: 28.201
"FC SJ":
lambda_ec: [5.81e-11, 0]
lambda_b: [4.962e-10, 0]
monitor_age: 28.02
"""
REACTORS_DEFAULT = '''{
"Triga": {
"K4039": [0.007614,0.000105],
"K3839": [0.013,0.0],
"K3739": [0.0,0.0],
"Ca3937": [0.00066,1e-05],
"Ca3837": [4e-05,2e-06],
"Ca3637": [0.000264,1e-06],
"Cl3638": [250.0,0.0],
"Ca_K": [1.96,0.0],
"Cl_K": [0.227,0.0]
}
}
'''
# ============= EOF =============================================
| apache-2.0 | 4,311,021,293,203,045,000 | 25.616927 | 88 | 0.633336 | false |
nomadcube/scikit-learn | sklearn/decomposition/incremental_pca.py | 199 | 10508 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _batch_mean_variance_update
class IncrementalPCA(_BasePCA):
"""Incremental principal components analysis (IPCA).
Linear dimensionality reduction using Singular Value Decomposition of
centered data, keeping only the most significant singular vectors to
project the data to a lower dimensional space.
Depending on the size of the input data, this algorithm can be much more
memory efficient than a PCA.
This algorithm has constant memory complexity, on the order
of ``batch_size``, enabling use of np.memmap files without loading the
entire file into memory.
The computational overhead of each SVD is
``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
remain in memory at a time. There will be ``n_samples / batch_size`` SVD
computations to get the principal components, versus 1 large SVD of
complexity ``O(n_samples * n_features ** 2)`` for PCA.
Read more in the :ref:`User Guide <IncrementalPCA>`.
Parameters
----------
n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
batch_size : int or None, (default=None)
The number of samples to use for each batch. Only used when calling
``fit``. If ``batch_size`` is ``None``, then ``batch_size``
is inferred from the data and set to ``5 * n_features``, to provide a
balance between approximation accuracy and memory consumption.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
whiten : bool, optional
When True (False by default) the ``components_`` vectors are divided
by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making data respect some hard-wired assumptions.
Attributes
----------
components_ : array, shape (n_components, n_features)
Components with maximum variance.
explained_variance_ : array, shape (n_components,)
Variance explained by each of the selected components.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If all components are stored, the sum of explained variances is equal
to 1.0
mean_ : array, shape (n_features,)
Per-feature empirical mean, aggregate over calls to ``partial_fit``.
var_ : array, shape (n_features,)
Per-feature empirical variance, aggregate over calls to ``partial_fit``.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf.
n_components_ : int
The estimated number of components. Relevant when ``n_components=None``.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Notes
-----
Implements the incremental PCA model from:
`D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
pp. 125-141, May 2008.`
See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
This model is an extension of the Sequential Karhunen-Loeve Transform from:
`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
its Application to Images, IEEE Transactions on Image Processing, Volume 9,
Number 8, pp. 1371-1374, August 2000.`
See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
We have specifically abstained from an optimization used by authors of both
papers, a QR decomposition used in specific situations to reduce the
algorithmic complexity of the SVD. The source for this technique is
`Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
section 5.4.4, pp 252-253.`. This technique has been omitted because it is
advantageous only when decomposing a matrix with ``n_samples`` (rows)
>= 5/3 * ``n_features`` (columns), and hurts the readability of the
implemented algorithm. This would be a good opportunity for future
optimization, if it is deemed necessary.
References
----------
D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77,
Issue 1-3, pp. 125-141, May 2008.
G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
Section 5.4.4, pp. 252-253.
See also
--------
PCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, whiten=False, copy=True,
batch_size=None):
self.n_components = n_components
self.whiten = whiten
self.copy = copy
self.batch_size = batch_size
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y: Passthrough for ``Pipeline`` compatibility.
Returns
-------
self: object
Returns the instance itself.
"""
self.components_ = None
self.mean_ = None
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
self.var_ = None
self.n_samples_seen_ = 0
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(n_samples, self.batch_size_):
self.partial_fit(X[batch])
return self
def partial_fit(self, X, y=None):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self: object
Returns the instance itself.
"""
X = check_array(X, copy=self.copy, dtype=np.float)
n_samples, n_features = X.shape
if not hasattr(self, 'components_'):
self.components_ = None
if self.n_components is None:
self.n_components_ = n_features
elif not 1 <= self.n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features))
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (self.components_.shape[0]
!= self.n_components_):
raise ValueError("Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value." % (
self.components_.shape[0], self.n_components_))
if self.components_ is None:
# This is the first pass through partial_fit
self.n_samples_seen_ = 0
col_var = X.var(axis=0)
col_mean = X.mean(axis=0)
X -= col_mean
U, S, V = linalg.svd(X, full_matrices=False)
U, V = svd_flip(U, V, u_based_decision=False)
explained_variance = S ** 2 / n_samples
explained_variance_ratio = S ** 2 / np.sum(col_var *
n_samples)
else:
col_batch_mean = X.mean(axis=0)
col_mean, col_var, n_total_samples = _batch_mean_variance_update(
X, self.mean_, self.var_, self.n_samples_seen_)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt((self.n_samples_seen_ * n_samples) /
n_total_samples) * (self.mean_ -
col_batch_mean)
X_combined = np.vstack((self.singular_values_.reshape((-1, 1)) *
self.components_, X,
mean_correction))
U, S, V = linalg.svd(X_combined, full_matrices=False)
U, V = svd_flip(U, V, u_based_decision=False)
explained_variance = S ** 2 / n_total_samples
explained_variance_ratio = S ** 2 / np.sum(col_var *
n_total_samples)
self.n_samples_seen_ += n_samples
self.components_ = V[:self.n_components_]
self.singular_values_ = S[:self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[:self.n_components_]
self.explained_variance_ratio_ = \
explained_variance_ratio[:self.n_components_]
if self.n_components_ < n_features:
self.noise_variance_ = \
explained_variance[self.n_components_:].mean()
else:
self.noise_variance_ = 0.
return self
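# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hypothetical example of the batch-wise workflow implemented
# above: repeatedly feeding minibatches to partial_fit instead of
# decomposing the full matrix at once. It assumes scikit-learn is installed
# and that IncrementalPCA is importable from sklearn.decomposition; the
# array shape and batch size are arbitrary.
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import IncrementalPCA
    rng = np.random.RandomState(0)
    X = rng.normal(size=(1000, 20))  # 1000 samples, 20 features
    ipca = IncrementalPCA(n_components=5)
    batch_size = 200
    for start in range(0, X.shape[0], batch_size):
        ipca.partial_fit(X[start:start + batch_size])  # one minibatch at a time
    # Fraction of total variance captured by the 5 retained components.
    print(ipca.explained_variance_ratio_.sum())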
| bsd-3-clause | 7,555,955,023,578,288,000 | 40.207843 | 80 | 0.606681 | false |
supriyantomaftuh/django | tests/staticfiles_tests/test_storage.py | 147 | 18183 | from __future__ import unicode_literals
import os
import sys
import unittest
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .cases import (
BaseCollectionTestCase, BaseStaticFilesTestCase, StaticFilesTestCase,
)
from .settings import TEST_ROOT, TEST_SETTINGS, TESTFILES_PATH
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
class TestHashedFiles(object):
hashed_file_path = hashed_file_path
def tearDown(self):
# Clear hashed files to avoid side effects among tests.
storage.staticfiles_storage.hashed_files.clear()
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.bb84a0240107.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'//foobar', content)
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
def test_template_tag_denorm(self):
relpath = self.hashed_file_path("cached/denorm.css")
self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"..//cached///styles.css", content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
self.assertNotIn(b"url(img/relative.png )", content)
self.assertIn(b'url("img/relative.acae32e4532b.png', content)
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
def test_import_replacement(self):
"See #18050"
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
def test_post_processing(self):
"""
Test that post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
collectstatic has already been called once in setUp() for this testcase,
therefore we check by verifying behavior on a second run.
"""
collectstatic_args = {
'interactive': False,
'verbosity': 0,
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_post_processing_failure(self):
"""
Test that post_processing indicates the origin of the error when it
fails. Regression test for #18986.
"""
finders.get_finder.cache_clear()
err = six.StringIO()
with self.assertRaises(Exception):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def test_cache_invalidation(self):
name = "cached/styles.css"
hashed_name = "cached/styles.bb84a0240107.css"
# check if the cache is filled correctly as expected
cache_key = storage.staticfiles_storage.hash_key(name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(self.hashed_file_path(name), cached_name)
# clearing the cache to make sure we re-set it correctly in the url method
storage.staticfiles_storage.hashed_files.clear()
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, None)
self.assertEqual(self.hashed_file_path(name), hashed_name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, hashed_name)
def test_cache_key_memcache_validation(self):
"""
Handle cache key creation correctly, see #17861.
"""
name = (
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/\x16\xb4"
)
cache_key = storage.staticfiles_storage.hash_key(name)
cache_validator = BaseCache({})
cache_validator.validate_key(cache_key)
self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def setUp(self):
super(TestCollectionManifestStorage, self).setUp()
self._clear_filename = os.path.join(TESTFILES_PATH, 'cleared.txt')
with open(self._clear_filename, 'w') as f:
f.write('to be deleted in one test')
def tearDown(self):
super(TestCollectionManifestStorage, self).tearDown()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn(
'"version": "%s"' % storage.staticfiles_storage.manifest_version,
force_text(manifest_content)
)
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = os.path.join('test', 'cleared.txt')
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
        # delete the original file from the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
hashed_file_path = hashed_file_path
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
"""
Used in TestStaticFilePermissions
"""
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o640
kwargs['directory_permissions_mode'] = 0o740
super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):
command_params = {
'interactive': False,
'post_process': True,
'verbosity': 0,
'ignore_patterns': ['*.ignoreme'],
'use_default_ignore_patterns': True,
'clear': False,
'link': False,
'dry_run': False,
}
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
super(TestStaticFilePermissions, self).setUp()
def tearDown(self):
os.umask(self.old_umask)
super(TestStaticFilePermissions, self).tearDown()
# Don't run collectstatic command in this test class.
def run_collectstatic(self, **kwargs):
pass
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
)
def test_collect_static_files_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o655)
self.assertEqual(dir_mode, 0o765)
@override_settings(
FILE_UPLOAD_PERMISSIONS=None,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
)
def test_collect_static_files_default_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o666 & ~self.umask)
self.assertEqual(dir_mode, 0o777 & ~self.umask)
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
STATICFILES_STORAGE='staticfiles_tests.test_storage.CustomStaticFilesStorage',
)
def test_collect_static_files_subclass_of_static_storage(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o640)
self.assertEqual(dir_mode, 0o740)
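# --- Illustrative sketch (not part of the original test module) -------------
# The expected names asserted above (e.g. "styles.bb84a0240107.css") come
# from content-hashed storage: a short digest of the file's bytes is spliced
# in before the extension. The helper below is a simplified, hypothetical
# re-implementation of that naming scheme, shown only to clarify what the
# assertions check; it is not the exact code path used by
# django.contrib.staticfiles.
import hashlib
import posixpath
def _sketch_hashed_name(name, content, digest_len=12):
    """Return "base.<digest>.ext" for the given file content (bytes)."""
    digest = hashlib.md5(content).hexdigest()[:digest_len]
    root, ext = posixpath.splitext(name)
    return "%s.%s%s" % (root, digest, ext)
# _sketch_hashed_name("cached/styles.css", b"p { color: red }")
# -> "cached/styles.<12 hex chars>.css"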
| bsd-3-clause | -3,708,478,434,379,620,400 | 42.709135 | 118 | 0.670461 | false |
MiltosD/CEFELRC | lib/python2.7/site-packages/django/db/models/sql/constants.py | 394 | 1043 | import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict([(x, None) for x in (
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
)])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC')}
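# --- Illustrative sketch (not part of the original module) ------------------
# How these constants are typically consumed: LOOKUP_SEP splits a filter
# expression into a field path plus a lookup type drawn from QUERY_TERMS,
# and ORDER_PATTERN validates strings passed to .order_by(). The helper is a
# hypothetical demonstration only, not code taken from Django itself.
def _split_lookup(expr):
    parts = expr.split(LOOKUP_SEP)
    if parts[-1] in QUERY_TERMS:
        return parts[:-1], parts[-1]
    return parts, 'exact'
# _split_lookup('author__name__icontains') -> (['author', 'name'], 'icontains')
# ORDER_PATTERN.match('-created') matches; ORDER_PATTERN.match('created;') is None.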
| bsd-3-clause | 3,794,071,409,986,952,700 | 27.189189 | 79 | 0.649089 | false |
multigcs/quadfork | Libraries/Mavlink/pymavlink/generator/lib/minixsv/minixsvWrapper.py | 79 | 2400 | #!/usr/local/bin/python
import sys
import getopt
from ..genxmlif import GenXmlIfError
from xsvalErrorHandler import ErrorHandler, XsvalError
from ..minixsv import *
from pyxsval import parseAndValidate
##########################################
# minixsv Wrapper for calling minixsv from command line
validSyntaxText = '''\
minixsv XML Schema Validator
Syntax: minixsv [-h] [-?] [-p Parser] [-s XSD-Filename] XML-Filename
Options:
-h, -?: Display this help text
-p Parser: XML Parser to be used
(XMLIF_MINIDOM, XMLIF_ELEMENTTREE, XMLIF_4DOM
default: XMLIF_ELEMENTTREE)
-s XSD-FileName: specify the schema file for validation
(if not specified in XML-File)
'''
def checkShellInputParameter():
"""check shell input parameters."""
xmlInputFilename = None
xsdFilename = None
xmlParser = "XMLIF_ELEMENTTREE"
try:
(options, arguments) = getopt.getopt(sys.argv[1:], '?hp:s:')
if ('-?','') in options or ('-h','') in options:
print validSyntaxText
sys.exit(-1)
else:
if len (arguments) == 1:
xmlInputFilename = arguments[0]
for o, a in options:
if o == "-s":
xsdFilename = a
if o == "-p":
if a in (XMLIF_MINIDOM, XMLIF_ELEMENTTREE, XMLIF_4DOM):
xmlParser = a
else:
print 'Invalid XML parser %s!' %(a)
sys.exit(-1)
else:
print 'minixsv needs one argument (XML input file)!'
sys.exit(-1)
except getopt.GetoptError, errstr:
print errstr
sys.exit(-1)
return xmlInputFilename, xsdFilename, xmlParser
def main():
xmlInputFilename, xsdFileName, xmlParser = checkShellInputParameter()
try:
parseAndValidate (xmlInputFilename, xsdFile=xsdFileName, xmlIfClass=xmlParser)
except IOError, errstr:
print errstr
sys.exit(-1)
except GenXmlIfError, errstr:
print errstr
sys.exit(-1)
except XsvalError, errstr:
print errstr
sys.exit(-1)
if __name__ == "__main__":
main()
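# --- Usage sketch (not part of the original wrapper) -------------------------
# Driving the same validation programmatically rather than via the shell.
# The file names are placeholders; parseAndValidate and the exception types
# are the ones already imported at the top of this module, so treat this as
# a sketch of the call pattern rather than tested code.
def _validate_quietly(xml_path, xsd_path=None, parser=XMLIF_ELEMENTTREE):
    try:
        parseAndValidate(xml_path, xsdFile=xsd_path, xmlIfClass=parser)
        return True
    except (IOError, GenXmlIfError, XsvalError), errstr:
        print errstr
        return False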
| gpl-3.0 | 1,943,417,319,065,918,200 | 29.578947 | 86 | 0.5275 | false |
aerler/Ensemble | src/ensemble/ensemble_test.py | 1 | 8702 | '''
Created on 2013-08-24
Unittest for the GeoPy main package geodata.
@author: Andre R. Erler, GPL v3
'''
import unittest
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import scipy.stats as ss
import os
import gc
from copy import deepcopy
import shutil
# internal imports
# from ensemble.base import Ensemble
from ensemble.expand import expandArgumentList
## tests related to loading datasets
class ArgumentTest(unittest.TestCase):
def setUp(self):
''' create two test variables '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testExpArgList(self):
''' test function to expand argument lists '''
# test arguments
args1 = [0,1,2]; args2 = ['0','1','2']; args3 = ['test']*3; arg4 = 'static1'; arg5 = 'static2'
explist = ['arg1','arg2','arg3']
# test inner product expansion
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=explist, lproduct='inner')
assert len(arg_list) == len(args1) and len(arg_list) == len(args2)
for args,arg1,arg2,arg3 in zip(arg_list,args1,args2,args3):
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
# test outer product expansion
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=explist, lproduct='outer')
assert len(arg_list) == len(args1) * len(args2) * len(args3)
n = 0
for arg1 in args1:
for arg2 in args2:
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
# test simultaneous inner and outer product expansion
        n1 = len(args2) * len(args3) // len(args1)  # integer count so args1 can be tiled below
tmp1 = args1*n1
arg_list = expandArgumentList(arg1=tmp1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
outer_list=['arg2','arg3'], inner_list=['arg1'])
assert len(arg_list) == len(args2) * len(args3) == len(tmp1)
n = 0
for arg2 in args2:
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == tmp1[n]
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
# test parallel outer product expansion
assert len(args1) == len(args2) # necessary for test
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=[('arg1','arg2'),'arg3'], lproduct='outer')
assert len(arg_list) == len(args1) * len(args3)
n = 0
for arg1,arg2 in zip(args1,args2):
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
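# --- Illustrative sketch (not part of the original test module) -------------
# What the assertions above expect expandArgumentList to do, in miniature:
# an "inner" product zips the expandable arguments position by position,
# an "outer" product takes their Cartesian product, and every static keyword
# argument is copied into each resulting dict. This is a simplified,
# hypothetical re-implementation used only to clarify the test, not the
# ensemble.expand code itself.
import itertools
def _expand_sketch(expand, static, lproduct='inner'):
    keys = list(expand.keys())
    if lproduct == 'inner':
        combos = zip(*[expand[k] for k in keys])
    else:  # 'outer'
        combos = itertools.product(*[expand[k] for k in keys])
    return [dict(zip(keys, values), **static) for values in combos]
# _expand_sketch({'arg1': [0, 1]}, {'arg4': 'static1'}, lproduct='outer')
# -> [{'arg1': 0, 'arg4': 'static1'}, {'arg1': 1, 'arg4': 'static1'}]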
## simple tests for the Container protocol
class ContainerTest(unittest.TestCase):
def setUp(self):
''' create some objects for testing '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testEnsemble(self):
''' simple test for the Ensemble container class '''
# make test objects
test_1 = 'test 1'; test_2 = 'test 2'; test_3 = 'test 3'
# instantiate ensemble
ens = Ensemble(test_1, test_2, name='ensemble', title='Test Ensemble')
# basic functionality
assert len(ens.members) == len(ens) == 2
assert test_1 in ens and test_2 in ens
# collective add/remove
# test adding a new member
ens += test_3 # this is an ensemble operation
assert len(ens) == 3
assert test_3 in ens
# remove
del ens[-1]
assert len(ens) == 2
assert test_3 not in ens
# print representation
print(''); print(ens); print('')
## tests for the method redirect functionality
class MethodTest(unittest.TestCase):
def setUp(self):
''' create Dataset with Axes and a Variables for testing '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testEnsemble(self):
''' test the Ensemble container class '''
# test object
dataset = self.dataset
dataset.load()
# make a copy
copy = dataset.copy()
copy.name = 'copy of {}'.format(dataset.name)
yacod = dataset.copy()
yacod.name = 'yacod' # used later
# instantiate ensemble
ens = Ensemble(dataset, copy, name='ensemble', title='Test Ensemble', basetype='Dataset')
# basic functionality
assert len(ens.members) == len(ens)
# these var/ax names are specific to the test dataset...
if all(ens.hasVariable('var')):
assert isinstance(ens.var,Ensemble)
assert ens.var.basetype == Variable
#assert ens.var == Ensemble(dataset.var, copy.var, basetype=Variable, idkey='dataset_name')
assert ens.var.members == [dataset.var, copy.var]
#print ens.var
#print Ensemble(dataset.var, copy.var, basetype=Variable, idkey='dataset_name')
#print(''); print(ens); print('')
#print ens.time
assert ens.time == [dataset.time , copy.time]
# Axis ensembles are not supported anymore, since they are often shared.
#assert isinstance(ens.time,Ensemble) and ens.time.basetype == Variable
# collective add/remove
ax = Axis(name='ax', units='none', coord=(1,10))
var1 = Variable(name='new',units='none',axes=(ax,))
var2 = Variable(name='new',units='none',axes=(ax,))
ens.addVariable([var1,var2], copy=False) # this is a dataset operation
assert ens[0].hasVariable(var1)
assert ens[1].hasVariable(var2)
assert all(ens.hasVariable('new'))
# test adding a new member
ens += yacod # this is an ensemble operation
#print(''); print(ens); print('')
ens -= 'new' # this is a dataset operation
assert not any(ens.hasVariable('new'))
ens -= 'test'
# fancy test of Variable and Dataset integration
assert not any(ens[self.var.name].mean(axis='time').hasAxis('time'))
print((ens.prettyPrint(short=True)))
# apply function to dataset ensemble
if all(ax.units == 'month' for ax in ens.time):
maxens = ens.seasonalMax(lstrict=not lsimple); del maxens
# test call
tes = ens(time=slice(0,3,2))
assert all(len(tax)==2 for tax in tes.time)
# test list indexing
sne = ens[list(range(len(ens)-1,-1,-1))]
assert sne[-1] == ens[0] and sne[0] == ens[-1]
if __name__ == "__main__":
# use Intel MKL multithreading: OMP_NUM_THREADS=4
# import os
print(('OMP_NUM_THREADS = {:s}\n'.format(os.environ['OMP_NUM_THREADS'])))
specific_tests = []
# specific_tests += ['Ensemble']
# list of tests to be performed
tests = []
# list of Container tests
tests += ['Argument']
# list of Container tests
# tests += ['Container']
# list of Method tests
# tests += ['Method']
# construct dictionary of test classes defined above
test_classes = dict()
local_values = locals().copy()
for key,val in local_values.items():
if key[-4:] == 'Test':
test_classes[key[:-4]] = val
# run tests
report = []
for test in tests: # test+'.test'+specific_test
if specific_tests:
test_names = ['ensemble_test.'+test+'Test.test'+s_t for s_t in specific_tests]
s = unittest.TestLoader().loadTestsFromNames(test_names)
else: s = unittest.TestLoader().loadTestsFromTestCase(test_classes[test])
report.append(unittest.TextTestRunner(verbosity=2).run(s))
# print summary
runs = 0; errs = 0; fails = 0
for name,test in zip(tests,report):
#print test, dir(test)
runs += test.testsRun
e = len(test.errors)
errs += e
f = len(test.failures)
fails += f
if e+ f != 0: print(("\nErrors in '{:s}' Tests: {:s}".format(name,str(test))))
if errs + fails == 0:
print(("\n *** All {:d} Test(s) successfull!!! *** \n".format(runs)))
else:
print(("\n ### Test Summary: ### \n" +
" ### Ran {:2d} Test(s) ### \n".format(runs) +
" ### {:2d} Failure(s) ### \n".format(fails)+
" ### {:2d} Error(s) ### \n".format(errs)))
| gpl-3.0 | 3,458,317,141,232,719,400 | 33.395257 | 99 | 0.591588 | false |
mwcraig/ccdproc | ccdproc/tests/test_cosmicray.py | 2 | 10917 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose
import pytest
from astropy.utils import NumpyRNGContext
from astropy.nddata import StdDevUncertainty
from astropy import units as u
from ccdproc.core import (cosmicray_lacosmic, cosmicray_median,
background_deviation_box, background_deviation_filter)
from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func
DATA_SCALE = 5.3
NCRAYS = 30
def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
size = data.shape[0]
with NumpyRNGContext(125):
crrays = np.random.randint(0, size, size=(ncrays, 2))
    # use (threshold + 5) below to make sure cosmic ray is well above the
# threshold no matter what the random number generator returns
crflux = (10 * scale * np.random.random(NCRAYS) +
(threshold + 5) * scale)
for i in range(ncrays):
y, x = crrays[i]
data.data[y, x] = crflux[i]
def test_cosmicray_lacosmic():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
data, crarr = cosmicray_lacosmic(ccd_data.data, sigclip=5)
# check the number of cosmic rays detected
# currently commented out while checking on issues
# in astroscrappy
# assert crarr.sum() == NCRAYS
def test_cosmicray_lacosmic_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5)
# check the number of cosmic rays detected
# currently commented out while checking on issues
# in astroscrappy
# assert nccd_data.mask.sum() == NCRAYS
def test_cosmicray_lacosmic_check_data():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
noise = DATA_SCALE * np.ones_like(ccd_data.data)
cosmicray_lacosmic(10, noise)
@pytest.mark.parametrize('array_input', [True, False])
@pytest.mark.parametrize('gain_correct_data', [True, False])
def test_cosmicray_gain_correct(array_input, gain_correct_data):
# Add regression check for #705 and for the new gain_correct
# argument.
# The issue is that cosmicray_lacosmic gain-corrects the
# data and returns that gain corrected data. That is not the
# intent...
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
# Don't really need to set this (6.5 is the default value) but want to
# make lack of units explicit.
readnoise = 6.5
if array_input:
new_data, cr_mask = cosmicray_lacosmic(ccd_data.data,
gain=gain,
gain_apply=gain_correct_data)
else:
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=gain_correct_data)
new_data = new_ccd.data
cr_mask = new_ccd.mask
# Fill masked locations with 0 since there is no simple relationship
# between the original value and the corrected value.
orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0)
new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0)
if gain_correct_data:
gain_for_test = gain
else:
gain_for_test = 1.0
np.testing.assert_allclose(gain_for_test * orig_data, new_data)
def test_cosmicray_lacosmic_accepts_quantity_gain():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# The units below are the point of the test
gain = 2.0 * u.electron / u.adu
# Since gain and ccd_data have units, the readnoise should too.
readnoise = 6.5 * u.electron
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True)
def test_cosmicray_lacosmic_accepts_quantity_readnoise():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
gain = 2.0 * u.electron / u.adu
# The units below are the point of this test
readnoise = 6.5 * u.electron
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
def test_cosmicray_lacosmic_detects_inconsistent_units():
# This is intended to detect cases like a ccd with units
# of adu, a readnoise in electrons and a gain in adu / electron.
# That is not internally inconsistent.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
ccd_data.unit = 'adu'
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
readnoise = 6.5 * u.electron
# The units below are deliberately incorrect.
gain = 2.0 * u.adu / u.electron
with pytest.raises(ValueError) as e:
cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
assert 'Inconsistent units' in str(e.value)
def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(recwarn):
# Check that an input ccd in electrons raises a warning.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
# The unit below is important for the test; this unit on
# input is supposed to raise an error.
ccd_data.unit = u.electron
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
# Don't really need to set this (6.5 is the default value) but want to
# make lack of units explicit.
readnoise = 6.5
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
assert "Image unit is electron" in str(recwarn.pop())
def test_cosmicray_median_check_data():
with pytest.raises(TypeError):
ndata, crarr = cosmicray_median(10, thresh=5, mbox=11,
error_image=DATA_SCALE)
def test_cosmicray_median():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ndata, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image=DATA_SCALE)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ccd_data.uncertainty = ccd_data.data*0.0+DATA_SCALE
nccd = cosmicray_median(ccd_data, thresh=5, mbox=11,
error_image=None)
# check the number of cosmic rays detected
assert nccd.mask.sum() == NCRAYS
def test_cosmicray_median_masked():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6))
ndata, crarr = cosmicray_median(data, thresh=5, mbox=11,
error_image=DATA_SCALE)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_background_None():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image=None)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_gbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data*0.0+DATA_SCALE
data, crarr = cosmicray_median(ccd_data.data, error_image=error,
thresh=5, mbox=11, rbox=0, gbox=5)
data = np.ma.masked_array(data, crarr)
assert crarr.sum() > NCRAYS
assert abs(data.std() - scale) < 0.1
def test_cosmicray_median_rbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data*0.0+DATA_SCALE
data, crarr = cosmicray_median(ccd_data.data, error_image=error,
thresh=5, mbox=11, rbox=21, gbox=5)
assert data[crarr].mean() < ccd_data.data[crarr].mean()
assert crarr.sum() > NCRAYS
def test_cosmicray_median_background_deviation():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image='blank')
def test_background_deviation_box():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_box(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_box_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_box(cd, 0.5)
def test_background_deviation_filter():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_filter(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_filter_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_filter(cd, 0.5)
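# --- Illustrative sketch (not part of the original test module) -------------
# The median-based detection exercised above amounts to: median-filter the
# image, then flag pixels whose residual exceeds ``thresh`` times the
# per-pixel error estimate. This is a simplified, hypothetical stand-in for
# ccdproc.cosmicray_median, shown only to clarify what the tests assert.
from scipy import ndimage
def _simple_median_crmask(data, error, thresh=5, mbox=11):
    smoothed = ndimage.median_filter(data, size=mbox)
    return (data - smoothed) > thresh * error
# add_cosmicrays() above injects NCRAYS pixels far above threshold, so a mask
# computed this way is expected to recover roughly that many hits.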
| bsd-3-clause | 3,243,147,568,152,654,000 | 35.881757 | 77 | 0.635339 | false |
transientskp/aartfaac-arthur | scripts/arthur-plot.py | 1 | 1440 | #!/usr/bin/env python3
import sys
import numpy as np
from arthur.imaging import full_calculation, calculate_lag
from arthur.io import read_full
from arthur.plot import plot_image, plot_lag, plot_chan_power, plot_corr_mat, plot_diff
from arthur.constants import NUM_CHAN
from matplotlib import pyplot
FRQ = 58398437.5 # Central observation frequency in Hz
def main():
if len(sys.argv) < 2:
print("Image the first set of visibilites from a visibilities file")
print()
print("usage: {} <file>".format(sys.argv[0]))
sys.exit(1)
else:
path = sys.argv[1]
# define them here so we can access them out of for loop scope
lags = []
prev_data = date = img_data = corr_data = diff_data = None
chan_data = np.zeros((NUM_CHAN, 60), dtype=np.float32)
for date, body in read_full(path):
img_data, corr_data, chan_row = full_calculation(body, FRQ)
lags += [calculate_lag(date).seconds]
if prev_data is None:
prev_data = img_data
chan_data = np.roll(chan_data, 1)
chan_data[:, 0] = chan_row
diff_data = img_data - prev_data
prev_data = img_data
fig_img = plot_image(date, img_data, FRQ)
fig_lag = plot_lag(lags)
fig_chan = plot_chan_power(chan_data)
fig_cm = plot_corr_mat(corr_data, FRQ, date)
fig_diff = plot_diff(diff_data, FRQ, date)
pyplot.show()
if __name__ == '__main__':
main()
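# --- Illustrative note (not part of the original script) ---------------------
# The waterfall update in main() keeps the newest per-channel power row in
# column 0, so the column index acts as the age of the measurement.
# np.roll without an axis argument rolls the flattened array, but because
# column 0 is overwritten immediately afterwards the net effect here equals
# a roll along axis=1, as in this small hypothetical helper.
def _push_column(waterfall, new_column):
    waterfall = np.roll(waterfall, 1, axis=1)
    waterfall[:, 0] = new_column
    return waterfall
# e.g. buf = np.zeros((NUM_CHAN, 60), dtype=np.float32)
#      buf = _push_column(buf, chan_row)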
| gpl-3.0 | 7,387,992,118,067,336,000 | 31 | 87 | 0.63125 | false |
seiji56/rmaze-2016 | testes/sharp.py | 1 | 1113 | import time
import Adafruit_ADS1x15
import sys
addr = 0
def convert( aString ):
if aString.startswith("0x") or aString.startswith("0X"):
return int(aString,16)
elif aString.startswith("0"):
return int(aString,8)
else:
return int(aString)
milli_time = lambda: int(round(time.time() * 1000))
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' <address> <port>')
exit(0)
addr = convert(sys.argv[1])
port = convert(sys.argv[2])
it = 1
if len(sys.argv) == 4:
it = convert(sys.argv[3])
adc = Adafruit_ADS1x15.ADS1015(address=addr, busnum=1)
GAIN = 1
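# --- Illustrative sketch (not part of the original script) -------------------
# Converting the averaged raw reading produced in the loop below into volts.
# The ADS1015 is a 12-bit converter and with GAIN = 1 its full-scale range is
# assumed to be +/-4.096 V (about 2 mV per count); the exact scaling of
# Adafruit's read_adc() return value is an assumption here, so treat this as
# a sketch rather than a calibrated conversion.
def _raw_to_volts(raw, full_scale=4.096, counts=2048.0):
    return raw * full_scale / counts
# e.g. a Sharp IR ranger reading of 1100 counts corresponds to about 2.2 V.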
print('Reading port ' + str(port) + ' of ADS1x15 at ' + hex(addr) + ', press Ctrl-C to quit...')
print('| {0:^6} | {1:^6} |'.format(*([port] + ['Time'])))
#print('-' * 46)
while True:
value = 0
ltime = milli_time()
try:
for i in range(it):
value += adc.read_adc(port, gain=GAIN)
except IOError:
print('Could not read sensor.')
exit(-1)
value /= it
print('| {0:^6} | {1:^6} |'.format(*([value] + [milli_time() - ltime])))
time.sleep(0.5)
| gpl-3.0 | -4,920,968,997,997,010,000 | 23.733333 | 99 | 0.573226 | false |
mbr/unleash | tests/test_depgraph.py | 1 | 1538 | import pytest
from unleash.depgraph import DependencyGraph
@pytest.fixture
def dg():
# our example dependency graph. it looks like this
#
# D -> B
# \
# A E -> F
# /
# C
g = DependencyGraph()
g.add_obj('D', ['B'])
g.add_obj('B', ['A'])
g.add_obj('C', ['A'])
g.add_obj('E', ['F'])
return g
def test_get_full_dependencies(dg):
assert dg.get_full_dependencies('D') == {'B', 'A'}
def test_get_full_dependants(dg):
assert dg.get_full_dependants('A') == {'B', 'C', 'D'}
assert dg.get_full_dependants('F') == {'E'}
def test_get_dependants(dg):
assert set(dg.get_dependants('A')) == {'B', 'C'}
def test_get_dependencies(dg):
assert dg.get_dependencies('B') == ['A']
assert dg.get_dependencies('D') == ['B']
assert dg.get_dependencies('E') == ['F']
def test_remove_obj(dg):
dg.remove_obj('A')
assert dg.get_dependencies('B') == []
def test_remove_dependency(dg):
dg.remove_dependency('C', 'A')
assert dg.get_full_dependants('A') == {'B', 'D'}
def test_resolve_order(dg):
ordered = dg.resolve_order()
a = ordered.index('A')
b = ordered.index('B')
c = ordered.index('C')
d = ordered.index('D')
e = ordered.index('E')
f = ordered.index('F')
assert d > b
assert b > a
assert c > a
assert e > f
def test_dag_enforced(dg):
with pytest.raises(ValueError):
dg.add_obj('A', ['B'])
with pytest.raises(ValueError):
dg.add_dependency('A', 'B')
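# --- Illustrative sketch (not part of the original test module) --------------
# The property checked in test_resolve_order (every object appears after all
# of its dependencies) is a topological sort. On Python 3.9+ an equivalent
# ordering could be produced with the standard library as below; this is a
# hypothetical comparison, not how unleash.depgraph is implemented.
def _stdlib_resolve_order(edges):
    """``edges`` maps an object to the list of objects it depends on."""
    from graphlib import TopologicalSorter
    return list(TopologicalSorter(edges).static_order())
# _stdlib_resolve_order({'D': ['B'], 'B': ['A'], 'C': ['A'], 'E': ['F']})
# yields 'A' before 'B' and 'D', and 'F' before 'E'.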
| mit | 8,220,235,134,665,998,000 | 19.783784 | 57 | 0.550715 | false |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/tests/unit/test_addons.py | 1 | 1985 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from marionette import MarionetteTestCase
from marionette_driver.addons import Addons, AddonInstallException
here = os.path.abspath(os.path.dirname(__file__))
class TestAddons(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.addons = Addons(self.marionette)
@property
def all_addon_ids(self):
with self.marionette.using_context('chrome'):
addons = self.marionette.execute_async_script("""
Components.utils.import("resource://gre/modules/AddonManager.jsm");
AddonManager.getAllAddons(function(addons){
let ids = addons.map(function(x) {
return x.id;
});
marionetteScriptFinished(ids);
});
""")
return addons
def test_install_and_remove_temporary_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
addon_id = self.addons.install(addon_path, temp=True)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
def test_install_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
with self.assertRaises(AddonInstallException):
self.addons.install(addon_path)
@unittest.skip("need to get the test extension signed")
def test_install_and_remove_signed_addon(self):
addon_path = os.path.join(here, 'mn-restartless-signed.xpi')
addon_id = self.addons.install(addon_path)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
| mpl-2.0 | 3,420,210,611,468,736,500 | 33.224138 | 81 | 0.654408 | false |
jimbobhickville/libcloud | libcloud/test/compute/test_vcloud.py | 33 | 32420 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib, b
from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject
from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess
from libcloud.compute.drivers.vcloud import VCloud_5_1_NodeDriver
from libcloud.compute.drivers.vcloud import Vdc
from libcloud.compute.base import Node, NodeImage
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import VCLOUD_PARAMS
class TerremarkTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = "test"
VCloudNodeDriver.connectionCls.conn_classes = (None, TerremarkMockHttp)
TerremarkMockHttp.type = None
self.driver = TerremarkDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5')
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, [])
self.assertEqual(node.private_ips, ['10.112.78.69'])
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
class VCloud_1_5_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_classes = (
None, VCloud_1_5_MockHttp)
VCloud_1_5_MockHttp.type = None
self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_networks(self):
ret = self.driver.networks
self.assertEqual(
ret[0].get('href'), 'https://vm-vcloud/api/network/dca8b667-6c8f-4c3e-be57-7a9425dba4f4')
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testNode',
image=image,
size=size,
ex_vdc='MyVdc',
ex_network='vCloud - Default',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_create_node_clone(self):
image = self.driver.list_nodes()[0]
node = self.driver.create_node(name='testNode', image=image)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a')
self.assertEqual(node.name, 'testNode')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['65.41.67.2'])
self.assertEqual(node.private_ips, ['65.41.67.2'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e045',
'name': 'testVm',
'state': NodeState.RUNNING,
'public_ips': ['65.41.67.2'],
'private_ips': ['65.41.67.2'],
'os_type': 'rhel5_64Guest'
}]})
node = ret[1]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b')
self.assertEqual(node.name, 'testNode2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['192.168.0.103'])
self.assertEqual(node.private_ips, ['192.168.0.100'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e046',
'name': 'testVm2',
'state': NodeState.RUNNING,
'public_ips': ['192.168.0.103'],
'private_ips': ['192.168.0.100'],
'os_type': 'rhel5_64Guest'
}]})
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_validate_vm_names(self):
# valid inputs
self.driver._validate_vm_names(['host-n-ame-name'])
self.driver._validate_vm_names(['tc-mybuild-b1'])
self.driver._validate_vm_names(None)
# invalid inputs
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['invalid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['inv-alid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['hostnametoooolong'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['host$name'])
def test_change_vm_names(self):
self.driver._change_vm_names(
'/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', ['changed1', 'changed2'])
def test_is_node(self):
self.assertTrue(self.driver._is_node(
Node('testId', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver)))
self.assertFalse(self.driver._is_node(
NodeImage('testId', 'testNode', driver=self.driver)))
def test_ex_undeploy(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployTest', 'testNode', state=0,
public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_undeploy_with_error(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployErrorTest', 'testNode',
state=0, public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_find_node(self):
node = self.driver.ex_find_node('testNode')
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNode', self.driver.vdcs[0])
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNonExisting', self.driver.vdcs[0])
self.assertEqual(node, None)
def test_ex_add_vm_disk__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', 'invalid value')
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', '-1')
def test_ex_add_vm_disk(self):
self.driver.ex_add_vm_disk('https://test/api/vApp/vm-test', '20')
def test_ex_set_vm_cpu__with_invalid_values(self):
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', 50)
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', -1)
def test_ex_set_vm_cpu(self):
self.driver.ex_set_vm_cpu('https://test/api/vApp/vm-test', 4)
def test_ex_set_vm_memory__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', 777)
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', -1024)
def test_ex_set_vm_memory(self):
self.driver.ex_set_vm_memory('https://test/api/vApp/vm-test', 1024)
def test_vdcs(self):
vdcs = self.driver.vdcs
self.assertEqual(len(vdcs), 1)
self.assertEqual(
vdcs[0].id, 'https://vm-vcloud/api/vdc/3d9ae28c-1de9-4307-8107-9356ff8ba6d0')
self.assertEqual(vdcs[0].name, 'MyVdc')
self.assertEqual(vdcs[0].allocation_model, 'AllocationPool')
self.assertEqual(vdcs[0].storage.limit, 5120000)
self.assertEqual(vdcs[0].storage.used, 1984512)
self.assertEqual(vdcs[0].storage.units, 'MB')
self.assertEqual(vdcs[0].cpu.limit, 160000)
self.assertEqual(vdcs[0].cpu.used, 0)
self.assertEqual(vdcs[0].cpu.units, 'MHz')
self.assertEqual(vdcs[0].memory.limit, 527360)
self.assertEqual(vdcs[0].memory.used, 130752)
self.assertEqual(vdcs[0].memory.units, 'MB')
def test_ex_list_nodes(self):
self.assertEqual(
len(self.driver.ex_list_nodes()), len(self.driver.list_nodes()))
def test_ex_list_nodes__masked_exception(self):
"""
Test that we don't mask other exceptions.
"""
brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver)
self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc))
def test_ex_power_off(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_power_off_node(node)
def test_ex_query(self):
results = self.driver.ex_query(
'user', filter='name==jrambo', page=2, page_size=30, sort_desc='startDate')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['type'], 'UserRecord')
self.assertEqual(results[0]['name'], 'jrambo')
self.assertEqual(results[0]['isLdapUser'], 'true')
def test_ex_get_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = self.driver.ex_get_control_access(node)
self.assertEqual(
control_access.everyone_access_level, ControlAccess.AccessLevel.READ_ONLY)
self.assertEqual(len(control_access.subjects), 1)
self.assertEqual(control_access.subjects[0].type, 'group')
self.assertEqual(control_access.subjects[0].name, 'MyGroup')
self.assertEqual(control_access.subjects[
0].id, 'https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413')
self.assertEqual(control_access.subjects[
0].access_level, ControlAccess.AccessLevel.FULL_CONTROL)
def test_ex_set_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = ControlAccess(node, None, [Subject(
name='MyGroup',
type='group',
access_level=ControlAccess.AccessLevel.FULL_CONTROL)])
self.driver.ex_set_control_access(node, control_access)
def test_ex_get_metadata(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
metadata = self.driver.ex_get_metadata(node)
self.assertEqual(metadata, {'owners': '[email protected]'})
def test_ex_set_metadata_entry(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_set_metadata_entry(node, 'foo', 'bar')
class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_classes = (
None, VCloud_1_5_MockHttp)
VCloud_1_5_MockHttp.type = None
self.driver = VCloudNodeDriver(
*VCLOUD_PARAMS, **{'api_version': '5.1'})
self.assertTrue(isinstance(self.driver, VCloud_5_1_NodeDriver))
def _test_create_node_valid_ex_vm_memory(self):
# TODO: Hook up the fixture
values = [4, 1024, 4096]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
def test_create_node_invalid_ex_vm_memory(self):
values = [1, 3, 7]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
try:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
except ValueError:
pass
else:
self.fail('Exception was not thrown')
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
class TerremarkMockHttp(MockHttp):
fixtures = ComputeFileFixtures('terremark')
def _api_v0_8_login(self, method, url, body, headers):
headers['set-cookie'] = 'vcloud-token=testtoken'
body = self.fixtures.load('api_v0_8_login.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_org_240(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_org_240.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_catalog(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_catalogItem_5(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_catalogItem_5.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vdc_224_action_instantiateVAppTemplate.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_10496(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_10496.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_powerOn.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('api_v0_8_vapp_14031_get.xml')
elif method == 'DELETE':
body = ''
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_poweroff.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_11001(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_11001.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
class AnotherErrorMember(Exception):
"""
helper class for the synthetic exception
"""
def __init__(self):
self.tag = 'Error'
def get(self, foo):
return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN_1'
class AnotherError(Exception):
pass
class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase):
fixtures = ComputeFileFixtures('vcloud_1_5')
def request(self, method, url, body=None, headers=None, raw=False):
        self.assertTrue(url.startswith('/api/'),
                        ('"%s" is invalid. Needs to '
                         'start with "/api". The passed URL should be just '
                         'the path, not the full URL.' % url))
super(VCloud_1_5_MockHttp, self).request(method, url, body, headers,
raw)
def _api_sessions(self, method, url, body, headers):
headers['x-vcloud-authorization'] = 'testtoken'
body = self.fixtures.load('api_sessions.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org(self, method, url, body, headers):
body = self.fixtures.load('api_org.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a(self, method, url, body, headers):
body = self.fixtures.load(
'api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4(self, method, url, body, headers):
body = self.fixtures.load(
'api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_brokenVdc(self, method, url, body, headers):
body = self.fixtures.load('api_vdc_brokenVdc.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_errorRaiser(self, method, url, body, headers):
m = AnotherErrorMember()
raise AnotherError(m)
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
# Clone
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_networkConnectionSection(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a(self, method, url, body, headers):
status = httplib.OK
if method == 'GET':
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml')
status = httplib.OK
elif method == 'DELETE':
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers):
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_task_b034df55_fe81_4798_bc81_1f0fd0ead450(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalogItem_3132e037_759b_4627_9056_ca66466fa607(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployTest_action_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest_action_undeploy(self, method, url, body, headers):
if b('shutdown') in b(body):
body = self.fixtures.load('api_task_undeploy_error.xml')
else:
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeployError(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy_error.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_access_to_resource_forbidden(self, method, url, body, headers):
raise Exception(
ET.fromstring(self.fixtures.load('api_vApp_vapp_access_to_resource_forbidden.xml')))
def _api_vApp_vm_test(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_vm_test.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_test_virtualHardwareSection_disks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_cpu(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_memory(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_powerOff(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_query(self, method, url, body, headers):
assert method == 'GET'
if 'type=user' in url:
            self.assertTrue('page=2' in url)
            self.assertTrue('filter=(name==jrambo)' in url)
            self.assertTrue('sortDesc=startDate' in url)
body = self.fixtures.load('api_query_user.xml')
elif 'type=group' in url:
body = self.fixtures.load('api_query_group.xml')
else:
raise AssertionError('Unexpected query type')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('api_vapp_post_metadata.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
else:
body = self.fixtures.load('api_vapp_get_metadata.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_controlAccess(self, method, url, body, headers):
body = str(body)
self.assertTrue(method == 'POST')
self.assertTrue(
'<IsSharedToEveryone>false</IsSharedToEveryone>' in body)
self.assertTrue(
'<Subject href="https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413" />' in body)
self.assertTrue('<AccessLevel>FullControl</AccessLevel>' in body)
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413(self, method, url, body, headers):
body = self.fixtures.load(
'api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | -5,506,104,586,087,110,000 | 44.216179 | 119 | 0.631956 | false |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx_collection/plugins/modules/tower_credential.py | 1 | 13502 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright: (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_credential
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower credential.
description:
- Create, update, or destroy Ansible Tower credentials. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the credential.
required: True
type: str
new_name:
description:
      - Setting this option will change the existing name (looked up via the name field).
required: False
type: str
description:
description:
- The description to use for the credential.
type: str
organization:
description:
- Organization that should own the credential.
type: str
credential_type:
description:
- Name of credential type.
- Will be preferred over kind
type: str
inputs:
description:
- >-
Credential inputs where the keys are var names used in templating.
Refer to the Ansible Tower documentation for example syntax.
      - Any fields in this dict will take precedence over any fields mentioned below (e.g. host, username, etc.)
type: dict
user:
description:
- User that should own this credential.
type: str
team:
description:
- Team that should own this credential.
type: str
kind:
description:
- Type of credential being added.
- The ssh choice refers to a Tower Machine credential.
- Deprecated, please use credential_type
required: False
type: str
choices: ["ssh", "vault", "net", "scm", "aws", "vmware", "satellite6", "cloudforms", "gce", "azure_rm", "openstack", "rhv", "insights", "tower"]
host:
description:
- Host for this credential.
- Deprecated, will be removed in a future release
type: str
username:
description:
- Username for this credential. ``access_key`` for AWS.
- Deprecated, please use inputs
type: str
password:
description:
- Password for this credential. ``secret_key`` for AWS. ``api_key`` for RAX.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
project:
description:
- Project that should use this credential for GCP.
- Deprecated, will be removed in a future release
type: str
ssh_key_data:
description:
- SSH private key content. To extract the content from a file path, use the lookup function (see examples).
- Deprecated, please use inputs
type: str
ssh_key_unlock:
description:
- Unlock password for ssh_key.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
authorize:
description:
- Should use authorize for net type.
- Deprecated, please use inputs
type: bool
default: 'no'
authorize_password:
description:
- Password for net credentials that require authorize.
- Deprecated, please use inputs
type: str
client:
description:
- Client or application ID for azure_rm type.
- Deprecated, please use inputs
type: str
security_token:
description:
- STS token for aws type.
- Deprecated, please use inputs
type: str
secret:
description:
- Secret token for azure_rm type.
- Deprecated, please use inputs
type: str
subscription:
description:
- Subscription ID for azure_rm type.
- Deprecated, please use inputs
type: str
tenant:
description:
- Tenant ID for azure_rm type.
- Deprecated, please use inputs
type: str
domain:
description:
- Domain for openstack type.
- Deprecated, please use inputs
type: str
become_method:
description:
- Become method to use for privilege escalation.
- Some examples are "None", "sudo", "su", "pbrun"
- Due to become plugins, these can be arbitrary
- Deprecated, please use inputs
type: str
become_username:
description:
- Become username.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
become_password:
description:
- Become password.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
vault_password:
description:
- Vault password.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
vault_id:
description:
- Vault identifier.
- This parameter is only valid if C(kind) is specified as C(vault).
- Deprecated, please use inputs
type: str
state:
description:
- Desired state of the resource.
choices: ["present", "absent"]
default: "present"
type: str
extends_documentation_fragment: awx.awx.auth
notes:
  - Values given via `inputs` and the other deprecated fields (such as `tenant`) replace the existing values.
See the last 4 examples for details.
'''
EXAMPLES = '''
- name: Add tower machine credential
tower_credential:
name: Team Name
description: Team Description
organization: test-org
credential_type: Machine
state: present
tower_config_file: "~/tower_cli.cfg"
- name: Create a valid SCM credential from a private_key file
tower_credential:
name: SCM Credential
organization: Default
state: present
credential_type: Source Control
inputs:
username: joe
password: secret
ssh_key_data: "{{ lookup('file', '/tmp/id_rsa') }}"
ssh_key_unlock: "passphrase"
- name: Fetch private key
slurp:
src: '$HOME/.ssh/aws-private.pem'
register: aws_ssh_key
- name: Add Credential Into Tower
tower_credential:
name: Workshop Credential
credential_type: Machine
organization: Default
inputs:
ssh_key_data: "{{ aws_ssh_key['content'] | b64decode }}"
run_once: true
delegate_to: localhost
- name: Add Credential with Custom Credential Type
tower_credential:
name: Workshop Credential
credential_type: MyCloudCredential
organization: Default
tower_username: admin
tower_password: ansible
tower_host: https://localhost
- name: Create a Vault credential (example for notes)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'hello'
vault_id: 'My ID'
- name: Bad password update (will replace vault_id)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'new_password'
- name: Another bad password update (will replace vault_id)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
vault_password: 'new_password'
- name: A safe way to update a password and keep vault_id
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'new_password'
vault_id: 'My ID'
'''
from ..module_utils.tower_api import TowerAPIModule
KIND_CHOICES = {
'ssh': 'Machine',
'vault': 'Vault',
'net': 'Network',
'scm': 'Source Control',
'aws': 'Amazon Web Services',
'vmware': 'VMware vCenter',
'satellite6': 'Red Hat Satellite 6',
'cloudforms': 'Red Hat CloudForms',
'gce': 'Google Compute Engine',
'azure_rm': 'Microsoft Azure Resource Manager',
'openstack': 'OpenStack',
'rhv': 'Red Hat Virtualization',
'insights': 'Insights',
'tower': 'Ansible Tower',
}
OLD_INPUT_NAMES = (
'authorize', 'authorize_password', 'client',
'security_token', 'secret', 'tenant', 'subscription',
'domain', 'become_method', 'become_username',
'become_password', 'vault_password', 'project', 'host',
'username', 'password', 'ssh_key_data', 'vault_id',
'ssh_key_unlock'
)
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
name=dict(required=True),
new_name=dict(),
description=dict(),
organization=dict(),
credential_type=dict(),
inputs=dict(type='dict', no_log=True),
user=dict(),
team=dict(),
        # These are for backwards compatibility
kind=dict(choices=list(KIND_CHOICES.keys())),
host=dict(),
username=dict(),
password=dict(no_log=True),
project=dict(),
ssh_key_data=dict(no_log=True),
ssh_key_unlock=dict(no_log=True),
authorize=dict(type='bool'),
authorize_password=dict(no_log=True),
client=dict(),
security_token=dict(),
secret=dict(no_log=True),
subscription=dict(),
tenant=dict(),
domain=dict(),
become_method=dict(),
become_username=dict(),
become_password=dict(no_log=True),
vault_password=dict(no_log=True),
vault_id=dict(),
        # End backwards compatibility
state=dict(choices=['present', 'absent'], default='present'),
)
# Create a module for ourselves
module = TowerAPIModule(argument_spec=argument_spec, required_one_of=[['kind', 'credential_type']])
# Extract our parameters
name = module.params.get('name')
new_name = module.params.get('new_name')
description = module.params.get('description')
organization = module.params.get('organization')
credential_type = module.params.get('credential_type')
inputs = module.params.get('inputs')
user = module.params.get('user')
team = module.params.get('team')
# The legacy arguments are put into a hash down below
kind = module.params.get('kind')
    # End backwards compatibility
state = module.params.get('state')
    # Deprecation warnings
for legacy_input in OLD_INPUT_NAMES:
if module.params.get(legacy_input) is not None:
module.deprecate(msg='{0} parameter has been deprecated, please use inputs instead'.format(legacy_input), version="ansible.tower:4.0.0")
if kind:
module.deprecate(msg='The kind parameter has been deprecated, please use credential_type instead', version="ansible.tower:4.0.0")
cred_type_id = module.resolve_name_to_id('credential_types', credential_type if credential_type else KIND_CHOICES[kind])
if organization:
org_id = module.resolve_name_to_id('organizations', organization)
# Attempt to look up the object based on the provided name, credential type and optional organization
lookup_data = {
'name': name,
'credential_type': cred_type_id,
}
if organization:
lookup_data['organization'] = org_id
credential = module.get_one('credentials', **{'data': lookup_data})
if state == 'absent':
        # If the state was absent, we can let the module delete it if needed; the module will handle exiting from this
module.delete_if_needed(credential)
# Attempt to look up the related items the user specified (these will fail the module if not found)
if user:
user_id = module.resolve_name_to_id('users', user)
if team:
team_id = module.resolve_name_to_id('teams', team)
# Create credential input from legacy inputs
has_inputs = False
credential_inputs = {}
for legacy_input in OLD_INPUT_NAMES:
if module.params.get(legacy_input) is not None:
has_inputs = True
credential_inputs[legacy_input] = module.params.get(legacy_input)
if inputs:
has_inputs = True
credential_inputs.update(inputs)
# Create the data that gets sent for create and update
credential_fields = {
'name': new_name if new_name else name,
'credential_type': cred_type_id,
}
if has_inputs:
credential_fields['inputs'] = credential_inputs
if description:
credential_fields['description'] = description
if organization:
credential_fields['organization'] = org_id
# If we don't already have a credential (and we are creating one) we can add user/team
# The API does not appear to do anything with these after creation anyway
# NOTE: We can't just add these on a modification because they are never returned from a GET so it would always cause a changed=True
if not credential:
if user:
credential_fields['user'] = user_id
if team:
credential_fields['team'] = team_id
    # If the state was present, we can let the module build or update the existing credential; this will return on its own
module.create_or_update_if_needed(
credential, credential_fields, endpoint='credentials', item_type='credential'
)
if __name__ == '__main__':
main()
| apache-2.0 | -317,633,327,197,229,200 | 30.620609 | 150 | 0.63598 | false |
paweljasinski/ironpython3 | Src/StdLib/Lib/multiprocessing/semaphore_tracker.py | 5 | 4863 | #
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import errno
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
from . import current_process
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
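# A minimal usage sketch of the module-level helpers above (the semaphore
# name is hypothetical and the function is never called); it mirrors the
# flow described in the header comment: a process registers a name right
# after creating a named semaphore and unregisters it once it has unlinked
# the semaphore itself, so only leaked names remain for the tracker process
# to clean up at shutdown.
def _usage_sketch():
    ensure_running()                 # spawn the tracker process if needed
    register('/mp-example-sem')      # after creating the named semaphore
    unregister('/mp-example-sem')    # after a normal sem_unlink()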
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
| apache-2.0 | -3,411,210,705,306,444,000 | 32.537931 | 79 | 0.557269 | false |
benbox69/pyload | module/plugins/internal/Extractor.py | 6 | 4390 | # -*- coding: utf-8 -*-
import os
import re
from module.PyFile import PyFile
from module.plugins.internal.Plugin import Plugin
class ArchiveError(Exception):
pass
class CRCError(Exception):
pass
class PasswordError(Exception):
pass
class Extractor(Plugin):
__name__ = "Extractor"
__type__ = "extractor"
__version__ = "0.33"
__status__ = "testing"
__description__ = """Base extractor plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]"),
("Immenz" , "[email protected]" )]
EXTENSIONS = []
REPAIR = False
VERSION = None
@classmethod
def is_archive(cls, filename):
name = os.path.basename(filename).lower()
return any(name.endswith(ext) for ext in cls.EXTENSIONS)
@classmethod
def is_multipart(cls, filename):
return False
@classmethod
def find(cls):
"""
        Check if the system satisfies the dependencies
:return: boolean
"""
pass
@classmethod
def get_targets(cls, files_ids):
"""
        Filter the suited targets from a list of (filename, id, fout) tuples
        :param files_ids: List of file paths
        :return: List of (filename, id, fout) tuples suited for extraction
"""
targets = []
processed = []
for fname, id, fout in files_ids:
if cls.is_archive(fname):
pname = re.sub(cls.re_multipart, "", fname) if cls.is_multipart(fname) else os.path.splitext(fname)[0]
if pname not in processed:
processed.append(pname)
targets.append((fname, id, fout))
return targets
def __init__(self, plugin, filename, out,
fullpath=True,
overwrite=False,
excludefiles=[],
renice=0,
delete='No',
keepbroken=False,
fid=None):
"""
Initialize extractor for specific file
"""
self._init(plugin.pyload)
self.plugin = plugin
self.filename = filename
self.out = out
self.fullpath = fullpath
self.overwrite = overwrite
self.excludefiles = excludefiles
self.renice = renice
self.delete = delete
self.keepbroken = keepbroken
self.files = [] #: Store extracted files here
pyfile = self.pyload.files.getFile(fid) if fid else None
        self.notify_progress = (lambda x: pyfile.setProgress(x)) if pyfile else (lambda x: None)
self.init()
def init(self):
"""
Initialize additional data structures
"""
pass
def _log(self, level, plugintype, pluginname, messages):
return self.plugin._log(level,
plugintype,
self.plugin.__name__,
(self.__name__,) + messages)
def check(self):
"""
        Quick check by listing the content of the archive.
        Raise an error if a password is needed, the integrity is questionable, or similar.
:raises PasswordError
:raises CRCError
:raises ArchiveError
"""
raise NotImplementedError
def verify(self):
"""
        Test with the extractor's built-in method.
        Raise an error if a password is needed, the integrity is questionable, or similar.
:raises PasswordError
:raises CRCError
:raises ArchiveError
"""
raise NotImplementedError
def repair(self):
return None
def extract(self, password=None):
"""
Extract the archive. Raise specific errors in case of failure.
        :param password: password to use
:raises PasswordError
:raises CRCError
:raises ArchiveError
:return:
"""
raise NotImplementedError
def get_delete_files(self):
"""
Return list of files to delete, do *not* delete them here.
:return: List with paths of files to delete
"""
return [self.filename]
def list(self, password=None):
"""
Populate self.files at some point while extracting
"""
return self.files
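# A minimal sketch of a concrete subclass (not part of pyload; the
# zipfile-based bodies are assumptions for illustration only). Real plugins
# such as UnRar or UnZip fill in the same hooks, usually by shelling out to
# an external binary instead of using the standard library.
class _ZipSketchExtractor(Extractor):
    EXTENSIONS = ["zip"]
    def check(self):
        import zipfile
        with zipfile.ZipFile(self.filename) as archive:
            if archive.testzip() is not None:
                raise CRCError("Archive is corrupted")
    def extract(self, password=None):
        import zipfile
        with zipfile.ZipFile(self.filename) as archive:
            self.files = [os.path.join(self.out, name)
                          for name in archive.namelist()]
            archive.extractall(self.out, pwd=password)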
| gpl-3.0 | 5,119,132,440,145,374,000 | 23.80226 | 118 | 0.549658 | false |
xodus7/tensorflow | tensorflow/python/autograph/utils/type_check.py | 26 | 1170 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used in autograph-generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
def is_tensor(*args):
"""Check if any arguments are tensors.
Args:
*args: Python objects that may or may not be tensors.
Returns:
True if any *args are TensorFlow types, False if none are.
"""
return any([tensor_util.is_tensor(a) for a in args])
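# A minimal usage sketch (illustrative values only; assumes the public
# `tensorflow` package is importable; the function is never called):
def _is_tensor_usage_sketch():
  import tensorflow as tf
  assert is_tensor(tf.constant(1), 2)      # at least one argument is a tensor
  assert not is_tensor(1, 'a', [2, 3])     # no TensorFlow types at all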
| apache-2.0 | -2,660,473,299,974,355,000 | 34.454545 | 80 | 0.692308 | false |
yanheven/keystone | keystone/common/wsgi.py | 3 | 31025 | # Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import copy
import itertools
import urllib
from oslo_config import cfg
import oslo_i18n
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
import routes.middleware
import six
import webob.dec
import webob.exc
from keystone.common import dependency
from keystone.common import json_home
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
from keystone.models import token_model
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
def validate_token_bind(context, token_ref):
bind_mode = CONF.token.enforce_token_bind
if bind_mode == 'disabled':
return
if not isinstance(token_ref, token_model.KeystoneToken):
raise exception.UnexpectedError(_('token reference must be a '
'KeystoneToken type, got: %s') %
type(token_ref))
bind = token_ref.bind
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
# get the named mode if bind_mode is not one of the known
name = None if permissive or bind_mode == 'required' else bind_mode
if not bind:
if permissive:
# no bind provided and none required
return
else:
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
for bind_type, identifier in six.iteritems(bind):
if bind_type == 'kerberos':
if not (context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate'):
LOG.info(_LI("Kerberos credentials required and not present"))
raise exception.Unauthorized()
if not context['environment'].get('REMOTE_USER') == identifier:
LOG.info(_LI("Kerberos credentials do not match "
"those in bind"))
raise exception.Unauthorized()
LOG.info(_LI("Kerberos bind authentication successful"))
elif bind_mode == 'permissive':
LOG.debug(("Ignoring unknown bind for permissive mode: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
else:
LOG.info(_LI("Couldn't verify unknown bind: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
raise exception.Unauthorized()
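# A minimal sketch of how a caller exercises the check above (all values are
# hypothetical). With CONF.token.enforce_token_bind = 'kerberos' (a named
# mode), a token whose bind is {'kerberos': 'alice@EXAMPLE.COM'} validates
# only for a request whose WSGI environment carries a matching negotiate
# identity; the call returns None on success and raises Unauthorized
# otherwise.
def _token_bind_sketch(context, token_ref):
    # context['environment'] would contain, e.g.:
    #     {'AUTH_TYPE': 'Negotiate', 'REMOTE_USER': 'alice@EXAMPLE.COM'}
    # and token_ref is the token_model.KeystoneToken of the incoming token.
    validate_token_bind(context, token_ref)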
def best_match_language(req):
"""Determines the best available locale from the Accept-Language
HTTP header passed in the request.
"""
if not req.accept_language:
return None
return req.accept_language.best_match(
oslo_i18n.get_available_languages('keystone'))
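# A short usage sketch (the request below is synthetic): webob exposes the
# parsed Accept-Language header, and the helper narrows it to the locales
# keystone actually ships translations for.
def _best_match_language_sketch():
    req = webob.Request.blank(
        '/v3', headers={'Accept-Language': 'de, en;q=0.7'})
    return best_match_language(req)   # e.g. 'de' when available, else None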
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = keystone.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import keystone.fancy_api
keystone.fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify()
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
class Application(BaseApplication):
@webob.dec.wsgify()
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
# allow middleware up the stack to provide context, params and headers.
context = req.environ.get(CONTEXT_ENV, {})
context['query_string'] = dict(six.iteritems(req.params))
context['headers'] = dict(six.iteritems(req.headers))
context['path'] = req.environ['PATH_INFO']
scheme = (None if not CONF.secure_proxy_ssl_header
else req.environ.get(CONF.secure_proxy_ssl_header))
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
# the webob.Request instance is modified in order to use this
# scheme instead of the one defined by API, the call to
# webob.Request.relative_url() will return a URL with the correct
# scheme.
req.environ['wsgi.url_scheme'] = scheme
context['host_url'] = req.host_url
params = req.environ.get(PARAMS_ENV, {})
# authentication and authorization attributes are set as environment
# values by the container and processed by the pipeline. the complete
        # set is not yet known.
context['environment'] = req.environ
context['accept_header'] = req.accept
req.environ = None
params.update(arg_dict)
context.setdefault('is_admin', False)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(morganfainberg): use the request method to normalize the
# response code between GET and HEAD requests. The HTTP status should
# be the same.
req_method = req.environ['REQUEST_METHOD'].upper()
LOG.info('%(req_method)s %(path)s?%(params)s', {
'req_method': req_method,
'path': context['path'],
'params': urllib.urlencode(req.params)})
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning(
_LW("Authorization failed. %(exception)s from "
"%(remote_addr)s"),
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
return render_exception(e, context=context,
user_locale=best_match_language(req))
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, context=context,
user_locale=best_match_language(req))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
context=context,
user_locale=best_match_language(req))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
context=context,
user_locale=best_match_language(req))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, six.string_types):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code,
method=req_method)
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
controller = importutils.import_class('keystone.common.controller')
code = None
if isinstance(self, controller.V3Controller) and req_method == 'POST':
code = (201, 'Created')
return code
def _normalize_arg(self, arg):
return arg.replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
def assert_admin(self, context):
if not context['is_admin']:
try:
user_token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except exception.TokenNotFound as e:
raise exception.Unauthorized(e)
validate_token_bind(context, user_token_ref)
creds = copy.deepcopy(user_token_ref.metadata)
try:
creds['user_id'] = user_token_ref.user_id
except exception.UnexpectedError:
LOG.debug('Invalid user')
raise exception.Unauthorized()
if user_token_ref.project_scoped:
creds['tenant_id'] = user_token_ref.project_id
else:
LOG.debug('Invalid tenant')
raise exception.Unauthorized()
creds['roles'] = user_token_ref.role_names
# Accept either is_admin or the admin role
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
"""Returns true if the attribute in the given ref (which is a
dict) is empty or None.
"""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
"""Ensures the reference contains the specified attribute.
Raise a ValidationError if the given attribute is not present
"""
if self._attribute_is_empty(ref, attribute):
msg = _('%s field is required and cannot be empty') % attribute
raise exception.ValidationError(message=msg)
def _require_attributes(self, ref, attrs):
"""Ensures the reference contains the specified attributes.
Raise a ValidationError if any of the given attributes is not present
"""
missing_attrs = [attribute for attribute in attrs
if self._attribute_is_empty(ref, attribute)]
if missing_attrs:
msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
raise exception.ValidationError(message=msg)
def _get_trust_id_for_request(self, context):
"""Get the trust_id for a call.
        Retrieve the trust_id from the token.
        Returns None if the token is not trust scoped.
"""
if ('token_id' not in context or
context.get('token_id') == CONF.admin_token):
            LOG.debug(('will not look up trust as the request auth token is '
                       'either absent or it is the system admin token'))
return None
try:
token_data = self.token_provider_api.validate_token(
context['token_id'])
except exception.TokenNotFound:
LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
raise exception.Unauthorized()
token_ref = token_model.KeystoneToken(token_id=context['token_id'],
token_data=token_data)
return token_ref.trust_id
@classmethod
def base_url(cls, context, endpoint_type):
url = CONF['%s_endpoint' % endpoint_type]
if url:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
else:
# NOTE(jamielennox): if url is not set via the config file we
# should set it relative to the url that the user used to get here
# so as not to mess with version discovery. This is not perfect.
# host_url omits the path prefix, but there isn't another good
# solution that will work for all urls.
url = context['host_url']
return url.rstrip('/')
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
def __init__(self, application):
super(Middleware, self).__init__()
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify()
def __call__(self, request):
try:
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, request=request,
user_locale=best_match_language(request))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
request=request,
user_locale=best_match_language(request))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
request=request,
user_locale=best_match_language(request))
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify()
def __call__(self, req):
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key,
strutils.mask_password(value))
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug('%s', strutils.mask_password(line))
LOG.debug('')
resp = req.get_response(self.application)
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in six.iteritems(resp.headers):
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify()
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify()
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
msg = _('The resource could not be found.')
return render_exception(exception.NotFound(msg),
request=req,
user_locale=best_match_language(req))
app = match['controller']
return app
class ComposingRouter(Router):
def __init__(self, mapper=None, routers=None):
if mapper is None:
mapper = routes.Mapper()
if routers is None:
routers = []
for router in routers:
router.add_routes(mapper)
super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
"""Router that supports use by ComposingRouter."""
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.add_routes(mapper)
super(ComposableRouter, self).__init__(mapper)
def add_routes(self, mapper):
"""Add routes to given mapper."""
pass
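# A minimal sketch of a ComposableRouter subclass (the path, controller and
# action names are hypothetical): add_routes() is the only hook a composed
# router has to provide, and ComposingRouter calls it for every router it is
# given so that all routes end up in one shared mapper.
class _SketchRouter(ComposableRouter):
    def __init__(self, controller, mapper=None):
        self._controller = controller
        super(_SketchRouter, self).__init__(mapper)
    def add_routes(self, mapper):
        mapper.connect('/sketch-status',
                       controller=self._controller,
                       action='get_status')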
class ExtensionRouter(Router):
"""A router that allows extensions to supplement or overwrite routes.
Expects to be subclassed.
"""
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
mapper.connect('{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
pass
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
class RoutersBase(object):
"""Base class for Routers."""
def __init__(self):
self.v3_resources = []
def append_v3_routers(self, mapper, routers):
"""Append v3 routers.
Subclasses should override this method to map its routes.
Use self._add_resource() to map routes for a resource.
"""
def _add_resource(self, mapper, controller, path, rel,
get_action=None, head_action=None, get_head_action=None,
put_action=None, post_action=None, patch_action=None,
delete_action=None, get_post_action=None,
path_vars=None, status=json_home.Status.STABLE):
if get_head_action:
getattr(controller, get_head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_head_action,
conditions=dict(method=['GET', 'HEAD']))
if get_action:
getattr(controller, get_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_action,
conditions=dict(method=['GET']))
if head_action:
getattr(controller, head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=head_action,
conditions=dict(method=['HEAD']))
if put_action:
getattr(controller, put_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=put_action,
conditions=dict(method=['PUT']))
if post_action:
getattr(controller, post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=post_action,
conditions=dict(method=['POST']))
if patch_action:
getattr(controller, patch_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=patch_action,
conditions=dict(method=['PATCH']))
if delete_action:
getattr(controller, delete_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=delete_action,
conditions=dict(method=['DELETE']))
if get_post_action:
getattr(controller, get_post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_post_action,
conditions=dict(method=['GET', 'POST']))
resource_data = dict()
if path_vars:
resource_data['href-template'] = path
resource_data['href-vars'] = path_vars
else:
resource_data['href'] = path
json_home.Status.update_resource_data(resource_data, status)
self.v3_resources.append((rel, resource_data))
class V3ExtensionRouter(ExtensionRouter, RoutersBase):
"""Base class for V3 extension router."""
def __init__(self, application, mapper=None):
self.v3_resources = list()
super(V3ExtensionRouter, self).__init__(application, mapper)
def _update_version_response(self, response_data):
response_data['resources'].update(self.v3_resources)
@webob.dec.wsgify()
def __call__(self, request):
if request.path_info != '/':
# Not a request for version info so forward to super.
return super(V3ExtensionRouter, self).__call__(request)
response = request.get_response(self.application)
if response.status_code != 200:
# The request failed, so don't update the response.
return response
if response.headers['Content-Type'] != 'application/json-home':
# Not a request for JSON Home document, so don't update the
# response.
return response
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
response.body = jsonutils.dumps(response_data,
cls=utils.SmarterEncoder)
return response
def render_response(body=None, status=None, headers=None, method=None):
"""Forms a WSGI response."""
if headers is None:
headers = []
else:
headers = list(headers)
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
body = ''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
if content_types:
content_type = content_types[0]
else:
content_type = None
JSON_ENCODE_CONTENT_TYPES = ('application/json',
'application/json-home',)
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
if method == 'HEAD':
# NOTE(morganfainberg): HEAD requests should return the same status
# as a GET request and same headers (including content-type and
# content-length). The webob.Response object automatically changes
# content-length (and other headers) if the body is set to b''. Capture
# all headers and reset them on the response object after clearing the
# body. The body can only be set to a binary-type (not TextType or
# NoneType), so b'' is used here and should be compatible with
# both py2x and py3x.
stored_headers = resp.headers.copy()
resp.body = b''
for header, value in six.iteritems(stored_headers):
resp.headers[header] = value
return resp
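# Editor sketch (not part of the original module): how a controller-style
# helper might wrap render_response(); payload and header are illustrative.
def _example_render_widget(widget_ref):  # pragma: no cover - illustrative
    # widget_ref is assumed to be a JSON-serializable dict, e.g. {'widget': {...}}
    return render_response(body=widget_ref,
                           status=(200, 'OK'),
                           headers=[('X-Example-Header', 'editor-sketch')])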
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
# translate() didn't do anything because it wasn't a Message,
# convert to a string.
message = six.text_type(message)
body = {'error': {
'code': error.code,
'title': error.title,
'message': message,
}}
headers = []
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
url = CONF.public_endpoint
if not url:
if request:
context = {'host_url': request.host_url}
if context:
url = Application.base_url(context, 'public')
else:
url = 'http://localhost:%d' % CONF.eventlet_server.public_port
else:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),
body=body,
headers=headers)
| apache-2.0 | -321,072,724,926,001,100 | 36.651699 | 79 | 0.595133 | false |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_01_09_2015_01.py | 1 | 25014 |
# coding: utf-8
# In[1]:
# this part imports libs and load data from csv file
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb, PIL
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[2]:
# set settings for this script
settings = {}
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 1
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 1
settings['number_iterations'] = 10
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 50000#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 50000 #300
settings['hidden_layers_sizes'] = [200, 200, 200, 200, 200]
settings['corruption_levels'] = [0.5, 0.5, 0.5, 0.5, 0.5 ]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000]
settings['test_set_from_test'] = True
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
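# Editor sketch: a hypothetical call to saveAsCsv(); the score dict keys become
# extra CSV columns on the first write, and each tuple in the last argument is
# written as one row. All values shown are illustrative assumptions.
# saveAsCsv(False, 'example_report',
#           {'accuracy': 0.9, 'precision': 0.8},
#           [(1, 10000, 'SVM', True, 0.9, 0.8)])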
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
print x_train.shape, y_train.shape, x_validation.shape, y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + '_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings)
# In[48]:
# save objects
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'wb') as handle:
pickle.dump(sda, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_original.pickle', 'wb') as handle:
pickle.dump(a_MAE_original, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_A.pickle', 'wb') as handle:
pickle.dump(a_MAE_A, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_B.pickle', 'wb') as handle:
pickle.dump(a_MAE_B, handle)
x = logger.handlers[:]
for i in x:
    logger.removeHandler(i)
    i.flush()
    i.close()
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 | -2,474,494,673,687,344,600 | 46.375 | 258 | 0.611378 | false |
maizy/ambient7 | ambient7-arduino/serial2influxdb/serial2influxdb.py | 1 | 4275 | #!/usr/bin/env python3
# coding: utf-8
import argparse
import configparser
import logging
import time
import re
import datetime
import serial
import influxdb
SERIAL_RETRY_DELAY = 5.0
logger = logging.getLogger('s2i')
def parse_args_and_config(args):
parser = argparse.ArgumentParser(description='ambient7 - serial2influxdb')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('config', metavar='config.cfg', type=argparse.FileType('r', encoding='UTF-8'),
default='./config.cfg', help='path to config', nargs='?')
cli_args = parser.parse_args(args)
config = configparser.ConfigParser()
config.read_file(cli_args.config)
return cli_args, config
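# Editor sketch: a minimal config.cfg that this parser and the code below
# would accept; every value here is an illustrative assumption.
#
#   [serial]
#   tty = /dev/ttyUSB0
#   baud = 9600
#
#   [influxdb]
#   server = localhost
#   port = 8086
#   database = ambient7
#
#   [metrics]
#   tags = room=bedroom,source=arduino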
def open_serial(config):
while True:
try:
return serial.Serial(
port=config['serial']['tty'],
baudrate=int(config['serial']['baud']),
timeout=1,
exclusive=True
)
except serial.SerialException as e:
logger.warning('unable to open pyserial connection: {}'.format(e))
logger.info('retry after {} second'.format(SERIAL_RETRY_DELAY))
time.sleep(SERIAL_RETRY_DELAY)
def resilient_line_generator(config):
ser = None
while True:
if ser is None:
ser = open_serial(config)
try:
byte_str = ser.readline()
except serial.SerialException as e:
try:
ser.close()
except Exception:
pass
ser = None
continue
if byte_str not in (b'', b'\r\n'):
try:
yield byte_str.decode('utf-8').rstrip('\r\n')
except UnicodeDecodeError:
pass
def collect_data(key, value, tags=None):
data = {
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'measurement': key,
}
if tags:
data['tags'] = tags
if key == 'uptime':
data['fields'] = {'value': int(value.rstrip('s'))}
elif key == 'humidity':
data['fields'] = {'value': float(value.strip('%'))}
elif key == 'co2':
if value.endswith('PPM'):
value = value[:-3]
data['fields'] = {'value': int(value)}
elif key == 'temperature':
data['fields'] = {'value': float(value.strip('C'))}
else:
return None
return [data]
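# Editor sketch: collect_data('co2', '415PPM', {'room': 'bedroom'}) would
# return roughly the following single-point list (timestamp abbreviated):
# [{'time': '2015-01-01T00:00:00Z', 'measurement': 'co2',
#   'tags': {'room': 'bedroom'}, 'fields': {'value': 415}}]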
def build_influxdb_client(config):
opts = {
'host': config['influxdb']['server'],
'port': int(config['influxdb']['port']),
'database': config['influxdb']['database']
}
if 'username' in config['influxdb']:
opts['username'] = config['influxdb']['username']
opts['password'] = config['influxdb']['password']
return influxdb.InfluxDBClient(**opts)
def main(args):
cli_args, config = parse_args_and_config(args)
influxdb_client = build_influxdb_client(config)
logging.basicConfig(
level=logging.DEBUG if cli_args.verbose else logging.INFO,
stream=sys.stderr,
format='%(asctime)s %(levelname).1s %(message)s'
)
tags = {}
if 'metrics' in config and 'tags' in config['metrics']:
for pair in config['metrics']['tags'].split(','):
tag_key, _, tag_value = pair.partition('=')
tags[tag_key] = tag_value
try:
for line in resilient_line_generator(config):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("receive line: %r", line)
data_match = re.match(r'DATA: (?P<key>[a-z0-9_]+)=(?P<value>.+)', line, re.IGNORECASE)
if data_match is not None:
key = data_match.group('key')
raw_value = data_match.group('value')
logging.info('%s=%s', key, raw_value)
data = collect_data(key, raw_value, tags)
if data is not None:
try:
influxdb_client.write_points(data)
except Exception as e:
logger.warning("Unable to send data to influxdb: %s", e)
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| apache-2.0 | 4,087,495,277,312,809,500 | 29.319149 | 102 | 0.553684 | false |
mannygit/zerorpc-python | zerorpc/events.py | 51 | 9209 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .context import Context
class Sender(object):
def __init__(self, socket):
self._socket = socket
self._send_queue = gevent.queue.Channel()
self._send_task = gevent.spawn(self._sender)
def __del__(self):
self.close()
def close(self):
if self._send_task:
self._send_task.kill()
def _sender(self):
running = True
for parts in self._send_queue:
for i in xrange(len(parts) - 1):
try:
self._socket.send(parts[i], flags=zmq.SNDMORE)
except gevent.GreenletExit:
if i == 0:
return
running = False
self._socket.send(parts[i], flags=zmq.SNDMORE)
self._socket.send(parts[-1])
if not running:
return
def __call__(self, parts):
self._send_queue.put(parts)
class Receiver(object):
def __init__(self, socket):
self._socket = socket
self._recv_queue = gevent.queue.Channel()
self._recv_task = gevent.spawn(self._recver)
def __del__(self):
self.close()
def close(self):
if self._recv_task:
self._recv_task.kill()
def _recver(self):
running = True
while True:
parts = []
while True:
try:
part = self._socket.recv()
except gevent.GreenletExit:
running = False
if len(parts) == 0:
return
part = self._socket.recv()
parts.append(part)
if not self._socket.getsockopt(zmq.RCVMORE):
break
if not running:
break
self._recv_queue.put(parts)
def __call__(self):
return self._recv_queue.get()
class Event(object):
__slots__ = ['_name', '_args', '_header']
def __init__(self, name, args, context, header=None):
self._name = name
self._args = args
if header is None:
self._header = {
'message_id': context.new_msgid(),
'v': 3
}
else:
self._header = header
@property
def header(self):
return self._header
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def args(self):
return self._args
def pack(self):
return msgpack.Packer().pack((self._header, self._name, self._args))
@staticmethod
def unpack(blob):
unpacker = msgpack.Unpacker()
unpacker.feed(blob)
unpacked_msg = unpacker.unpack()
try:
(header, name, args) = unpacked_msg
except Exception as e:
raise Exception('invalid msg format "{0}": {1}'.format(
unpacked_msg, e))
# Backward compatibility
if not isinstance(header, dict):
header = {}
return Event(name, args, None, header)
def __str__(self, ignore_args=False):
if ignore_args:
args = '[...]'
else:
args = self._args
try:
args = '<<{0}>>'.format(str(self.unpack(self._args)))
except:
pass
return '{0} {1} {2}'.format(self._name, self._header,
args)
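# Editor sketch (not part of zerorpc): round-tripping an Event through
# msgpack; the context argument may be None when a header is given explicitly.
def _example_event_roundtrip():  # pragma: no cover - illustrative sketch
    original = Event('hello', ('world',), None,
                     header={'message_id': 1, 'v': 3})
    blob = original.pack()
    restored = Event.unpack(blob)
    return restored.name, restored.args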
class Events(object):
def __init__(self, zmq_socket_type, context=None):
self._zmq_socket_type = zmq_socket_type
self._context = context or Context.get_instance()
self._socket = zmq.Socket(self._context, zmq_socket_type)
self._send = self._socket.send_multipart
self._recv = self._socket.recv_multipart
if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER):
self._send = Sender(self._socket)
if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER):
self._recv = Receiver(self._socket)
@property
def recv_is_available(self):
return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER)
def __del__(self):
try:
if not self._socket.closed:
self.close()
except AttributeError:
pass
def close(self):
try:
self._send.close()
except AttributeError:
pass
try:
self._recv.close()
except AttributeError:
pass
self._socket.close()
def _resolve_endpoint(self, endpoint, resolve=True):
if resolve:
endpoint = self._context.hook_resolve_endpoint(endpoint)
if isinstance(endpoint, (tuple, list)):
r = []
for sub_endpoint in endpoint:
r.extend(self._resolve_endpoint(sub_endpoint, resolve))
return r
return [endpoint]
def connect(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.connect(endpoint_))
return r
def bind(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.bind(endpoint_))
return r
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, context=self._context)
for k, v in xheader.items():
if k == 'zmqid':
continue
event.header[k] = v
return event
def emit_event(self, event, identity=None):
if identity is not None:
parts = list(identity)
parts.extend(['', event.pack()])
elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER):
parts = ('', event.pack())
else:
parts = (event.pack(),)
self._send(parts)
def emit(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = self.create_event(name, args, xheader)
identity = xheader.get('zmqid', None)
return self.emit_event(event, identity)
def recv(self):
parts = self._recv()
if len(parts) == 1:
identity = None
blob = parts[0]
else:
identity = parts[0:-2]
blob = parts[-1]
event = Event.unpack(blob)
if identity is not None:
event.header['zmqid'] = identity
return event
def setsockopt(self, *args):
return self._socket.setsockopt(*args)
@property
def context(self):
return self._context
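# Editor sketch (not part of zerorpc): a minimal PUSH/PULL pair sharing the
# default Context over an inproc endpoint; the endpoint name is illustrative.
def _example_push_pull():  # pragma: no cover - illustrative sketch
    pusher = Events(zmq.PUSH)
    pusher.bind('inproc://events_example')
    puller = Events(zmq.PULL)
    puller.connect('inproc://events_example')
    pusher.emit('ping', (1, 2))
    event = puller.recv()   # blocks in a gevent-friendly way
    return event.name, event.args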
class WrappedEvents(object):
def __init__(self, channel):
self._channel = channel
def close(self):
pass
@property
def recv_is_available(self):
return self._channel.recv_is_available
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, self._channel.context)
event.header.update(xheader)
return event
def emit_event(self, event, identity=None):
event_payload = (event.header, event.name, event.args)
wrapper_event = self._channel.create_event('w', event_payload)
self._channel.emit_event(wrapper_event)
def emit(self, name, args, xheader=None):
wrapper_event = self.create_event(name, args, xheader)
self.emit_event(wrapper_event)
def recv(self, timeout=None):
wrapper_event = self._channel.recv()
(header, name, args) = wrapper_event.args
return Event(name, args, None, header)
@property
def context(self):
return self._channel.context
| mit | 2,456,981,820,384,678,400 | 28.802589 | 83 | 0.5689 | false |
yaroslavvb/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/estimator_utils.py | 75 | 7274 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions relating DataFrames to Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers import feature_column
from tensorflow.contrib.learn.python.learn.dataframe import series as ss
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import parsing_ops
def _to_feature_spec(tensor, default_value=None):
if isinstance(tensor, sparse_tensor.SparseTensor):
return parsing_ops.VarLenFeature(dtype=tensor.dtype)
else:
return parsing_ops.FixedLenFeature(shape=tensor.get_shape(),
dtype=tensor.dtype,
default_value=default_value)
def _infer_feature_specs(dataframe, keys_with_defaults):
with ops.Graph().as_default():
tensors = dataframe.build()
feature_specs = {
name: _to_feature_spec(tensor, keys_with_defaults.get(name))
for name, tensor in tensors.items()}
return feature_specs
def _build_alternate_universe(
dataframe, base_input_keys_with_defaults, feature_keys):
"""Create an alternate universe assuming that the base series are defined.
The resulting graph will be used with an `input_fn` that provides exactly
those features.
Args:
dataframe: the underlying `DataFrame`
base_input_keys_with_defaults: a `dict` from the names of columns to
considered base features to their default values.
feature_keys: the names of columns to be used as features (including base
features and derived features).
Returns:
A `dict` mapping names to rebuilt `Series`.
"""
feature_specs = _infer_feature_specs(dataframe, base_input_keys_with_defaults)
alternate_universe_map = {
dataframe[name]: ss.PredefinedSeries(name, feature_specs[name])
for name in base_input_keys_with_defaults.keys()
}
def _in_alternate_universe(orig_series):
# pylint: disable=protected-access
# Map Series in the original DataFrame to series rebuilt assuming base_keys.
try:
return alternate_universe_map[orig_series]
except KeyError:
rebuilt_inputs = []
for i in orig_series._input_series:
rebuilt_inputs.append(_in_alternate_universe(i))
rebuilt_series = ss.TransformedSeries(rebuilt_inputs,
orig_series._transform,
orig_series._output_name)
alternate_universe_map[orig_series] = rebuilt_series
return rebuilt_series
orig_feature_series_dict = {fk: dataframe[fk] for fk in feature_keys}
new_feature_series_dict = ({name: _in_alternate_universe(x)
for name, x in orig_feature_series_dict.items()})
return new_feature_series_dict, feature_specs
def to_feature_columns_and_input_fn(dataframe,
base_input_keys_with_defaults,
feature_keys,
label_keys=None,
**kwargs):
"""Build a list of FeatureColumns and an input_fn for use with Estimator.
Args:
dataframe: the underlying dataframe
base_input_keys_with_defaults: a dict from the names of columns to be
considered base features to their default values. These columns will be
fed via input_fn.
feature_keys: the names of columns from which to generate FeatureColumns.
These may include base features and/or derived features.
label_keys: the names of columns to be used as labels. None is
acceptable for unsupervised learning.
**kwargs: Additional keyword arguments, unused here.
Returns:
A tuple of two elements:
* A list of `FeatureColumn`s to be used when constructing an Estimator
* An input_fn, i.e. a function that returns a pair of dicts
(features, labels), each mapping string names to Tensors.
the feature dict provides mappings for all the base columns required
by the FeatureColumns.
Raises:
ValueError: when the feature and label key sets are non-disjoint, or the
base_input and label sets are non-disjoint.
"""
if feature_keys is None or not feature_keys:
raise ValueError("feature_keys must be specified.")
if label_keys is None:
label_keys = []
base_input_keys = base_input_keys_with_defaults.keys()
in_two = (set(feature_keys) & set(label_keys)) or (set(base_input_keys) &
set(label_keys))
if in_two:
raise ValueError("Columns cannot be used for both features and labels: %s"
% ", ".join(in_two))
# Obtain the feature series in the alternate universe
new_feature_series_dict, feature_specs = _build_alternate_universe(
dataframe, base_input_keys_with_defaults, feature_keys)
# TODO(soergel): Allow non-real, non-dense DataFrameColumns
for key in new_feature_series_dict.keys():
spec = feature_specs[key]
if not (
isinstance(spec, parsing_ops.FixedLenFeature)
and (spec.dtype.is_integer or spec.dtype.is_floating)):
raise ValueError("For now, only real dense columns can be passed from "
"DataFrame to Estimator. %s is %s of %s" % (
(key, type(spec).__name__, spec.dtype)))
# Make FeatureColumns from these
feature_columns = [feature_column.DataFrameColumn(name, s)
for name, s in new_feature_series_dict.items()]
# Make a new DataFrame with only the Series needed for input_fn.
# This is important to avoid starting queue feeders that won't be used.
limited_dataframe = dataframe.select_columns(
list(base_input_keys) + list(label_keys))
# Build an input_fn suitable for use with Estimator.
def input_fn():
"""An input_fn() for feeding the given set of DataFrameColumns."""
# It's important to build all the tensors together in one DataFrame.
# If we did df.select() for both key sets and then build those, the two
# resulting DataFrames would be shuffled independently.
tensors = limited_dataframe.build(**kwargs)
base_input_features = {key: tensors[key] for key in base_input_keys}
labels = {key: tensors[key] for key in label_keys}
# TODO(soergel): Remove this special case when b/30367437 is fixed.
if len(labels) == 1:
labels = list(labels.values())[0]
return base_input_features, labels
return feature_columns, input_fn
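# Editor sketch: one way this helper might be wired into a contrib.learn
# Estimator. `df` is assumed to be an already-built learn DataFrame with
# "age", "income" and "label" columns; the estimator class and column names
# are illustrative assumptions, not prescribed usage.
#
#   feature_columns, input_fn = to_feature_columns_and_input_fn(
#       df,
#       base_input_keys_with_defaults={'age': 0, 'income': 0.0},
#       feature_keys=['age', 'income'],
#       label_keys=['label'])
#   estimator = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns)
#   estimator.fit(input_fn=input_fn, steps=100)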
| apache-2.0 | 520,605,405,067,000,260 | 40.329545 | 80 | 0.668683 | false |
Arable/old-www-do-not-use | lib/python2.7/site-packages/requests/packages/urllib3/_collections.py | 310 | 3111 | # urllib3/_collections.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
        ``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return self._container.keys()
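# Editor sketch (not part of urllib3): an LRU mapping of two entries where the
# evicted value is handed to dispose_func; names and values are illustrative.
def _example_recently_used_container():  # pragma: no cover - illustrative
    evicted = []
    container = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    container['a'] = 'conn-a'
    container['b'] = 'conn-b'
    container['a']              # touch 'a'; 'b' is now least recently used
    container['c'] = 'conn-c'   # exceeds maxsize, so 'b' is evicted
    return evicted              # expected: ['conn-b']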
| mit | -5,688,311,323,667,925,000 | 29.203883 | 92 | 0.620058 | false |
psychopy/versions | psychopy/app/utils.py | 1 | 17380 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""utility classes for the Builder
"""
from __future__ import absolute_import, division, print_function
import os
from builtins import object
from wx.lib.agw.aui.aui_constants import *
from wx.lib.agw.aui.aui_utilities import IndentPressedBitmap, ChopText, TakeScreenShot
import sys
import wx
import wx.lib.agw.aui as aui
from wx.lib import platebtn
import psychopy
from psychopy import logging
from . import pavlovia_ui
from . import icons
from .themes import ThemeMixin
from psychopy.localization import _translate
class FileDropTarget(wx.FileDropTarget):
"""On Mac simply setting a handler for the EVT_DROP_FILES isn't enough.
Need this too.
"""
def __init__(self, targetFrame):
wx.FileDropTarget.__init__(self)
self.target = targetFrame
def OnDropFiles(self, x, y, filenames):
logging.debug(
'PsychoPyBuilder: received dropped files: %s' % filenames)
for fname in filenames:
if fname.endswith('.psyexp') or fname.lower().endswith('.py'):
self.target.fileOpen(filename=fname)
else:
logging.warning(
'dropped file ignored: did not end in .psyexp or .py')
return True
class WindowFrozen(object):
"""
Equivalent to wxWindowUpdateLocker.
Usage::
with WindowFrozen(wxControl):
update multiple things
# will automatically thaw here
"""
def __init__(self, ctrl):
self.ctrl = ctrl
def __enter__(self): # started the with... statement
# Freeze should not be called if platform is win32.
if sys.platform == 'win32':
return self.ctrl
# check it hasn't been deleted
#
# Don't use StrictVersion() here, as `wx` doesn't follow the required
# numbering scheme.
if self.ctrl is not None and wx.__version__[:3] <= '2.8':
self.ctrl.Freeze()
return self.ctrl
def __exit__(self, exc_type, exc_val, exc_tb):
# Thaw should not be called if platform is win32.
if sys.platform == 'win32':
return
# check it hasn't been deleted
if self.ctrl is not None and self.ctrl.IsFrozen():
self.ctrl.Thaw()
def getSystemFonts(encoding='system', fixedWidthOnly=False):
"""Get a list of installed system fonts.
Parameters
----------
encoding : str
Get fonts with matching encodings.
fixedWidthOnly : bool
Return on fixed width fonts.
Returns
-------
list
List of font facenames.
"""
fontEnum = wx.FontEnumerator()
encoding = "FONTENCODING_" + encoding.upper()
if hasattr(wx, encoding):
encoding = getattr(wx, encoding)
return fontEnum.GetFacenames(encoding, fixedWidthOnly=fixedWidthOnly)
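# Editor sketch: a hypothetical call, assuming a wx.App instance has been
# created first (font enumeration generally requires one):
# app = wx.App(False)
# monospaceFonts = getSystemFonts(fixedWidthOnly=True)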
class PsychopyToolbar(wx.ToolBar, ThemeMixin):
"""Toolbar for the Builder/Coder Frame"""
def __init__(self, frame):
wx.ToolBar.__init__(self, frame)
self.frame = frame
self.app = self.frame.app
self._needMakeTools = True
# Configure toolbar appearance
self.SetWindowStyle(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_NODIVIDER)
#self.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
# Set icon size (16 for win/linux small mode, 32 for everything else
self.iconSize = 32 # mac: 16 either doesn't work, or looks bad
self.SetToolBitmapSize((self.iconSize, self.iconSize))
# OS-dependent tool-tips
ctrlKey = 'Ctrl+'
if sys.platform == 'darwin':
ctrlKey = 'Cmd+'
# keys are the keyboard keys, not the keys of the dict
self.keys = {k: self.frame.app.keys[k].replace('Ctrl+', ctrlKey)
for k in self.frame.app.keys}
self.keys['none'] = ''
# self.makeTools() # will be done when theme is applied
# Finished setup. Make it happen
def makeTools(self):
frame = self.frame
# Create tools
cl = frame.__class__.__name__
pavButtons = pavlovia_ui.toolbar.PavloviaButtons(
frame, toolbar=self, tbSize=self.iconSize)
if frame.__class__.__name__ == 'BuilderFrame':
self.addPsychopyTool(
name='filenew',
label=_translate('New'),
shortcut='new',
tooltip=_translate("Create new experiment file"),
func=self.frame.app.newBuilderFrame) # New
self.addPsychopyTool(
name='fileopen',
label=_translate('Open'),
shortcut='open',
tooltip=_translate("Open an existing experiment file"),
func=self.frame.fileOpen) # Open
self.frame.bldrBtnSave = self.addPsychopyTool(
name='filesave',
label=_translate('Save'),
shortcut='save',
tooltip=_translate("Save current experiment file"),
func=self.frame.fileSave) # Save
self.addPsychopyTool(
name='filesaveas',
label=_translate('Save As...'),
shortcut='saveAs',
tooltip=_translate("Save current experiment file as..."),
func=self.frame.fileSaveAs) # SaveAs
self.frame.bldrBtnUndo = self.addPsychopyTool(
name='undo',
label=_translate('Undo'),
shortcut='undo',
tooltip=_translate("Undo last action"),
func=self.frame.undo) # Undo
self.frame.bldrBtnRedo = self.addPsychopyTool(
name='redo',
label=_translate('Redo'),
shortcut='redo',
tooltip=_translate("Redo last action"),
func=self.frame.redo) # Redo
self.AddSeparator() # Seperator
self.addPsychopyTool(
name='monitors',
label=_translate('Monitor Center'),
shortcut='none',
tooltip=_translate("Monitor settings and calibration"),
func=self.frame.app.openMonitorCenter) # Monitor Center
self.addPsychopyTool(
name='cogwindow',
label=_translate('Experiment Settings'),
shortcut='none',
tooltip=_translate("Edit experiment settings"),
func=self.frame.setExperimentSettings) # Settings
self.AddSeparator()
self.addPsychopyTool(
name='compile',
label=_translate('Compile Script'),
shortcut='compileScript',
tooltip=_translate("Compile to script"),
func=self.frame.compileScript) # Compile
self.frame.bldrBtnRunner = self.addPsychopyTool(
name='runner',
label=_translate('Runner'),
shortcut='runnerScript',
tooltip=_translate("Send experiment to Runner"),
func=self.frame.runFile) # Run
self.frame.bldrBtnRun = self.addPsychopyTool(
name='run',
label=_translate('Run'),
shortcut='runScript',
tooltip=_translate("Run experiment"),
func=self.frame.runFile) # Run
self.AddSeparator() # Seperator
pavButtons.addPavloviaTools()
elif frame.__class__.__name__ == 'CoderFrame':
self.addPsychopyTool('filenew', _translate('New'), 'new',
_translate("Create new experiment file"),
self.frame.fileNew) # New
self.addPsychopyTool('fileopen', _translate('Open'), 'open',
_translate("Open an existing experiment file"),
self.frame.fileOpen) # Open
self.frame.cdrBtnSave = \
self.addPsychopyTool('filesave', _translate('Save'), 'save',
_translate("Save current experiment file"),
self.frame.fileSave) # Save
self.addPsychopyTool('filesaveas', _translate('Save As...'), 'saveAs',
_translate("Save current experiment file as..."),
self.frame.fileSaveAs) # SaveAs
self.frame.cdrBtnUndo = \
self.addPsychopyTool('undo', _translate('Undo'), 'undo',
_translate("Undo last action"),
self.frame.undo) # Undo
self.frame.cdrBtnRedo = \
self.addPsychopyTool('redo', _translate('Redo'), 'redo',
_translate("Redo last action"),
self.frame.redo) # Redo
self.AddSeparator() # Seperator
self.addPsychopyTool('monitors', _translate('Monitor Center'), 'none',
_translate("Monitor settings and calibration"),
self.frame.app.openMonitorCenter)
self.addPsychopyTool('color', _translate('Color Picker'), 'none',
_translate("Color Picker -> clipboard"),
self.frame.app.colorPicker)
self.AddSeparator()
self.frame.cdrBtnRunner = self.addPsychopyTool(
'runner', _translate('Runner'), 'runnerScript',
_translate("Send experiment to Runner"),
self.frame.runFile)
self.frame.cdrBtnRun = self.addPsychopyTool(
'run', _translate('Run'), 'runScript',
_translate("Run experiment"),
self.frame.runFile)
self.AddSeparator()
pavButtons.addPavloviaTools(
buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser'])
frame.btnHandles.update(pavButtons.btnHandles)
self.Realize()
def addPsychopyTool(self, name, label, shortcut, tooltip, func,
emblem=None):
if not name.endswith('.png'):
name += '.png'
item = self.app.iconCache.makeBitmapButton(parent=self, filename=name,
name=label,
label=("%s [%s]" % (
label,
self.keys[shortcut])),
emblem=emblem, toolbar=self,
tip=tooltip,
size=self.iconSize)
# Bind function
self.Bind(wx.EVT_TOOL, func, item)
return item
class PsychopyPlateBtn(platebtn.PlateButton, ThemeMixin):
def __init__(self, parent, id=wx.ID_ANY, label='', bmp=None,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=1, name=wx.ButtonNameStr):
platebtn.PlateButton.__init__(self, parent, id, label, bmp, pos, size, style, name)
self.parent = parent
self.__InitColors()
self._applyAppTheme()
def _applyAppTheme(self):
cs = ThemeMixin.appColors
self.__InitColors()
self.SetBackgroundColour(wx.Colour(self.parent.GetBackgroundColour()))
self.SetPressColor(cs['txtbutton_bg_hover'])
self.SetLabelColor(cs['text'],
cs['txtbutton_fg_hover'])
    def __InitColors(self):
        """Initialize the default colors"""
        cs = ThemeMixin.appColors
colors = dict(default=True,
hlight=cs['txtbutton_bg_hover'],
press=cs['txtbutton_bg_hover'],
htxt=cs['text'])
return colors
class PsychopyScrollbar(wx.ScrollBar):
def __init__(self, parent, ori=wx.VERTICAL):
wx.ScrollBar.__init__(self)
if ori == wx.HORIZONTAL:
style = wx.SB_HORIZONTAL
else:
style = wx.SB_VERTICAL
self.Create(parent, style=style)
self.ori = ori
self.parent = parent
self.Bind(wx.EVT_SCROLL, self.DoScroll)
self.Resize()
def DoScroll(self, event):
if self.ori == wx.HORIZONTAL:
w = event.GetPosition()
h = self.parent.GetScrollPos(wx.VERTICAL)
elif self.ori == wx.VERTICAL:
w = self.parent.GetScrollPos(wx.HORIZONTAL)
h = event.GetPosition()
else:
return
self.parent.Scroll(w, h)
self.Resize()
def Resize(self):
sz = self.parent.GetSize()
vsz = self.parent.GetVirtualSize()
start = self.parent.GetViewStart()
if self.ori == wx.HORIZONTAL:
sz = (sz.GetWidth(), 20)
vsz = vsz.GetWidth()
elif self.ori == wx.VERTICAL:
sz = (20, sz.GetHeight())
vsz = vsz.GetHeight()
self.SetDimensions(start[0], start[1], sz[0], sz[1])
self.SetScrollbar(
position=self.GetScrollPos(self.ori),
thumbSize=10,
range=1,
pageSize=vsz
)
class FrameSwitcher(wx.Menu):
"""Menu for switching between different frames"""
def __init__(self, parent):
wx.Menu.__init__(self)
self.parent = parent
self.app = parent.app
self.itemFrames = {}
# Listen for window switch
self.next = self.Append(wx.ID_MDI_WINDOW_NEXT,
_translate("&Next Window\t%s") % self.app.keys['cycleWindows'],
_translate("&Next Window\t%s") % self.app.keys['cycleWindows'])
self.Bind(wx.EVT_MENU, self.nextWindow, self.next)
self.AppendSeparator()
# Add creator options
self.minItemSpec = [
{'label': "Builder", 'class': psychopy.app.builder.BuilderFrame, 'method': self.app.showBuilder},
{'label': "Coder", 'class': psychopy.app.coder.CoderFrame, 'method': self.app.showCoder},
{'label': "Runner", 'class': psychopy.app.runner.RunnerFrame, 'method': self.app.showRunner},
]
for spec in self.minItemSpec:
if not isinstance(self.Window, spec['class']):
item = self.Append(
wx.ID_ANY, spec['label'], spec['label']
)
self.Bind(wx.EVT_MENU, spec['method'], item)
self.AppendSeparator()
self.updateFrames()
@property
def frames(self):
return self.parent.app.getAllFrames()
def updateFrames(self):
"""Set items according to which windows are open"""
self.next.Enable(len(self.frames) > 1)
# Make new items if needed
for frame in self.frames:
if frame not in self.itemFrames:
if frame.filename:
label = type(frame).__name__.replace("Frame", "") + ": " + os.path.basename(frame.filename)
else:
label = type(frame).__name__.replace("Frame", "")
self.itemFrames[frame] = self.AppendRadioItem(wx.ID_ANY, label, label)
self.Bind(wx.EVT_MENU, self.showFrame, self.itemFrames[frame])
# Edit items to match frames
for frame in self.itemFrames:
item = self.itemFrames[frame]
if not item:
continue
if frame not in self.frames:
# Disable unused items
item.Enable(False)
else:
# Rename item
if frame.filename:
self.itemFrames[frame].SetItemLabel(
type(frame).__name__.replace("Frame", "") + ": " + os.path.basename(frame.filename)
)
else:
self.itemFrames[frame].SetItemLabel(
type(frame).__name__.replace("Frame", "") + ": None"
)
item.Check(frame == self.Window)
self.itemFrames = {key: self.itemFrames[key] for key in self.itemFrames if self.itemFrames[key] is not None}
def showFrame(self, event=None):
itemFrames = event.EventObject.itemFrames
frame = [key for key in itemFrames if itemFrames[key].Id == event.Id][0]
frame.Show(True)
frame.Raise()
self.parent.app.SetTopWindow(frame)
self.updateFrames()
def nextWindow(self, event=None):
"""Cycle through list of open windows"""
current = event.EventObject.Window
i = self.frames.index(current)
while self.frames[i] == current:
i -= 1
self.frames[i].Raise()
self.frames[i].Show()
self.updateFrames()
| gpl-3.0 | 2,146,391,062,270,683,000 | 39.798122 | 116 | 0.531243 | false |
netscaler/horizon | openstack_dashboard/dashboards/project/firewalls/tests.py | 5 | 19627 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
from mox import IsA # noqa
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from openstack_dashboard import api
from openstack_dashboard.api import fwaas
from openstack_dashboard.test import helpers as test
class FirewallTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:firewalls:index' % DASHBOARD)
ADDRULE_PATH = 'horizon:%s:firewalls:addrule' % DASHBOARD
ADDPOLICY_PATH = 'horizon:%s:firewalls:addpolicy' % DASHBOARD
ADDFIREWALL_PATH = 'horizon:%s:firewalls:addfirewall' % DASHBOARD
RULE_DETAIL_PATH = 'horizon:%s:firewalls:ruledetails' % DASHBOARD
POLICY_DETAIL_PATH = 'horizon:%s:firewalls:policydetails' % DASHBOARD
FIREWALL_DETAIL_PATH = 'horizon:%s:firewalls:firewalldetails' % DASHBOARD
UPDATERULE_PATH = 'horizon:%s:firewalls:updaterule' % DASHBOARD
UPDATEPOLICY_PATH = 'horizon:%s:firewalls:updatepolicy' % DASHBOARD
UPDATEFIREWALL_PATH = 'horizon:%s:firewalls:updatefirewall' % DASHBOARD
INSERTRULE_PATH = 'horizon:%s:firewalls:insertrule' % DASHBOARD
REMOVERULE_PATH = 'horizon:%s:firewalls:removerule' % DASHBOARD
def set_up_expect(self):
# retrieve rules
rule1 = self.fw_rules.first()
tenant_id = rule1.tenant_id
api.fwaas.rules_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndReturn(self.fw_rules.list())
# retrieves policies
policies = self.fw_policies.list()
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
# retrieves firewalls
firewalls = self.firewalls.list()
api.fwaas.firewalls_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(firewalls)
def set_up_expect_with_exception(self):
rule1 = self.fw_rules.first()
tenant_id = rule1.tenant_id
api.fwaas.rules_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
api.fwaas.policies_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
api.fwaas.firewalls_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_firewalls(self):
self.set_up_expect()
self.mox.ReplayAll()
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data),
len(self.firewalls.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_policies(self):
self.set_up_expect()
self.mox.ReplayAll()
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['policiestable_table'].data),
len(self.fw_policies.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_rules(self):
self.set_up_expect()
self.mox.ReplayAll()
rule = self.fw_rules.first()
tenant_id = rule.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['rulestable_table'].data),
len(self.fw_rules.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_firewalls(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_policies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['policiestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_rules(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
rule = self.fw_rules.first()
tenant_id = rule.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['rulestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('rule_create',), })
def test_add_rule_post(self):
rule1 = self.fw_rules.first()
form_data = {'name': rule1.name,
'description': rule1.description,
'protocol': rule1.protocol,
'action': rule1.action,
'source_ip_address': rule1.source_ip_address,
'source_port': rule1.source_port,
'destination_ip_address': rule1.destination_ip_address,
'destination_port': rule1.destination_port,
'shared': rule1.shared,
'enabled': rule1.enabled
}
api.fwaas.rule_create(
IsA(http.HttpRequest), **form_data).AndReturn(rule1)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_rule_post_with_error(self):
rule1 = self.fw_rules.first()
form_data = {'name': rule1.name,
'description': rule1.description,
'protocol': 'abc',
'action': 'pass',
'source_ip_address': rule1.source_ip_address,
'source_port': rule1.source_port,
'destination_ip_address': rule1.destination_ip_address,
'destination_port': rule1.destination_port,
'shared': rule1.shared,
'enabled': rule1.enabled
}
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
self.assertFormErrors(res, 2)
@test.create_stubs({api.fwaas: ('policy_create', 'rules_list'), })
def test_add_policy_post(self):
policy = self.fw_policies.first()
rules = self.fw_rules.list()
tenant_id = policy.tenant_id
form_data = {'name': policy.name,
'description': policy.description,
'firewall_rules': policy.firewall_rules,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_create(
IsA(http.HttpRequest), **form_data).AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDPOLICY_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_create', 'rules_list'), })
def test_add_policy_post_with_error(self):
policy = self.fw_policies.first()
rules = self.fw_rules.list()
tenant_id = policy.tenant_id
form_data = {'description': policy.description,
'firewall_rules': None,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDPOLICY_PATH), form_data)
self.assertFormErrors(res, 1)
@test.create_stubs({api.fwaas: ('firewall_create', 'policies_list'), })
def test_add_firewall_post(self):
firewall = self.firewalls.first()
policies = self.fw_policies.list()
tenant_id = firewall.tenant_id
form_data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': firewall.firewall_policy_id,
'shared': firewall.shared,
'admin_state_up': firewall.admin_state_up
}
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
api.fwaas.firewall_create(
IsA(http.HttpRequest), **form_data).AndReturn(firewall)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_create', 'policies_list'), })
def test_add_firewall_post_with_error(self):
firewall = self.firewalls.first()
policies = self.fw_policies.list()
tenant_id = firewall.tenant_id
form_data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': None,
'shared': firewall.shared,
'admin_state_up': firewall.admin_state_up
}
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
self.assertFormErrors(res, 1)
@test.create_stubs({api.fwaas: ('rule_get', 'rule_update')})
def test_update_rule_post(self):
rule = self.fw_rules.first()
api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
rule.name = 'new name'
rule.description = 'new desc'
rule.protocol = 'ICMP'
rule.action = 'ALLOW'
rule.shared = False
rule.enabled = True
data = {'name': rule.name,
'description': rule.description,
'protocol': rule.protocol,
'action': rule.action,
'shared': rule.shared,
'enabled': rule.enabled,
'source_ip_address': rule.source_ip_address,
'destination_ip_address': None,
'source_port': None,
'destination_port': rule.destination_port,
}
api.fwaas.rule_update(IsA(http.HttpRequest), rule.id, **data)\
.AndReturn(rule)
self.mox.ReplayAll()
form_data = {'name': rule.name,
'description': rule.description,
'protocol': rule.protocol,
'action': rule.action,
'shared': rule.shared,
'enabled': rule.enabled,
'source_ip_address': rule.source_ip_address,
'destination_ip_address': '',
'source_port': '',
'destination_port': rule.destination_port,
}
res = self.client.post(
reverse(self.UPDATERULE_PATH, args=(rule.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_update',
'rules_list')})
def test_update_policy_post(self):
policy = self.fw_policies.first()
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
policy.name = 'new name'
policy.description = 'new desc'
policy.shared = True
policy.audited = False
data = {'name': policy.name,
'description': policy.description,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.policy_update(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.UPDATEPOLICY_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_get', 'policies_list',
'firewall_update')})
def test_update_firewall_post(self):
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
api.fwaas.firewall_get(IsA(http.HttpRequest),
firewall.id).AndReturn(firewall)
firewall.name = 'new name'
firewall.description = 'new desc'
firewall.admin_state_up = False
data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': firewall.firewall_policy_id,
'admin_state_up': firewall.admin_state_up
}
policies = self.fw_policies.list()
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
api.fwaas.firewall_update(IsA(http.HttpRequest), firewall.id, **data)\
.AndReturn(firewall)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.UPDATEFIREWALL_PATH, args=(firewall.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get',
'policy_insert_rule',
'rules_list')})
def test_policy_insert_rule(self):
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
rules = self.fw_rules.list()
new_rule_id = rules[2].id
data = {'firewall_rule_id': new_rule_id,
'insert_before': rules[1].id,
'insert_after': rules[0].id}
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
policy.firewall_rules = [rules[0].id,
new_rule_id,
rules[1].id]
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_insert_rule(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.INSERTRULE_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_remove_rule',
'rules_list',)})
def test_policy_remove_rule(self):
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
rules = self.fw_rules.list()
remove_rule_id = policy.firewall_rules[0]
left_rule_id = policy.firewall_rules[1]
data = {'firewall_rule_id': remove_rule_id}
after_remove_policy_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'tenant_id': '1',
'name': 'policy1',
'description': 'policy description',
'firewall_rules': [left_rule_id],
'audited': True,
'shared': True}
after_remove_policy = fwaas.Policy(after_remove_policy_dict)
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_remove_rule(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(after_remove_policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.REMOVERULE_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
| apache-2.0 | 7,097,110,327,593,757,000 | 36.744231 | 79 | 0.556478 | false |
foursquare/pants | src/python/pants/backend/jvm/tasks/jvm_compile/execution_graph.py | 1 | 11698 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import queue
import sys
import threading
import traceback
from builtins import map, object, str
from collections import defaultdict, deque
from heapq import heappop, heappush
from pants.base.worker_pool import Work
class Job(object):
"""A unit of scheduling for the ExecutionGraph.
  The ExecutionGraph represents a DAG of dependent work. A Job is a node in the graph along with
  the keys of the jobs it depends on.
"""
def __init__(self, key, fn, dependencies, size=0, on_success=None, on_failure=None):
"""
:param key: Key used to reference and look up jobs
:param fn callable: The work to perform
:param dependencies: List of keys for dependent jobs
:param size: Estimated job size used for prioritization
:param on_success: Zero parameter callback to run if job completes successfully. Run on main
thread.
    :param on_failure: Zero parameter callback to run if the job fails. Run on main
thread."""
self.key = key
self.fn = fn
self.dependencies = dependencies
self.size = size
self.on_success = on_success
self.on_failure = on_failure
def __call__(self):
self.fn()
def run_success_callback(self):
if self.on_success:
self.on_success()
def run_failure_callback(self):
if self.on_failure:
self.on_failure()
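# A minimal sketch of constructing a Job with the signature defined above. The
# key names, dependency keys, size and callbacks are hypothetical placeholders,
# not values used elsewhere in this module.
#
#   example_job = Job(key='compile(src/main)',
#                     fn=lambda: None,                     # the unit of work
#                     dependencies=['compile(src/util)'],  # keys this job waits on
#                     size=100,                            # rough cost estimate for prioritization
#                     on_success=lambda: None,             # run on the main thread on success
#                     on_failure=lambda: None)             # run on the main thread on failure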
UNSTARTED = 'Unstarted'
QUEUED = 'Queued'
SUCCESSFUL = 'Successful'
FAILED = 'Failed'
CANCELED = 'Canceled'
class StatusTable(object):
DONE_STATES = {SUCCESSFUL, FAILED, CANCELED}
def __init__(self, keys, pending_dependencies_count):
self._statuses = {key: UNSTARTED for key in keys}
self._pending_dependencies_count = pending_dependencies_count
def mark_as(self, state, key):
self._statuses[key] = state
def mark_queued(self, key):
self.mark_as(QUEUED, key)
def unfinished_items(self):
"""Returns a list of (name, status) tuples, only including entries marked as unfinished."""
return [(key, stat) for key, stat in self._statuses.items() if stat not in self.DONE_STATES]
def failed_keys(self):
return [key for key, stat in self._statuses.items() if stat == FAILED]
def is_unstarted(self, key):
return self._statuses.get(key) is UNSTARTED
def mark_one_successful_dependency(self, key):
self._pending_dependencies_count[key] -= 1
def is_ready_to_submit(self, key):
return self.is_unstarted(key) and self._pending_dependencies_count[key] == 0
def are_all_done(self):
return all(s in self.DONE_STATES for s in self._statuses.values())
def has_failures(self):
return any(stat is FAILED for stat in self._statuses.values())
class ExecutionFailure(Exception):
"""Raised when work units fail during execution"""
def __init__(self, message, cause=None):
if cause:
message = "{}: {}".format(message, str(cause))
super(ExecutionFailure, self).__init__(message)
self.cause = cause
class UnexecutableGraphError(Exception):
"""Base exception class for errors that make an ExecutionGraph not executable"""
def __init__(self, msg):
super(UnexecutableGraphError, self).__init__("Unexecutable graph: {}".format(msg))
class NoRootJobError(UnexecutableGraphError):
def __init__(self):
super(NoRootJobError, self).__init__(
"All scheduled jobs have dependencies. There must be a circular dependency.")
class UnknownJobError(UnexecutableGraphError):
def __init__(self, undefined_dependencies):
super(UnknownJobError, self).__init__("Undefined dependencies {}"
.format(", ".join(map(repr, undefined_dependencies))))
class JobExistsError(UnexecutableGraphError):
def __init__(self, key):
super(JobExistsError, self).__init__("Job already scheduled {!r}"
.format(key))
class ThreadSafeCounter(object):
def __init__(self):
self.lock = threading.Lock()
self._counter = 0
def get(self):
with self.lock:
return self._counter
def increment(self):
with self.lock:
self._counter += 1
def decrement(self):
with self.lock:
self._counter -= 1
class ExecutionGraph(object):
"""A directed acyclic graph of work to execute.
This is currently only used within jvm compile, but the intent is to unify it with the future
global execution graph.
"""
def __init__(self, job_list, print_stack_trace):
"""
:param job_list Job: list of Jobs to schedule and run.
"""
self._print_stack_trace = print_stack_trace
self._dependencies = defaultdict(list)
self._dependees = defaultdict(list)
self._jobs = {}
self._job_keys_as_scheduled = []
self._job_keys_with_no_dependencies = []
for job in job_list:
self._schedule(job)
unscheduled_dependencies = set(self._dependees.keys()) - set(self._job_keys_as_scheduled)
if unscheduled_dependencies:
raise UnknownJobError(unscheduled_dependencies)
if len(self._job_keys_with_no_dependencies) == 0:
raise NoRootJobError()
self._job_priority = self._compute_job_priorities(job_list)
def format_dependee_graph(self):
return "\n".join([
"{} -> {{\n {}\n}}".format(key, ',\n '.join(self._dependees[key]))
for key in self._job_keys_as_scheduled
])
def _schedule(self, job):
key = job.key
dependency_keys = job.dependencies
self._job_keys_as_scheduled.append(key)
if key in self._jobs:
raise JobExistsError(key)
self._jobs[key] = job
if len(dependency_keys) == 0:
self._job_keys_with_no_dependencies.append(key)
self._dependencies[key] = dependency_keys
for dependency_key in dependency_keys:
self._dependees[dependency_key].append(key)
def _compute_job_priorities(self, job_list):
"""Walks the dependency graph breadth-first, starting from the most dependent tasks,
    and computes the job priority as the sum of the job sizes along the critical path."""
job_size = {job.key: job.size for job in job_list}
job_priority = defaultdict(int)
bfs_queue = deque()
for job in job_list:
if len(self._dependees[job.key]) == 0:
job_priority[job.key] = job_size[job.key]
bfs_queue.append(job.key)
satisfied_dependees_count = defaultdict(int)
while len(bfs_queue) > 0:
job_key = bfs_queue.popleft()
for dependency_key in self._dependencies[job_key]:
job_priority[dependency_key] = \
max(job_priority[dependency_key],
job_size[dependency_key] + job_priority[job_key])
satisfied_dependees_count[dependency_key] += 1
if satisfied_dependees_count[dependency_key] == len(self._dependees[dependency_key]):
bfs_queue.append(dependency_key)
return job_priority
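  # A small worked example of the computation above, assuming a hypothetical
  # chain where 'a' depends on 'b' and 'b' depends on 'c', with sizes
  # a=1, b=2, c=4 (so 'c' has no dependencies and 'a' has no dependees):
  #   priority(a) = 1             (walk starts at jobs with no dependees)
  #   priority(b) = 2 + 1 = 3     (size(b) + priority(a))
  #   priority(c) = 4 + 3 = 7     (size(c) + priority(b))
  # 'c', the deepest job on the critical path, is therefore submitted first.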
def execute(self, pool, log):
"""Runs scheduled work, ensuring all dependencies for each element are done before execution.
:param pool: A WorkerPool to run jobs on
:param log: logger for logging debug information and progress
submits all the work without any dependencies to the worker pool
when a unit of work finishes,
if it is successful
calls success callback
checks for dependees whose dependencies are all successful, and submits them
if it fails
calls failure callback
marks dependees as failed and queues them directly into the finished work queue
when all work is either successful or failed,
cleans up the work pool
if there's an exception on the main thread,
calls failure callback for unfinished work
aborts work pool
re-raises
"""
log.debug(self.format_dependee_graph())
status_table = StatusTable(self._job_keys_as_scheduled,
{key: len(self._jobs[key].dependencies) for key in self._job_keys_as_scheduled})
finished_queue = queue.Queue()
heap = []
jobs_in_flight = ThreadSafeCounter()
def put_jobs_into_heap(job_keys):
for job_key in job_keys:
# minus because jobs with larger priority should go first
heappush(heap, (-self._job_priority[job_key], job_key))
def try_to_submit_jobs_from_heap():
def worker(worker_key, work):
try:
work()
result = (worker_key, SUCCESSFUL, None)
except Exception:
_, exc_value, exc_traceback = sys.exc_info()
result = (worker_key, FAILED, (exc_value, traceback.format_tb(exc_traceback)))
finished_queue.put(result)
jobs_in_flight.decrement()
while len(heap) > 0 and jobs_in_flight.get() < pool.num_workers:
priority, job_key = heappop(heap)
jobs_in_flight.increment()
status_table.mark_queued(job_key)
pool.submit_async_work(Work(worker, [(job_key, (self._jobs[job_key]))]))
def submit_jobs(job_keys):
put_jobs_into_heap(job_keys)
try_to_submit_jobs_from_heap()
try:
submit_jobs(self._job_keys_with_no_dependencies)
while not status_table.are_all_done():
try:
finished_key, result_status, value = finished_queue.get(timeout=10)
except queue.Empty:
log.debug("Waiting on \n {}\n".format("\n ".join(
"{}: {}".format(key, state) for key, state in status_table.unfinished_items())))
try_to_submit_jobs_from_heap()
continue
finished_job = self._jobs[finished_key]
direct_dependees = self._dependees[finished_key]
status_table.mark_as(result_status, finished_key)
# Queue downstream tasks.
if result_status is SUCCESSFUL:
try:
finished_job.run_success_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_success for {}".format(finished_key), e)
ready_dependees = []
for dependee in direct_dependees:
status_table.mark_one_successful_dependency(dependee)
if status_table.is_ready_to_submit(dependee):
ready_dependees.append(dependee)
submit_jobs(ready_dependees)
else: # Failed or canceled.
try:
finished_job.run_failure_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_failure for {}".format(finished_key), e)
# Propagate failures downstream.
for dependee in direct_dependees:
if status_table.is_unstarted(dependee):
status_table.mark_queued(dependee)
finished_queue.put((dependee, CANCELED, None))
# Log success or failure for this job.
if result_status is FAILED:
exception, tb = value
log.error("{} failed: {}".format(finished_key, exception))
if self._print_stack_trace:
log.error('Traceback:\n{}'.format('\n'.join(tb)))
else:
log.debug("{} finished with status {}".format(finished_key, result_status))
except ExecutionFailure:
raise
except Exception as e:
# Call failure callbacks for jobs that are unfinished.
for key, state in status_table.unfinished_items():
self._jobs[key].run_failure_callback()
log.debug(traceback.format_exc())
raise ExecutionFailure("Error running job", e)
if status_table.has_failures():
raise ExecutionFailure("Failed jobs: {}".format(', '.join(status_table.failed_keys())))
| apache-2.0 | 1,957,824,382,805,941,500 | 32.711816 | 111 | 0.653103 | false |
foursquare/pants | tests/python/pants_test/backend/jvm/tasks/test_jvmdoc_gen.py | 2 | 1845 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.tasks.jvmdoc_gen import Jvmdoc, JvmdocGen
from pants.base.exceptions import TaskError
from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
dummydoc = Jvmdoc(tool_name='dummydoc', product_type='dummydoc')
class DummyJvmdocGen(JvmdocGen):
@classmethod
def jvmdoc(cls):
return dummydoc
def execute(self):
self.generate_doc(lambda t: True, create_dummydoc_command)
def create_dummydoc_command(classpath, gendir, *targets):
# here we need to test that we get the expected classpath
pass
class JvmdocGenTest(JvmTaskTestBase):
"""Test some base functionality in JvmdocGen."""
@classmethod
def task_type(cls):
return DummyJvmdocGen
def setUp(self):
super(JvmdocGenTest, self).setUp()
self.t1 = self.make_target('t1')
context = self.context(target_roots=[self.t1])
self.targets = context.targets()
self.populate_runtime_classpath(context)
self.task = self.create_task(context)
def test_classpath(self):
self.task.execute()
def test_generate(self):
def create_jvmdoc_command_fail(classpath, gendir, *targets):
return ['python', os.path.join(os.path.dirname(__file__), "false.py")]
def create_jvmdoc_command_succeed(classpath, gendir, *targets):
return ['python', os.path.join(os.path.dirname(__file__), "true.py")]
for generate in [self.task._generate_individual,
self.task._generate_combined]:
with self.assertRaises(TaskError):
generate(self.targets, create_jvmdoc_command_fail)
generate(self.targets, create_jvmdoc_command_succeed)
| apache-2.0 | -4,552,293,231,459,239,000 | 27.828125 | 82 | 0.716531 | false |
xyuanmu/XX-Net | code/default/x_tunnel/local/cloudfront_front/web_control.py | 4 | 5336 | #!/usr/bin/env python
# coding:utf-8
import os
import time
import urllib.parse
import simple_http_server
from .front import front
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, os.pardir))
web_ui_path = os.path.join(current_path, os.path.pardir, "web_ui")
class ControlHandler(simple_http_server.HttpServerHandler):
def __init__(self, client_address, headers, command, path, rfile, wfile):
self.client_address = client_address
self.headers = headers
self.command = command
self.path = path
self.rfile = rfile
self.wfile = wfile
def do_GET(self):
path = urllib.parse.urlparse(self.path).path
if path == "/log":
return self.req_log_handler()
elif path == "/ip_list":
return self.req_ip_list_handler()
elif path == "/debug":
return self.req_debug_handler()
else:
front.logger.warn('Control Req %s %s %s ', self.address_string(), self.command, self.path)
self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
front.logger.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
def req_log_handler(self):
req = urllib.parse.urlparse(self.path).query
reqs = urllib.parse.parse_qs(req, keep_blank_values=True)
data = ''
cmd = "get_last"
if reqs["cmd"]:
cmd = reqs["cmd"][0]
if cmd == "get_last":
max_line = int(reqs["max_line"][0])
data = front.logger.get_last_lines(max_line)
elif cmd == "get_new":
last_no = int(reqs["last_no"][0])
data = front.logger.get_new_lines(last_no)
else:
front.logger.error('PAC %s %s %s ', self.address_string(), self.command, self.path)
mimetype = 'text/plain'
self.send_response_nc(mimetype, data)
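    # Example requests this handler accepts (the local control port is
    # deployment-specific and assumed here; paths are relative to it):
    #   GET /log?cmd=get_last&max_line=100   -> last 100 log lines
    #   GET /log?cmd=get_new&last_no=250     -> lines appended after line 250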
def req_ip_list_handler(self):
time_now = time.time()
data = "<html><body><div style='float: left; white-space:nowrap;font-family: monospace;'>"
data += "time:%d pointer:%d<br>\r\n" % (time_now, front.ip_manager.ip_pointer)
data += "<table><tr><th>N</th><th>IP</th><th>HS</th><th>Fails</th>"
data += "<th>down_fail</th><th>links</th>"
data += "<th>get_time</th><th>success_time</th><th>fail_time</th><th>down_fail_time</th>"
data += "<th>data_active</th><th>transfered_data</th><th>Trans</th>"
data += "<th>history</th></tr>\n"
i = 1
for ip in front.ip_manager.ip_list:
handshake_time = front.ip_manager.ip_dict[ip]["handshake_time"]
fail_times = front.ip_manager.ip_dict[ip]["fail_times"]
down_fail = front.ip_manager.ip_dict[ip]["down_fail"]
links = front.ip_manager.ip_dict[ip]["links"]
get_time = front.ip_manager.ip_dict[ip]["get_time"]
if get_time:
get_time = time_now - get_time
success_time = front.ip_manager.ip_dict[ip]["success_time"]
if success_time:
success_time = time_now - success_time
fail_time = front.ip_manager.ip_dict[ip]["fail_time"]
if fail_time:
fail_time = time_now - fail_time
down_fail_time = front.ip_manager.ip_dict[ip]["down_fail_time"]
if down_fail_time:
down_fail_time = time_now - down_fail_time
data_active = front.ip_manager.ip_dict[ip]["data_active"]
if data_active:
active_time = time_now - data_active
else:
active_time = 0
history = front.ip_manager.ip_dict[ip]["history"]
t0 = 0
str_out = ''
for item in history:
t = item[0]
v = item[1]
if t0 == 0:
t0 = t
time_per = int((t - t0) * 1000)
t0 = t
str_out += "%d(%s) " % (time_per, v)
data += "<tr><td>%d</td><td>%s</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td>" \
"<td>%d</td><td>%d</td><td>%s</td></tr>\n" % \
(i, ip, handshake_time, fail_times, down_fail, links, get_time, success_time, fail_time, down_fail_time, \
active_time, str_out)
i += 1
data += "</table></div></body></html>"
mimetype = 'text/html'
self.send_response_nc(mimetype, data)
def req_debug_handler(self):
data = ""
objs = [front.connect_manager] + list(front.dispatchs.values())
for obj in objs:
data += "%s\r\n" % obj.__class__
for attr in dir(obj):
if attr.startswith("__"):
continue
sub_obj = getattr(obj, attr)
if callable(sub_obj):
continue
data += " %s = %s\r\n" % (attr, sub_obj)
if hasattr(obj, "to_string"):
data += obj.to_string()
mimetype = 'text/plain'
self.send_response_nc(mimetype, data) | bsd-2-clause | 4,726,747,232,259,229,000 | 37.121429 | 127 | 0.529048 | false |
lz1988/company-site | django/contrib/sites/management.py | 232 | 1587 | """
Creates the default Site object.
"""
from django.db.models import signals
from django.db import connections
from django.db import router
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_app
from django.core.management.color import no_style
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
    if Site in created_models and router.allow_syncdb(db, Site):
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=1, domain="example.com", name="example.com").save(using=db)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[db].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
cursor = connections[db].cursor()
for command in sequence_sql:
cursor.execute(command)
Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
| bsd-3-clause | 8,895,923,184,502,504,000 | 43.083333 | 81 | 0.674858 | false |
ruuk/script.module.youtube.dl | lib/youtube_dl/extractor/urort.py | 64 | 2249 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
unified_strdate,
)
class UrortIE(InfoExtractor):
IE_DESC = 'NRK P3 Urørt'
_VALID_URL = r'https?://(?:www\.)?urort\.p3\.no/#!/Band/(?P<id>[^/]+)$'
_TEST = {
'url': 'https://urort.p3.no/#!/Band/Gerilja',
'md5': '5ed31a924be8a05e47812678a86e127b',
'info_dict': {
'id': '33124-24',
'ext': 'mp3',
'title': 'The Bomb',
'thumbnail': r're:^https?://.+\.jpg',
'uploader': 'Gerilja',
'uploader_id': 'Gerilja',
'upload_date': '20100323',
},
'params': {
'matchtitle': '^The Bomb$', # To test, we want just one video
}
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
songs = self._download_json(json_url, playlist_id)
entries = []
for s in songs:
formats = [{
'tbr': f.get('Quality'),
'ext': f['FileType'],
'format_id': '%s-%s' % (f['FileType'], f.get('Quality', '')),
'url': 'http://p3urort.blob.core.windows.net/tracks/%s' % f['FileRef'],
'preference': 3 if f['FileType'] == 'mp3' else 2,
} for f in s['Files']]
self._sort_formats(formats)
e = {
'id': '%d-%s' % (s['BandId'], s['$id']),
'title': s['Title'],
'uploader_id': playlist_id,
'uploader': s.get('BandName', playlist_id),
'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'],
'upload_date': unified_strdate(s.get('Released')),
'formats': formats,
}
entries.append(e)
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_id,
'entries': entries,
}
| gpl-2.0 | 6,488,771,619,845,157,000 | 33.060606 | 132 | 0.483096 | false |
sdopoku/flask-blog | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
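        # Worked example of the formula above (hypothetical input): after seeing
        # three multi-byte sequences, unlike = 0.99 * (0.5 ** 3) ~= 0.124, so the
        # reported confidence is 1.0 - 0.124 ~= 0.876; once six or more multi-byte
        # sequences have been seen the prober reports a flat 0.99.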
| gpl-2.0 | 1,224,546,314,913,529,600 | 33.894737 | 69 | 0.633107 | false |
Ted1993/Flasky | venv/lib/python2.7/site-packages/mako/ext/turbogears.py | 39 | 2132 | # ext/turbogears.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
"""TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'):
self.extra_vars_func = extra_vars_func
self.extension = extension
if not options:
options = {}
# Pull the options out and initialize the lookup
lookup_options = {}
for k, v in options.items():
if k.startswith('mako.'):
lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {}
# transfer lookup args to template args, based on those available
# in getargspec
for kw in compat.inspect_getargspec(Template.__init__)[0]:
if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw]
def load_template(self, templatename, template_string=None):
"""Loads a template from a file or a string"""
if template_string is not None:
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template
return self.lookup.get_template(templatename)
def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, compat.string_types):
template = self.load_template(template)
# Load extra vars func if provided
if self.extra_vars_func:
info.update(self.extra_vars_func())
return template.render(**info)
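# A minimal usage sketch, assuming the caller has a 'templates' directory
# containing 'myapp/templates/index.mak'; the option values and the dotted
# template name are placeholders, not part of this module:
#
#   plugin = TGPlugin(options={'directories': ['templates'],
#                              'mako.input_encoding': 'utf-8'})
#   html = plugin.render({'title': 'Hello'}, template='myapp.templates.index')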
| mit | 8,990,383,964,585,528,000 | 35.758621 | 79 | 0.625235 | false |
telerik/cloudbase-init | cloudbaseinit/openstack/common/notifier/rabbit_notifier.py | 1 | 1108 | # Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
def notify(context, message):
"""Deprecated in Grizzly. Please use rpc_notifier instead."""
LOG.deprecated(_("The rabbit_notifier is now deprecated."
" Please use rpc_notifier instead."))
rpc_notifier.notify(context, message)
| apache-2.0 | 3,942,909,638,147,022,000 | 37.206897 | 78 | 0.728339 | false |
AlexBoogaard/Sick-Beard-Torrent-Edition | lib/unidecode/x0b5.py | 253 | 4919 | data = (
'dyil', # 0x00
'dyilg', # 0x01
'dyilm', # 0x02
'dyilb', # 0x03
'dyils', # 0x04
'dyilt', # 0x05
'dyilp', # 0x06
'dyilh', # 0x07
'dyim', # 0x08
'dyib', # 0x09
'dyibs', # 0x0a
'dyis', # 0x0b
'dyiss', # 0x0c
'dying', # 0x0d
'dyij', # 0x0e
'dyic', # 0x0f
'dyik', # 0x10
'dyit', # 0x11
'dyip', # 0x12
'dyih', # 0x13
'di', # 0x14
'dig', # 0x15
'digg', # 0x16
'digs', # 0x17
'din', # 0x18
'dinj', # 0x19
'dinh', # 0x1a
'did', # 0x1b
'dil', # 0x1c
'dilg', # 0x1d
'dilm', # 0x1e
'dilb', # 0x1f
'dils', # 0x20
'dilt', # 0x21
'dilp', # 0x22
'dilh', # 0x23
'dim', # 0x24
'dib', # 0x25
'dibs', # 0x26
'dis', # 0x27
'diss', # 0x28
'ding', # 0x29
'dij', # 0x2a
'dic', # 0x2b
'dik', # 0x2c
'dit', # 0x2d
'dip', # 0x2e
'dih', # 0x2f
'dda', # 0x30
'ddag', # 0x31
'ddagg', # 0x32
'ddags', # 0x33
'ddan', # 0x34
'ddanj', # 0x35
'ddanh', # 0x36
'ddad', # 0x37
'ddal', # 0x38
'ddalg', # 0x39
'ddalm', # 0x3a
'ddalb', # 0x3b
'ddals', # 0x3c
'ddalt', # 0x3d
'ddalp', # 0x3e
'ddalh', # 0x3f
'ddam', # 0x40
'ddab', # 0x41
'ddabs', # 0x42
'ddas', # 0x43
'ddass', # 0x44
'ddang', # 0x45
'ddaj', # 0x46
'ddac', # 0x47
'ddak', # 0x48
'ddat', # 0x49
'ddap', # 0x4a
'ddah', # 0x4b
'ddae', # 0x4c
'ddaeg', # 0x4d
'ddaegg', # 0x4e
'ddaegs', # 0x4f
'ddaen', # 0x50
'ddaenj', # 0x51
'ddaenh', # 0x52
'ddaed', # 0x53
'ddael', # 0x54
'ddaelg', # 0x55
'ddaelm', # 0x56
'ddaelb', # 0x57
'ddaels', # 0x58
'ddaelt', # 0x59
'ddaelp', # 0x5a
'ddaelh', # 0x5b
'ddaem', # 0x5c
'ddaeb', # 0x5d
'ddaebs', # 0x5e
'ddaes', # 0x5f
'ddaess', # 0x60
'ddaeng', # 0x61
'ddaej', # 0x62
'ddaec', # 0x63
'ddaek', # 0x64
'ddaet', # 0x65
'ddaep', # 0x66
'ddaeh', # 0x67
'ddya', # 0x68
'ddyag', # 0x69
'ddyagg', # 0x6a
'ddyags', # 0x6b
'ddyan', # 0x6c
'ddyanj', # 0x6d
'ddyanh', # 0x6e
'ddyad', # 0x6f
'ddyal', # 0x70
'ddyalg', # 0x71
'ddyalm', # 0x72
'ddyalb', # 0x73
'ddyals', # 0x74
'ddyalt', # 0x75
'ddyalp', # 0x76
'ddyalh', # 0x77
'ddyam', # 0x78
'ddyab', # 0x79
'ddyabs', # 0x7a
'ddyas', # 0x7b
'ddyass', # 0x7c
'ddyang', # 0x7d
'ddyaj', # 0x7e
'ddyac', # 0x7f
'ddyak', # 0x80
'ddyat', # 0x81
'ddyap', # 0x82
'ddyah', # 0x83
'ddyae', # 0x84
'ddyaeg', # 0x85
'ddyaegg', # 0x86
'ddyaegs', # 0x87
'ddyaen', # 0x88
'ddyaenj', # 0x89
'ddyaenh', # 0x8a
'ddyaed', # 0x8b
'ddyael', # 0x8c
'ddyaelg', # 0x8d
'ddyaelm', # 0x8e
'ddyaelb', # 0x8f
'ddyaels', # 0x90
'ddyaelt', # 0x91
'ddyaelp', # 0x92
'ddyaelh', # 0x93
'ddyaem', # 0x94
'ddyaeb', # 0x95
'ddyaebs', # 0x96
'ddyaes', # 0x97
'ddyaess', # 0x98
'ddyaeng', # 0x99
'ddyaej', # 0x9a
'ddyaec', # 0x9b
'ddyaek', # 0x9c
'ddyaet', # 0x9d
'ddyaep', # 0x9e
'ddyaeh', # 0x9f
'ddeo', # 0xa0
'ddeog', # 0xa1
'ddeogg', # 0xa2
'ddeogs', # 0xa3
'ddeon', # 0xa4
'ddeonj', # 0xa5
'ddeonh', # 0xa6
'ddeod', # 0xa7
'ddeol', # 0xa8
'ddeolg', # 0xa9
'ddeolm', # 0xaa
'ddeolb', # 0xab
'ddeols', # 0xac
'ddeolt', # 0xad
'ddeolp', # 0xae
'ddeolh', # 0xaf
'ddeom', # 0xb0
'ddeob', # 0xb1
'ddeobs', # 0xb2
'ddeos', # 0xb3
'ddeoss', # 0xb4
'ddeong', # 0xb5
'ddeoj', # 0xb6
'ddeoc', # 0xb7
'ddeok', # 0xb8
'ddeot', # 0xb9
'ddeop', # 0xba
'ddeoh', # 0xbb
'dde', # 0xbc
'ddeg', # 0xbd
'ddegg', # 0xbe
'ddegs', # 0xbf
'dden', # 0xc0
'ddenj', # 0xc1
'ddenh', # 0xc2
'dded', # 0xc3
'ddel', # 0xc4
'ddelg', # 0xc5
'ddelm', # 0xc6
'ddelb', # 0xc7
'ddels', # 0xc8
'ddelt', # 0xc9
'ddelp', # 0xca
'ddelh', # 0xcb
'ddem', # 0xcc
'ddeb', # 0xcd
'ddebs', # 0xce
'ddes', # 0xcf
'ddess', # 0xd0
'ddeng', # 0xd1
'ddej', # 0xd2
'ddec', # 0xd3
'ddek', # 0xd4
'ddet', # 0xd5
'ddep', # 0xd6
'ddeh', # 0xd7
'ddyeo', # 0xd8
'ddyeog', # 0xd9
'ddyeogg', # 0xda
'ddyeogs', # 0xdb
'ddyeon', # 0xdc
'ddyeonj', # 0xdd
'ddyeonh', # 0xde
'ddyeod', # 0xdf
'ddyeol', # 0xe0
'ddyeolg', # 0xe1
'ddyeolm', # 0xe2
'ddyeolb', # 0xe3
'ddyeols', # 0xe4
'ddyeolt', # 0xe5
'ddyeolp', # 0xe6
'ddyeolh', # 0xe7
'ddyeom', # 0xe8
'ddyeob', # 0xe9
'ddyeobs', # 0xea
'ddyeos', # 0xeb
'ddyeoss', # 0xec
'ddyeong', # 0xed
'ddyeoj', # 0xee
'ddyeoc', # 0xef
'ddyeok', # 0xf0
'ddyeot', # 0xf1
'ddyeop', # 0xf2
'ddyeoh', # 0xf3
'ddye', # 0xf4
'ddyeg', # 0xf5
'ddyegg', # 0xf6
'ddyegs', # 0xf7
'ddyen', # 0xf8
'ddyenj', # 0xf9
'ddyenh', # 0xfa
'ddyed', # 0xfb
'ddyel', # 0xfc
'ddyelg', # 0xfd
'ddyelm', # 0xfe
'ddyelb', # 0xff
)
| gpl-3.0 | -5,185,665,382,399,024,000 | 18.065891 | 20 | 0.478146 | false |
DazWorrall/ansible | lib/ansible/modules/network/avi/avi_snmptrapprofile.py | 27 | 3396 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_snmptrapprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SnmpTrapProfile Avi RESTful Object
description:
- This module is used to configure SnmpTrapProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- A user-friendly name of the snmp trap configuration.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
trap_servers:
description:
- The ip address or hostname of the snmp trap destination server.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the snmp trap profile object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SnmpTrapProfile object
avi_snmptrapprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_snmptrapprofile
"""
RETURN = '''
obj:
description: SnmpTrapProfile (api/snmptrapprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
trap_servers=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'snmptrapprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -275,794,914,653,128,060 | 29.321429 | 77 | 0.651649 | false |
quodlibet/quodlibet | tests/test_pattern.py | 4 | 24758 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from senf import fsnative
from tests import TestCase
from quodlibet.formats import AudioFile
from quodlibet.pattern import (FileFromPattern, XMLFromPattern, Pattern,
XMLFromMarkupPattern, ArbitraryExtensionFileFromPattern)
class _TPattern(TestCase):
def setUp(self):
s1 = {'tracknumber': u'5/6', 'artist': u'Artist', 'title': u'Title5',
'~filename': '/path/to/a.mp3', 'xmltest': u"<&>"}
s2 = {'tracknumber': u'6', 'artist': u'Artist', 'title': u'Title6',
'~filename': '/path/to/b.ogg', 'discnumber': u'2',
'unislash': u"foo\uff0fbar"}
s3 = {'title': u'test/subdir', 'genre': u'/\n/',
'~filename': '/one/more/a.flac', 'version': u'Instrumental'}
s4 = {'performer': u'a\nb', 'artist': u'foo\nbar'}
s5 = {'tracknumber': u'7/1234', 'artist': u'Artist',
'title': u'Title7', '~filename': '/path/to/e.mp3'}
s6 = {'artist': u'Foo', 'albumartist': u'foo.bar', 'album': u'Best Of',
'~filename': '/path/to/f.mp3', 'title': u'The.Final.Word'}
s7 = {'artist': u'un élève français', '~filename': '/path/to/g.mp3',
'albumartist': u'Lee "Scratch" Perry',
'album': u"The 'only' way!", 'comment': u'Trouble|Strife'}
s8 = {'tracknumber': u'7/8', 'artist': u'Artist1\n\nArtist3',
'artistsort': u'SortA1\nSortA2',
'album': u'Album5', 'albumsort': u'SortAlbum5',
'~filename': '/path/to/g.mp3', 'xmltest': u"<&>"}
if os.name == "nt":
s1["~filename"] = u"C:\\path\\to\\a.mp3"
s2["~filename"] = u"C:\\path\\to\\b.ogg"
s3["~filename"] = u"C:\\one\\more\\a.flac"
s4["~filename"] = u"C:\\path\\to\\a.mp3"
s5["~filename"] = u"C:\\path\\to\\a.mp3"
s6["~filename"] = u"C:\\path\\to\\f.mp3"
s7["~filename"] = u"C:\\path\\to\\g.mp3"
s8["~filename"] = u"C:\\path\\to\\h.mp3"
self.a = AudioFile(s1)
self.b = AudioFile(s2)
self.c = AudioFile(s3)
self.d = AudioFile(s4)
self.e = AudioFile(s5)
self.f = AudioFile(s6)
self.g = AudioFile(s7)
self.h = AudioFile(s8)
class TPattern(_TPattern):
from quodlibet.formats import AudioFile
AudioFile
def test_numeric(self):
pat = Pattern("<~#rating>")
self.assertEqual(pat.format(self.a), "0.50")
def test_space(self):
pat = Pattern("a ")
self.assertEqual(pat.format(self.a), "a ")
pat = Pattern(" a")
self.assertEqual(pat.format(self.a), " a")
pat = Pattern("a\n\n")
self.assertEqual(pat.format(self.a), "a\n\n")
def test_escape(self):
pat = Pattern("a \\<foo\\|bla\\>")
self.assertEqual(pat.format(self.a), "a <foo|bla>")
pat = Pattern(r"a\\<foo>")
self.assertEqual(pat.format(self.a), "a\\")
def test_query_like_tag(self):
pat = Pattern("<t=v>")
self.assertEqual(pat.format(AudioFile({"t=v": "foo"})), "foo")
def test_conditional_number_dot_title(s):
pat = Pattern('<tracknumber|<tracknumber>. ><title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), 'test/subdir')
def test_conditional_other_number_dot_title(s):
pat = Pattern('<tracknumber|<tracknumber>|00>. <title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), '00. test/subdir')
def test_conditional_other_other(s):
# FIXME: was <tracknumber|a|b|c>.. but we can't put <>| in the format
# string since it would break the XML pattern formatter.
s.assertEqual(Pattern('<tracknumber|a|b|c>').format(s.a), "")
def test_conditional_genre(s):
pat = Pattern('<genre|<genre>|music>')
s.assertEquals(pat.format(s.a), 'music')
s.assertEquals(pat.format(s.b), 'music')
s.assertEquals(pat.format(s.c), '/, /')
def test_conditional_unknown(s):
pat = Pattern('<album|foo|bar>')
s.assertEquals(pat.format(s.a), 'bar')
def test_conditional_equals(s):
pat = Pattern('<artist=Artist|matched|not matched>')
s.assertEquals(pat.format(s.a), 'matched')
pat = Pattern('<artist=Artistic|matched|not matched>')
s.assertEquals(pat.format(s.a), 'not matched')
def test_conditional_equals_unicode(s):
pat = Pattern(u'<artist=Artist|matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern(u'<artist=un élève français|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_duplicate_query(self):
pat = Pattern('<u=yes|<u=yes|x|y>|<u=yes|q|z>>')
self.assertEqual(pat.format(AudioFile({"u": u"yes"})), "x")
self.assertEqual(pat.format(AudioFile({"u": u"no"})), "z")
def test_tag_query_escaping(s):
pat = Pattern('<albumartist=Lee "Scratch" Perry|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_escaped_pipe(s):
pat = Pattern(r'<albumartist=/Lee\|Bob/|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern(r'<albumartist=\||matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern(r'<comment=/Trouble\|Strife/|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_quoting(s):
pat = Pattern('<album=The only way|matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern("<album=\"The 'only' way!\"|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_regex(s):
pat = Pattern("<album=/'only'/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern("<album=/The .+ way/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern("</The .+ way/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'not matched')
def test_tag_internal(self):
if os.name != "nt":
pat = Pattern("<~filename='/path/to/a.mp3'|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
pat = Pattern(
"<~filename=/\\/path\\/to\\/a.mp3/|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
else:
pat = Pattern(
r"<~filename='C:\\\path\\\to\\\a.mp3'|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
def test_tag_query_disallowed_free_text(s):
pat = Pattern("<The only way|matched|not matched>")
s.assertEquals(pat.format(s.g), 'not matched')
def test_query_scope(self):
pat = Pattern("<foo|<artist=Foo|x|y>|<artist=Foo|z|q>>")
self.assertEqual(pat.format(self.f), "z")
def test_query_numeric(self):
pat = Pattern("<#(foo=42)|42|other>")
self.assertEqual(pat.format(AudioFile()), "other")
self.assertEqual(pat.format(AudioFile({"foo": "42"})), "42")
def test_conditional_notfile(s):
pat = Pattern('<tracknumber|<tracknumber>|00>')
s.assertEquals(pat.format(s.a), '5/6')
s.assertEquals(pat.format(s.b), '6')
s.assertEquals(pat.format(s.c), '00')
def test_conditional_subdir(s):
pat = Pattern('/a<genre|/<genre>>/<title>')
s.assertEquals(pat.format(s.a), '/a/Title5')
s.assertEquals(pat.format(s.b), '/a/Title6')
s.assertEquals(pat.format(s.c), '/a//, //test/subdir')
def test_number_dot_title(s):
pat = Pattern('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), '. test/subdir')
def test_recnumber_dot_title(s):
pat = Pattern(r'\<<tracknumber>\>. <title>')
s.assertEquals(pat.format(s.a), '<5/6>. Title5')
s.assertEquals(pat.format(s.b), '<6>. Title6')
s.assertEquals(pat.format(s.c), '<>. test/subdir')
def test_generated(s):
pat = Pattern('<~basename>')
s.assertEquals(pat.format(s.a), os.path.basename(s.a["~filename"]))
def test_generated_and_not_generated(s):
pat = Pattern('<~basename> <title>')
res = pat.format(s.a)
s.assertEquals(
res, os.path.basename(s.a["~filename"]) + " " + s.a["title"])
def test_number_dot_title_dot(s):
pat = Pattern('<tracknumber>. <title>.')
s.assertEquals(pat.format(s.a), '5/6. Title5.')
s.assertEquals(pat.format(s.b), '6. Title6.')
s.assertEquals(pat.format(s.c), '. test/subdir.')
def test_number_dot_genre(s):
pat = Pattern('<tracknumber>. <genre>')
s.assertEquals(pat.format(s.a), '5/6. ')
s.assertEquals(pat.format(s.b), '6. ')
s.assertEquals(pat.format(s.c), '. /, /')
def test_unicode_with_int(s):
song = AudioFile({"tracknumber": "5/6",
"title": b"\xe3\x81\x99\xe3\x81\xbf\xe3\x82\x8c".decode('utf-8')})
pat = Pattern('<~#track>. <title>')
s.assertEquals(pat.format(song),
b"5. \xe3\x81\x99\xe3\x81\xbf\xe3\x82\x8c".decode('utf-8'))
class _TFileFromPattern(_TPattern):
def _create(self, string):
return FileFromPattern(string)
def test_escape_slash(s):
fpat = s._create('<~filename>')
s.assertTrue(fpat.format(s.a).endswith("_path_to_a.mp3"))
pat = Pattern('<~filename>')
if os.name != "nt":
s.assertTrue(pat.format(s.a).startswith("/path/to/a"))
else:
s.assertTrue(pat.format(s.a).startswith("C:\\path\\to\\a"))
if os.name != "nt":
wpat = s._create(r'\\<artist>\\ "<title>')
s.assertTrue(
wpat.format(s.a).startswith(r'\Artist\ "Title5'))
else:
# FIXME..
pass
def test_directory_rooting(s):
if os.name == "nt":
s.assertRaises(ValueError, FileFromPattern, 'a\\<b>')
s.assertRaises(ValueError, FileFromPattern, '<a>\\<b>')
s._create('C:\\<a>\\<b>')
else:
s.assertRaises(ValueError, FileFromPattern, 'a/<b>')
s.assertRaises(ValueError, FileFromPattern, '<a>/<b>')
s._create('/<a>/<b>')
def test_backslash_conversion_win32(s):
if os.name == 'nt':
pat = s._create(r'Z:\<artist>\<title>')
s.assertTrue(pat.format(s.a).startswith(r'Z:\Artist\Title5'))
def test_raw_slash_preservation(s):
if os.name == "nt":
pat = s._create('C:\\a\\b\\<genre>')
s.assertTrue(pat.format(s.a).startswith('C:\\a\\b\\'))
s.assertTrue(pat.format(s.b).startswith('C:\\a\\b\\'))
s.assertTrue(pat.format(s.c).startswith('C:\\a\\b\\_, _'))
else:
pat = s._create('/a/b/<genre>')
s.assertTrue(pat.format(s.a).startswith('/a/b/'))
s.assertTrue(pat.format(s.b).startswith('/a/b/'))
s.assertTrue(pat.format(s.c).startswith('/a/b/_, _'))
def test_specialcase_anti_ext(s):
p1 = s._create('<~filename>')
p2 = s._create('<~dirname>_<~basename>')
s.assertEquals(p1.format(s.a), p2.format(s.a))
s.assertTrue(p1.format(s.a).endswith('_path_to_a.mp3'))
s.assertEquals(p1.format(s.b), p2.format(s.b))
s.assertTrue(p1.format(s.b).endswith('_path_to_b.ogg'))
s.assertEquals(p1.format(s.c), p2.format(s.c))
s.assertTrue(p1.format(s.c).endswith('_one_more_a.flac'))
def test_long_filename(s):
if os.name == "nt":
a = AudioFile({"title": "x" * 300, "~filename": u"C:\\f.mp3"})
path = s._create(u'C:\\foobar\\ä<title>\\<title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 3 + 6 + 1 + 255 + 1 + 255)
path = s._create(u'äüö<title><title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 255)
else:
a = AudioFile({"title": "x" * 300, "~filename": "/f.mp3"})
path = s._create(u'/foobar/ä<title>/<title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 1 + 6 + 1 + 255 + 1 + 255)
path = s._create(u'äüö<title><title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 255)
class TFileFromPattern(_TFileFromPattern):
def _create(self, string):
return FileFromPattern(string)
def test_type(self):
pat = self._create('')
self.assertTrue(isinstance(pat.format(self.a), fsnative))
pat = self._create('<title>')
self.assertTrue(isinstance(pat.format(self.a), fsnative))
def test_number_dot_title_dot(s):
pat = s._create('<tracknumber>. <title>.')
s.assertEquals(pat.format(s.a), '05. Title5..mp3')
s.assertEquals(pat.format(s.b), '06. Title6..ogg')
s.assertEquals(pat.format(s.c), '. test_subdir..flac')
def test_tracknumber_decimals(s):
pat = s._create('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '05. Title5.mp3')
s.assertEquals(pat.format(s.e), '0007. Title7.mp3')
def test_ext_case_preservation(s):
x = AudioFile({'~filename': fsnative(u'/tmp/Xx.Flac'), 'title': 'Xx'})
# If pattern has a particular ext, preserve case of ext
p1 = s._create('<~basename>')
s.assertEquals(p1.format(x), 'Xx.Flac')
p2 = s._create('<title>.FLAC')
s.assertEquals(p2.format(x), 'Xx.FLAC')
# If pattern doesn't have a particular ext, lowercase ext
p3 = s._create('<title>')
s.assertEquals(p3.format(x), 'Xx.flac')
class TArbitraryExtensionFileFromPattern(_TFileFromPattern):
def _create(self, string):
return ArbitraryExtensionFileFromPattern(string)
def test_number_dot_title_dot(s):
pat = s._create('<tracknumber>. <title>.')
if os.name == 'nt':
# Can't have Windows names ending with dot
s.assertEquals(pat.format(s.a), '05. Title5_')
s.assertEquals(pat.format(s.b), '06. Title6_')
s.assertEquals(pat.format(s.c), '. test_subdir_')
else:
s.assertEquals(pat.format(s.a), '05. Title5.')
s.assertEquals(pat.format(s.b), '06. Title6.')
s.assertEquals(pat.format(s.c), '. test_subdir.')
def test_tracknumber_decimals(s):
pat = s._create('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '05. Title5')
s.assertEquals(pat.format(s.e), '0007. Title7')
def test_constant_albumart_example(s):
pat = s._create("folder.jpg")
s.assertEquals(pat.format(s.a), 'folder.jpg')
def test_extra_dots(s):
pat = s._create("<artist~album>.png")
s.assertEquals(pat.format(s.f), 'Foo - Best Of.png')
pat = s._create("<albumartist~title>.png")
s.assertEquals(pat.format(s.f), 'foo.bar - The.Final.Word.png')
class TXMLFromPattern(_TPattern):
def test_markup_passthrough(s):
pat = XMLFromPattern(r'\<b\><<title>>\</b\>')
s.assertEquals(pat.format(s.a), '<b><Title5></b>')
s.assertEquals(pat.format(s.b), '<b><Title6></b>')
s.assertEquals(pat.format(s.c), '<b><test/subdir></b>')
def test_escape(s):
pat = XMLFromPattern(r'\<b\><<xmltest>>\</b\>')
s.assertEquals(pat.format(s.a), '<b><<&>></b>')
def test_cond_markup(s):
pat = XMLFromPattern(r'<title|\<b\><title> woo\</b\>>')
s.assertEquals(pat.format(s.a), '<b>Title5 woo</b>')
class TXMLFromMarkupPattern(_TPattern):
def _test_markup(self, text):
from gi.repository import Pango
Pango.parse_markup(text, -1, "\x00")
def test_convenience(s):
pat = XMLFromMarkupPattern(r'[b]foo[/b]')
s.assertEquals(pat.format(s.a), '<b>foo</b>')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern('[small ]foo[/small \t]')
s.assertEquals(pat.format(s.a), '<small >foo</small \t>')
s._test_markup(pat.format(s.a))
def test_link(s):
pat = XMLFromMarkupPattern(r'[a href=""]foo[/a]')
s.assertEquals(pat.format(s.a), '<a href="">foo</a>')
def test_convenience_invalid(s):
pat = XMLFromMarkupPattern(r'[b foo="1"]')
s.assertEquals(pat.format(s.a), '[b foo="1"]')
s._test_markup(pat.format(s.a))
def test_span(s):
pat = XMLFromMarkupPattern(r'[span]foo[/span]')
s.assertEquals(pat.format(s.a), '<span>foo</span>')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern(r'[span weight="bold"]foo[/span]')
s.assertEquals(pat.format(s.a), '<span weight="bold">foo</span>')
s._test_markup(pat.format(s.a))
def test_escape(s):
pat = XMLFromMarkupPattern(r'\[b]')
s.assertEquals(pat.format(s.a), '[b]')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern(r'\\\\[b]\\\\[/b]')
s.assertEquals(pat.format(s.a), r'\\<b>\\</b>')
s._test_markup(pat.format(s.a))
class TRealTags(TestCase):
def test_empty(self):
self.failUnlessEqual(Pattern("").tags, [])
def test_both(self):
pat = "<foo|<~bar~fuu> - <fa>|<bar>>"
self.failUnlessEqual(Pattern(pat).tags, ["bar", "fuu", "fa"])
pat = "<foo|<~bar~fuu> - <fa>|<quux>>"
self.failUnlessEqual(Pattern(pat).tags, ["bar", "fuu", "fa", "quux"])
class TPatternFormatList(_TPattern):
def test_numeric(self):
pat = Pattern("<~#rating>")
self.assertEqual(pat.format_list(self.a), {("0.50", "0.50")})
def test_empty(self):
pat = Pattern("<nopenope>")
self.assertEqual(pat.format_list(self.a), {("", "")})
def test_same(s):
pat = Pattern('<~basename> <title>')
s.failUnlessEqual(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
pat = Pattern('/a<genre|/<genre>>/<title>')
s.failUnlessEqual(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
def test_same2(s):
fpat = FileFromPattern('<~filename>')
pat = Pattern('<~filename>')
s.assertEquals(fpat.format_list(s.a),
{(fpat.format(s.a), fpat.format(s.a))})
s.assertEquals(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
def test_tied(s):
pat = Pattern('<genre>')
s.failUnlessEqual(pat.format_list(s.c), {('/', '/')})
pat = Pattern('<performer>')
s.failUnlessEqual(pat.format_list(s.d), {('a', 'a'), ('b', 'b')})
pat = Pattern('<performer><performer>')
s.failUnlessEqual(set(pat.format_list(s.d)),
{('aa', 'aa'), ('ab', 'ab'),
('ba', 'ba'), ('bb', 'bb')})
pat = Pattern('<~performer~artist>')
s.failUnlessEqual(pat.format_list(s.d),
{('a', 'a'), ('b', 'b'),
('bar', 'bar'), ('foo', 'foo')})
pat = Pattern('<performer~artist>')
s.failUnlessEqual(pat.format_list(s.d),
{('a', 'a'), ('b', 'b'),
('bar', 'bar'), ('foo', 'foo')})
pat = Pattern('<artist|<artist>.|<performer>>')
s.failUnlessEqual(pat.format_list(s.d),
{('foo.', 'foo.'), ('bar.', 'bar.')})
pat = Pattern('<artist|<artist|<artist>.|<performer>>>')
s.failUnlessEqual(pat.format_list(s.d),
{('foo.', 'foo.'), ('bar.', 'bar.')})
def test_sort(s):
pat = Pattern('<album>')
s.failUnlessEqual(pat.format_list(s.f),
{(u'Best Of', u'Best Of')})
pat = Pattern('<album>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Album5', u'SortAlbum5')})
pat = Pattern('<artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3')})
pat = Pattern('<artist> x')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1 x', u'SortA1 x'),
(u' x', u'SortA2 x'),
(u'Artist3 x', u'Artist3 x')})
def test_sort_tied(s):
pat = Pattern('<~artist~album>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3'),
(u'Album5', u'SortAlbum5')})
pat = Pattern('<~album~artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3'),
(u'Album5', u'SortAlbum5')})
pat = Pattern('<~artist~artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3')})
def test_sort_combine(s):
pat = Pattern('<album> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Album5 Artist1', u'SortAlbum5 SortA1'),
(u'Album5 ', u'SortAlbum5 SortA2'),
(u'Album5 Artist3', u'SortAlbum5 Artist3')})
pat = Pattern('x <artist> <album>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'x Artist1 Album5', u'x SortA1 SortAlbum5'),
(u'x Album5', u'x SortA2 SortAlbum5'),
(u'x Artist3 Album5', u'x Artist3 SortAlbum5')})
pat = Pattern(' <artist> <album> xx')
s.failUnlessEqual(pat.format_list(s.h),
{(u' Artist1 Album5 xx', u' SortA1 SortAlbum5 xx'),
(u' Album5 xx', u' SortA2 SortAlbum5 xx'),
(u' Artist3 Album5 xx', u' Artist3 SortAlbum5 xx')})
pat = Pattern('<album> <tracknumber> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Album5 7/8 Artist1', u'SortAlbum5 7/8 SortA1'),
(u'Album5 7/8 ', u'SortAlbum5 7/8 SortA2'),
(u'Album5 7/8 Artist3', u'SortAlbum5 7/8 Artist3')})
pat = Pattern('<tracknumber> <album> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'7/8 Album5 Artist1', u'7/8 SortAlbum5 SortA1'),
(u'7/8 Album5 ', u'7/8 SortAlbum5 SortA2'),
(u'7/8 Album5 Artist3', u'7/8 SortAlbum5 Artist3')})
def test_sort_multiply(s):
pat = Pattern('<artist> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Artist1 Artist1', u'SortA1 SortA1'),
(u' Artist1', u'SortA2 SortA1'),
(u'Artist3 Artist1', u'Artist3 SortA1'),
(u'Artist1 ', u'SortA1 SortA2'),
(u' ', u'SortA2 SortA2'),
(u'Artist3 ', u'Artist3 SortA2'),
(u'Artist1 Artist3', u'SortA1 Artist3'),
(u' Artist3', u'SortA2 Artist3'),
(u'Artist3 Artist3', u'Artist3 Artist3')})
def test_missing_value(self):
pat = Pattern('<genre> - <artist>')
self.assertEqual(pat.format_list(self.a),
{(" - Artist", " - Artist")})
pat = Pattern('')
self.assertEqual(pat.format_list(self.a), {("", "")})
def test_string(s):
pat = Pattern('display')
s.assertEqual(pat.format_list(s.a), {("display", "display")})
| gpl-2.0 | 6,373,307,308,129,381,000 | 41.515464 | 79 | 0.533988 | false |
c0hen/django-venv | lib/python3.4/site-packages/psycopg2/pool.py | 3 | 8136 | """Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
New 'minconn' connections are created immediately calling 'connfunc'
with given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
Note that this can lead to some code fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
        # import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
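# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of how the pool classes above are typically driven.  It is
# kept as a comment because it needs a reachable PostgreSQL server; the DSN
# below is a placeholder, not something this module ships with.
#
#   pool = ThreadedConnectionPool(1, 10, "dbname=test user=postgres")
#   conn = pool.getconn()          # borrow a connection (keyed automatically)
#   try:
#       cur = conn.cursor()
#       cur.execute("SELECT 1")
#   finally:
#       pool.putconn(conn)         # hand it back so other threads can reuse it
#   pool.closeall()                # close every pooled connection at shutdown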
| gpl-3.0 | 513,129,088,310,076,300 | 33.621277 | 78 | 0.60263 | false |
Darthkpo/xtt | openpyxl/tests/test_backend.py | 4 | 2038 | #
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Make sure we're using the fastest backend available"""
from openpyxl import LXML
try:
from xml.etree.cElementTree import Element as cElement
C = True
except ImportError:
C = False
try:
from lxml.etree import Element as lElement
except ImportError:
    lElement = None
from xml.etree.ElementTree import Element as pyElement
def test_backend():
from openpyxl.xml.functions import Element
if LXML is True:
assert Element == lElement
elif C is True:
assert Element == cElement
else:
assert Element == pyElement
def test_namespace_register():
from openpyxl.xml.functions import Element, tostring
from openpyxl.xml.constants import SHEET_MAIN_NS
e = Element('{%s}sheet' % SHEET_MAIN_NS)
xml = tostring(e)
if hasattr(xml, "decode"):
xml = xml.decode("utf-8")
assert xml.startswith("<s:sheet")
| mit | 8,471,415,294,556,642,000 | 33.542373 | 79 | 0.736997 | false |
InterfaceMasters/ONL | components/all/vendor-config/qemu/src/python/qemu/__init__.py | 9 | 1313 | #!/usr/bin/python
############################################################
# <bsn.cl fy=2013 v=onl>
#
# Copyright 2013, 2014 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
# </bsn.cl>
############################################################
#
# OpenNetworkPlatform support for QEMU Platforms.
#
############################################################
from onl.platform.base import OpenNetworkPlatformBase, sysinfo
import struct
import time
class OpenNetworkPlatformQEMU(OpenNetworkPlatformBase):
def manufacturer(self):
return "QEMU"
def _sys_info_dict(self):
return {
sysinfo.MAGIC : 0,
sysinfo.PRODUCT_NAME : "QEMU Emulation",
sysinfo.PART_NUMBER : "QEMU"
}
| epl-1.0 | -6,204,466,505,490,720,000 | 28.840909 | 62 | 0.593298 | false |
shipci/boto | tests/unit/vpc/test_customergateway.py | 114 | 4610 | from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, CustomerGateway
class TestDescribeCustomerGateways(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGatewaySet>
<item>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>available</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</item>
</customerGatewaySet>
</DescribeCustomerGatewaysResponse>
"""
def test_get_all_customer_gateways(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_customer_gateways(
'cgw-b4dc3961',
filters=OrderedDict([('state', ['pending', 'available']),
('ip-address', '12.1.2.3')]))
self.assert_request_parameters({
'Action': 'DescribeCustomerGateways',
'CustomerGatewayId.1': 'cgw-b4dc3961',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'pending',
'Filter.1.Value.2': 'available',
'Filter.2.Name': 'ip-address',
'Filter.2.Value.1': '12.1.2.3'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 1)
self.assertIsInstance(api_response[0], CustomerGateway)
self.assertEqual(api_response[0].id, 'cgw-b4dc3961')
class TestCreateCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGateway>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>pending</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</customerGateway>
</CreateCustomerGatewayResponse>
"""
def test_create_customer_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_customer_gateway(
'ipsec.1', '12.1.2.3', 65534)
self.assert_request_parameters({
'Action': 'CreateCustomerGateway',
'Type': 'ipsec.1',
'IpAddress': '12.1.2.3',
'BgpAsn': 65534},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, CustomerGateway)
self.assertEquals(api_response.id, 'cgw-b4dc3961')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.type, 'ipsec.1')
self.assertEquals(api_response.ip_address, '12.1.2.3')
self.assertEquals(api_response.bgp_asn, 65534)
class TestDeleteCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteCustomerGatewayResponse>
"""
def test_delete_customer_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961')
self.assert_request_parameters({
'Action': 'DeleteCustomerGateway',
'CustomerGatewayId': 'cgw-b4dc3961'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| mit | 2,568,950,329,228,620,300 | 39.086957 | 95 | 0.580694 | false |
vim-IDE/python-mode | pymode/libs/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
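# --- Usage sketch (added for illustration; not part of the original file) ---
# A small, guarded demo of the helpers defined above.  The marker strings are
# arbitrary examples chosen to stay inside the whitelisted node types.
if __name__ == '__main__':
    env = default_environment()
    print(sorted(env))                            # names usable in markers
    print(interpret("python_version >= '2.6'"))   # evaluated against defaults
    print(interpret("os.name == 'posix'", env))   # explicit environment dict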
| lgpl-3.0 | 4,987,659,097,569,394,000 | 32.436975 | 106 | 0.621262 | false |
jayceyxc/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/dump_worksheet.py | 61 | 8158 | # file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from ..cell import column_index_from_string, get_column_letter, Cell
from ..worksheet import Worksheet
from ..shared.xmltools import XMLGenerator, get_document_content, \
start_tag, end_tag, tag
from ..shared.date_time import SharedDate
from ..shared.ooxml import MAX_COLUMN, MAX_ROW
from tempfile import NamedTemporaryFile
from ..writer.excel import ExcelWriter
from ..writer.strings import write_string_table
from ..writer.styles import StyleWriter
from ..style import Style, NumberFormat
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
'style':'1'},
'string':{'type':Cell.TYPE_STRING,
'style':'0'},
'numeric':{'type':Cell.TYPE_NUMERIC,
'style':'0'},
'formula':{'type':Cell.TYPE_FORMULA,
'style':'0'},
'boolean':{'type':Cell.TYPE_BOOL,
'style':'0'},
}
DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
class DumpWorksheet(Worksheet):
"""
.. warning::
You shouldn't initialize this yourself, use :class:`openpyxl.workbook.Workbook` constructor instead,
with `optimized_write = True`.
"""
def __init__(self, parent_workbook):
Worksheet.__init__(self, parent_workbook)
self._max_col = 0
self._max_row = 0
self._parent = parent_workbook
self._fileobj_header = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.header', delete=False)
self._fileobj_content = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.content', delete=False)
self._fileobj = NamedTemporaryFile(mode='w', prefix='openpyxl.', delete=False)
self.doc = XMLGenerator(self._fileobj_content, 'utf-8')
self.header = XMLGenerator(self._fileobj_header, 'utf-8')
self.title = 'Sheet'
self._shared_date = SharedDate()
self._string_builder = self._parent.strings_table_builder
@property
def filename(self):
return self._fileobj.name
def write_header(self):
doc = self.header
start_tag(doc, 'worksheet',
{'xml:space': 'preserve',
'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
start_tag(doc, 'sheetPr')
tag(doc, 'outlinePr',
{'summaryBelow': '1',
'summaryRight': '1'})
end_tag(doc, 'sheetPr')
tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
start_tag(doc, 'sheetViews')
start_tag(doc, 'sheetView', {'workbookViewId': '0'})
tag(doc, 'selection', {'activeCell': 'A1',
'sqref': 'A1'})
end_tag(doc, 'sheetView')
end_tag(doc, 'sheetViews')
tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
start_tag(doc, 'sheetData')
def close(self):
self._close_content()
self._close_header()
self._write_fileobj(self._fileobj_header)
self._write_fileobj(self._fileobj_content)
self._fileobj.close()
def _write_fileobj(self, fobj):
fobj.flush()
fobj.seek(0)
while True:
chunk = fobj.read(4096)
if not chunk:
break
self._fileobj.write(chunk)
fobj.close()
os.remove(fobj.name)
self._fileobj.flush()
def _close_header(self):
doc = self.header
#doc.endDocument()
def _close_content(self):
doc = self.doc
end_tag(doc, 'sheetData')
end_tag(doc, 'worksheet')
#doc.endDocument()
def get_dimensions(self):
if not self._max_col or not self._max_row:
return 'A1'
else:
return '%s%d' % (get_column_letter(self._max_col), (self._max_row))
def append(self, row):
"""
:param row: iterable containing values to append
:type row: iterable
"""
doc = self.doc
self._max_row += 1
span = len(row)
self._max_col = max(self._max_col, span)
row_idx = self._max_row
attrs = {'r': '%d' % row_idx,
'spans': '1:%d' % span}
start_tag(doc, 'row', attrs)
for col_idx, cell in enumerate(row):
if cell is None:
continue
coordinate = '%s%d' % (get_column_letter(col_idx+1), row_idx)
attributes = {'r': coordinate}
if isinstance(cell, bool):
dtype = 'boolean'
elif isinstance(cell, (int, float)):
dtype = 'numeric'
elif isinstance(cell, (datetime.datetime, datetime.date)):
dtype = 'datetime'
cell = self._shared_date.datetime_to_julian(cell)
attributes['s'] = STYLES[dtype]['style']
elif cell and cell[0] == '=':
dtype = 'formula'
else:
dtype = 'string'
cell = self._string_builder.add(cell)
attributes['t'] = STYLES[dtype]['type']
start_tag(doc, 'c', attributes)
if dtype == 'formula':
tag(doc, 'f', body = '%s' % cell[1:])
tag(doc, 'v')
else:
tag(doc, 'v', body = '%s' % cell)
end_tag(doc, 'c')
end_tag(doc, 'row')
def save_dump(workbook, filename):
writer = ExcelDumpWriter(workbook)
writer.save(filename)
return True
class ExcelDumpWriter(ExcelWriter):
def __init__(self, workbook):
self.workbook = workbook
self.style_writer = StyleDumpWriter(workbook)
self.style_writer._style_list.append(DATETIME_STYLE)
def _write_string_table(self, archive):
shared_string_table = self.workbook.strings_table_builder.get_table()
archive.writestr(ARC_SHARED_STRINGS,
write_string_table(shared_string_table))
return shared_string_table
def _write_worksheets(self, archive, shared_string_table, style_writer):
for i, sheet in enumerate(self.workbook.worksheets):
sheet.write_header()
sheet.close()
archive.write(sheet.filename, PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1))
os.remove(sheet.filename)
class StyleDumpWriter(StyleWriter):
def _get_style_list(self, workbook):
return []
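# --- Usage sketch (added for illustration; not part of the original module) ---
# As the DumpWorksheet docstring notes, these classes are reached through the
# Workbook constructor with optimized_write=True rather than built directly.
# Kept as a comment; 'dump.xlsx' and the row contents are just placeholders.
#
#   from ..workbook import Workbook   # relative import inside this package
#   wb = Workbook(optimized_write=True)
#   ws = wb.create_sheet()            # expected to be a DumpWorksheet here
#   for row in range(100):
#       ws.append(['row %d' % row, row, 3.14])
#   save_dump(wb, 'dump.xlsx')        # write all dumped sheets to disk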
| apache-2.0 | 8,361,752,331,826,589,000 | 30.867188 | 114 | 0.599657 | false |
ApolloAuto/apollo | third_party/gpus/check_cuda_libs.py | 3 | 2904 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verifies that a list of libraries is installed on the system.
Takes a list of arguments with every two subsequent arguments being a logical
tuple of (path, check_soname). The path to the library and either True or False
to indicate whether to check the soname field on the shared library.
Example Usage:
./check_cuda_libs.py /path/to/lib1.so True /path/to/lib2.so False
"""
import os
import os.path
import platform
import subprocess
import sys
# pylint: disable=g-import-not-at-top,g-importing-member
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top,g-importing-member
class ConfigError(Exception):
pass
def check_cuda_lib(path, check_soname=True):
"""Tests if a library exists on disk and whether its soname matches the filename.
Args:
path: the path to the library.
check_soname: whether to check the soname as well.
Raises:
ConfigError: If the library does not exist or if its soname does not match
the filename.
"""
if not os.path.isfile(path):
raise ConfigError("No library found under: " + path)
objdump = which("objdump")
if check_soname and objdump is not None:
# Decode is necessary as in py3 the return type changed from str to bytes
output = subprocess.check_output([objdump, "-p", path]).decode("utf-8")
output = [line for line in output.splitlines() if "SONAME" in line]
sonames = [line.strip().split(" ")[-1] for line in output]
if not any(soname == os.path.basename(path) for soname in sonames):
raise ConfigError("None of the libraries match their SONAME: " + path)
def main():
try:
args = [argv for argv in sys.argv[1:]]
if len(args) % 2 == 1:
raise ConfigError("Expected even number of arguments")
checked_paths = []
for i in range(0, len(args), 2):
path = args[i]
check_cuda_lib(path, check_soname=args[i + 1] == "True")
checked_paths.append(path)
# pylint: disable=superfluous-parens
print(os.linesep.join(checked_paths))
# pylint: enable=superfluous-parens
except ConfigError as e:
sys.stderr.write(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 | -2,862,822,080,672,765,400 | 33.571429 | 83 | 0.692149 | false |
slava-sh/NewsBlur | vendor/readability/cleaners.py | 13 | 1199 | # strip out a set of nuisance html attributes that can mess up rendering in RSS feeds
import re
from lxml.html.clean import Cleaner
bad_attrs = ['style', '[-a-z]*color', 'background[-a-z]*', 'on*']
single_quoted = "'[^']+'"
double_quoted = '"[^"]+"'
non_space = '[^ "\'>]+'
htmlstrip = re.compile("<" # open
"([^>]+) " # prefix
"(?:%s) *" % ('|'.join(bad_attrs),) + # undesirable attributes
'= *(?:%s|%s|%s)' % (non_space, single_quoted, double_quoted) + # value
"([^>]*)" # postfix
">" # end
, re.I)
def clean_attributes(html):
while htmlstrip.search(html):
html = htmlstrip.sub('<\\1\\2>', html)
return html
def normalize_spaces(s):
    """replace any sequence of whitespace
    characters with a single space"""
    if not s: return ''
    return ' '.join(s.split())
html_cleaner = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True, embedded=False,
frames=False, forms=False, annoying_tags=False, remove_tags=None,
remove_unknown_tags=False, safe_attrs_only=False)
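# --- Usage sketch (added for illustration; not part of the original file) ---
# A small, guarded demo of the helpers above; the markup is made up.
if __name__ == '__main__':
    dirty = '<td style="padding:0" bgcolor="#fff">Hello   world</td>'
    print(clean_attributes(dirty))                 # -> '<td>Hello   world</td>'
    print(normalize_spaces('Hello   world\n\t!'))  # -> 'Hello world !'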
| mit | 1,016,075,113,468,532,400 | 36.46875 | 85 | 0.59633 | false |
polyaxon/polyaxon | core/polyaxon/polypod/common/container_resources.py | 1 | 1492 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Union
from polyaxon.k8s import k8s_schemas
def sanitize_resources(
resources: Union[k8s_schemas.V1ResourceRequirements, Dict]
) -> Optional[k8s_schemas.V1ResourceRequirements]:
def validate_resources(r_field: Dict) -> Dict:
if not r_field:
return r_field
for k in r_field:
r_field[k] = str(r_field[k])
return r_field
if not resources:
return None
if isinstance(resources, Dict):
return k8s_schemas.V1ResourceRequirements(
limits=validate_resources(resources.get("limits", None)),
requests=validate_resources(resources.get("requests", None)),
)
else:
return k8s_schemas.V1ResourceRequirements(
limits=validate_resources(resources.limits),
requests=validate_resources(resources.requests),
)
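# --- Usage sketch (added for illustration; not part of the original file) ---
# A small, guarded demo of sanitize_resources; the quantities are arbitrary.
if __name__ == "__main__":
    sanitized = sanitize_resources(
        {"requests": {"cpu": 0.5, "memory": "256Mi"}, "limits": {"cpu": 1}}
    )
    print(sanitized.requests)  # {'cpu': '0.5', 'memory': '256Mi'}
    print(sanitized.limits)    # {'cpu': '1'}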
| apache-2.0 | -2,722,190,665,310,583,300 | 31.434783 | 74 | 0.691019 | false |
leapcode/bitmask-dev | tests/integration/mail/outgoing/test_outgoing.py | 1 | 9783 | # -*- coding: utf-8 -*-
# test_gateway.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
SMTP gateway tests.
"""
import re
from copy import deepcopy
from StringIO import StringIO
from email.parser import Parser
from datetime import datetime
from twisted.internet.defer import fail
from twisted.mail.smtp import User
from twisted.python import log
from mock import Mock
from leap.bitmask.mail.rfc3156 import RFC3156CompliantGenerator
from leap.bitmask.mail.outgoing.service import OutgoingMail
from leap.bitmask.mail.outgoing.sender import SMTPSender
from leap.bitmask.mail.testing import ADDRESS, ADDRESS_2, PUBLIC_KEY_2
from leap.bitmask.mail.testing import KeyManagerWithSoledadTestCase
from leap.bitmask.mail.testing.smtp import getSMTPFactory
from leap.bitmask.keymanager import errors
BEGIN_PUBLIC_KEY = "-----BEGIN PGP PUBLIC KEY BLOCK-----"
TEST_USER = u'[email protected]'
class TestOutgoingMail(KeyManagerWithSoledadTestCase):
EMAIL_DATA = ['HELO gateway.leap.se',
'MAIL FROM: <%s>' % ADDRESS_2,
'RCPT TO: <%s>' % ADDRESS,
'DATA',
'From: User <%s>' % ADDRESS_2,
'To: Leap <%s>' % ADDRESS,
'Date: ' + datetime.now().strftime('%c'),
'Subject: test message',
'',
'This is a secret message.',
'Yours,',
'A.',
'',
'.',
'QUIT']
def setUp(self):
self.lines = [line for line in self.EMAIL_DATA[4:12]]
self.lines.append('') # add a trailing newline
self.raw = '\r\n'.join(self.lines)
self.expected_body = '\r\n'.join(self.EMAIL_DATA[9:12]) + "\r\n"
self.fromAddr = ADDRESS_2
class opts:
cert = u'/tmp/cert'
key = u'/tmp/cert'
hostname = 'remote'
port = 666
self.opts = opts
def init_outgoing_and_proto(_):
self.outgoing = OutgoingMail(self.fromAddr, self.km)
self.outgoing.add_sender(
SMTPSender(self.fromAddr, opts.key, opts.hostname, opts.port))
user = TEST_USER
# TODO -- this shouldn't need SMTP to be tested!? or does it?
self.proto = getSMTPFactory(
{user: None}, {user: self.km}, {user: None})
self.dest = User(ADDRESS, 'gateway.leap.se', self.proto, ADDRESS_2)
d = KeyManagerWithSoledadTestCase.setUp(self)
d.addCallback(init_outgoing_and_proto)
return d
def test_message_encrypt(self):
"""
Test if message gets encrypted to destination email.
"""
def check_decryption(res):
decrypted, _ = res
self.assertIn(
self.expected_body,
decrypted,
'Decrypted text does not contain the original text.')
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS))
d.addCallback(check_decryption)
return d
def test_message_encrypt_sign(self):
"""
Test if message gets encrypted to destination email and signed with
sender key.
        """
def check_decryption_and_verify(res):
decrypted, signkey = res
self.assertIn(
self.expected_body,
decrypted,
'Decrypted text does not contain the original text.')
self.assertTrue(ADDRESS_2 in signkey.address,
"Verification failed")
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS, verify=ADDRESS_2))
d.addCallback(check_decryption_and_verify)
return d
def test_message_sign(self):
"""
Test if message is signed with sender key.
"""
# mock the key fetching
# XXX this is fucking ugly.
self.km._fetch_keys_from_server_and_store_local = Mock(
return_value=fail(errors.KeyNotFound()))
recipient = User('[email protected]',
'gateway.leap.se', self.proto, ADDRESS)
self.outgoing = OutgoingMail(self.fromAddr, self.km)
def check_signed(res):
message, _ = res
self.assertTrue('Content-Type' in message)
self.assertEqual('multipart/signed', message.get_content_type())
self.assertEqual('application/pgp-signature',
message.get_param('protocol'))
self.assertEqual('pgp-sha512', message.get_param('micalg'))
# assert content of message
body = (message.get_payload(0)
.get_payload(0)
.get_payload(decode=True))
self.assertEqual(self.expected_body,
body)
# assert content of signature
self.assertTrue(
message.get_payload(1).get_payload().startswith(
'-----BEGIN PGP SIGNATURE-----\n'),
'Message does not start with signature header.')
self.assertTrue(
message.get_payload(1).get_payload().endswith(
'-----END PGP SIGNATURE-----\n'),
'Message does not end with signature footer.')
return message
def verify(message):
# replace EOL before verifying (according to rfc3156)
fp = StringIO()
g = RFC3156CompliantGenerator(
fp, mangle_from_=False, maxheaderlen=76)
g.flatten(message.get_payload(0))
signed_text = re.sub('\r?\n', '\r\n',
fp.getvalue())
def assert_verify(key):
self.assertTrue(ADDRESS_2 in key.address,
'Signature could not be verified.')
d = self.km.verify(
signed_text, ADDRESS_2,
detached_sig=message.get_payload(1).get_payload())
d.addCallback(assert_verify)
return d
# TODO shouldn't depend on private method on this test
d = self.outgoing._maybe_encrypt_and_sign(self.raw, recipient)
d.addCallback(check_signed)
d.addCallback(verify)
return d
def test_attach_key(self):
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(self._check_headers, self.lines[:4])
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS))
d.addCallback(lambda (decrypted, _):
self._check_key_attachment(Parser().parsestr(decrypted)))
return d
def test_attach_key_not_known(self):
unknown_address = "[email protected]"
lines = deepcopy(self.lines)
lines[1] = "To: <%s>" % (unknown_address,)
raw = '\r\n'.join(lines)
dest = User(unknown_address, 'gateway.leap.se', self.proto, ADDRESS_2)
d = self.outgoing._maybe_encrypt_and_sign(
raw, dest, fetch_remote=False)
d.addCallback(lambda (message, _):
self._check_headers(message, lines[:4]))
d.addCallback(self._check_key_attachment)
d.addErrback(log.err)
return d
def _check_headers(self, message, headers):
msgstr = message.as_string(unixfrom=False)
for header in headers:
self.assertTrue(header in msgstr,
"Missing header: %s" % (header,))
return message
def _check_key_attachment(self, message):
for payload in message.get_payload():
if payload.is_multipart():
return self._check_key_attachment(payload)
if 'application/pgp-keys' == payload.get_content_type():
keylines = PUBLIC_KEY_2.split('\n')
key = BEGIN_PUBLIC_KEY + '\n\n' + '\n'.join(keylines[4:-1])
self.assertTrue(key in payload.get_payload(decode=True),
"Key attachment don't match")
return
self.fail("No public key attachment found")
def _assert_encrypted(self, res):
message, _ = res
self.assertTrue('Content-Type' in message)
self.assertEqual('multipart/encrypted', message.get_content_type())
self.assertEqual('application/pgp-encrypted',
message.get_param('protocol'))
self.assertEqual(2, len(message.get_payload()))
self.assertEqual('application/pgp-encrypted',
message.get_payload(0).get_content_type())
self.assertEqual('application/octet-stream',
message.get_payload(1).get_content_type())
return message
| gpl-3.0 | -2,455,723,392,723,195,400 | 38.289157 | 79 | 0.580088 | false |
rvraghav93/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
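# Background, for illustration: with smooth_idf=True the transformer uses
# idf = ln((1 + n_samples) / (1 + df)) + 1, i.e. it acts as if one extra
# document containing every term had been seen. In the second matrix above
# the all-zero third feature (df == 0) still gets a finite idf of
# ln(4 / 1) + 1 ~= 2.39, which is why smoothing copes with zero columns while
# the unsmoothed variant in the next test runs into a divide by zero.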
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features
    # with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
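# For illustration: sublinear_tf replaces each term frequency tf with
# 1 + ln(tf), so the raw counts 1, 2 and 3 above map to 1.0, ~1.693 and
# ~2.099, which is exactly what the monotonicity and upper-bound assertions
# in test_sublinear_tf check.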
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are
        # likely to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_false(hasattr(t2, "idf_"))
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
    # test for ValueError on an unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # Test that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause | 3,974,509,567,453,235,000 | 35.891282 | 79 | 0.636576 | false |
xgds/xgds_core | setup.py | 1 | 1649 | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
# Use the docstring of the __init__ file to be the description
DESC = " ".join(__import__('xgds_core').__doc__.splitlines()).strip()
setup(
name="xgds_core",
version=__import__('xgds_core').get_version().replace(' ', '-'),
url='',
author='tecohen',
author_email='',
description=DESC,
long_description=read_file('README'),
packages=find_packages(),
include_package_data=True,
install_requires=read_file('requirements.txt'),
classifiers=[
'License :: OSI Approved :: NASA Open Source Agreement',
'Framework :: Django',
],
)
| apache-2.0 | -489,253,708,743,833,800 | 33.354167 | 83 | 0.688902 | false |
40423126/2016fallcadp_ag9 | plugin/liquid_tags/mdx_liquid_tags.py | 281 | 3447 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks',
'FLICKR_API_KEY': 'flickr',
'GIPHY_API_KEY': 'giphy'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin',
'FLICKR_API_KEY': 'Flickr key for accessing the API',
'GIPHY_API_KEY': 'Giphy key for accessing the API'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
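# Usage sketch (kept as a comment so it does not alter the module; the tag
# name and behaviour are hypothetical, not an actual sub-plugin): a liquid
# tag plugin registers its handler through the decorator above, e.g.
#
#   @LiquidTags.register('literal')
#   def literal(preprocessor, tag, markup):
#       # ``markup`` is everything that followed the tag name inside
#       # {% literal ... %}; the returned string replaces the whole tag.
#       return markup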
| agpl-3.0 | 6,307,572,002,454,985,000 | 34.173469 | 77 | 0.591529 | false |
f-guichard/cf-sample-php-buildpack-custo | extensions/composer/extension.py | 6 | 16424 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension
Downloads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
from build_pack_utils import utils
from build_pack_utils import stream_output
from extension_helpers import ExtensionHelper
_log = logging.getLogger('composer')
def find_composer_paths(ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
json_path = None
lock_path = None
json_paths = [
os.path.join(build_dir, 'composer.json'),
os.path.join(build_dir, webdir, 'composer.json')
]
lock_paths = [
os.path.join(build_dir, 'composer.lock'),
os.path.join(build_dir, webdir, 'composer.lock')
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
json_paths = json_paths + [
os.path.join(build_dir, env_path, 'composer.json'),
os.path.join(build_dir, webdir, env_path, 'composer.json')
]
lock_paths = lock_paths + [
os.path.join(build_dir, env_path, 'composer.lock'),
os.path.join(build_dir, webdir, env_path, 'composer.lock')
]
for path in json_paths:
if os.path.exists(path):
json_path = path
for path in lock_paths:
if os.path.exists(path):
lock_path = path
return (json_path, lock_path)
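# Illustration of the search order above (paths are hypothetical): with
# BUILD_DIR=/tmp/app, WEBDIR=htdocs and COMPOSER_PATH=backend the candidates
# probed for composer.json are /tmp/app/composer.json,
# /tmp/app/htdocs/composer.json, /tmp/app/backend/composer.json and
# /tmp/app/htdocs/backend/composer.json (likewise for composer.lock); the
# last candidate that exists wins.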
class ComposerConfiguration(object):
def __init__(self, ctx):
self._ctx = ctx
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
(self.json_path, self.lock_path) = \
find_composer_paths(self._ctx)
def read_exts_from_path(self, path):
exts = []
if path:
req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
ext_pat = re.compile(r'"ext-(.*?)"')
with open(path, 'rt') as fp:
data = fp.read()
for req_match in req_pat.finditer(data):
for ext_match in ext_pat.finditer(req_match.group(1)):
exts.append(ext_match.group(1))
return exts
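    # Illustrative example (hypothetical composer.json content): for a file
    # containing {"require": {"php": ">=5.5", "ext-mbstring": "*", "ext-gd": "*"}}
    # this method returns ['mbstring', 'gd']; requirements without the
    # "ext-" prefix are ignored.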
def pick_php_version(self, requested):
selected = None
if requested is None:
selected = self._ctx['PHP_VERSION']
elif requested == '5.5.*' or requested == '>=5.5':
selected = self._ctx['PHP_55_LATEST']
elif requested == '5.6.*' or requested == '>=5.6':
selected = self._ctx['PHP_56_LATEST']
elif requested.startswith('5.5.'):
selected = requested
elif requested.startswith('5.6.'):
selected = requested
else:
selected = self._ctx['PHP_VERSION']
return selected
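    # For example (illustrative): '5.6.*' resolves to PHP_56_LATEST from the
    # buildpack manifest, an explicit '5.5.10' is kept as-is, and anything
    # unrecognised falls back to the default PHP_VERSION.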
def read_version_from_composer(self, key):
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None:
composer = json.load(open(json_path, 'r'))
require = composer.get('require', {})
return require.get(key, None)
if lock_path is not None:
composer = json.load(open(lock_path, 'r'))
platform = composer.get('platform', {})
return platform.get(key, None)
return None
def configure(self):
if self.json_path or self.lock_path:
exts = []
# include any existing extensions
exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
# add 'openssl' extension
exts.append('openssl')
# add platform extensions from composer.json & composer.lock
exts.extend(self.read_exts_from_path(self.json_path))
exts.extend(self.read_exts_from_path(self.lock_path))
hhvm_version = self.read_version_from_composer('hhvm')
if hhvm_version:
self._ctx['PHP_VM'] = 'hhvm'
self._log.debug('Composer picked HHVM Version [%s]',
hhvm_version)
else:
# update context with new list of extensions,
# if composer.json exists
php_version = self.read_version_from_composer('php')
self._log.debug('Composer picked PHP Version [%s]',
php_version)
self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
self._ctx['PHP_VM'] = 'php'
class ComposerExtension(ExtensionHelper):
def __init__(self, ctx):
ExtensionHelper.__init__(self, ctx)
self._log = _log
def _defaults(self):
return {
'COMPOSER_VERSION': '1.0.0-alpha10',
'COMPOSER_PACKAGE': 'composer.phar',
'COMPOSER_DOWNLOAD_URL': '/composer/'
'{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
'COMPOSER_CACHE_DIR': '{CACHE_DIR}/composer'
}
def _should_compile(self):
(json_path, lock_path) = \
find_composer_paths(self._ctx)
return (json_path is not None or lock_path is not None)
def _compile(self, install):
self._builder = install.builder
self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
self.move_local_vendor_folder()
self.install()
self.run()
def move_local_vendor_folder(self):
vendor_path = os.path.join(self._ctx['BUILD_DIR'],
self._ctx['WEBDIR'],
'vendor')
if os.path.exists(vendor_path):
self._log.debug("Vendor [%s] exists, moving to LIBDIR",
vendor_path)
(self._builder.move()
.under('{BUILD_DIR}/{WEBDIR}')
.into('{BUILD_DIR}/{LIBDIR}')
.where_name_matches('^%s/.*$' % vendor_path)
.done())
def install(self):
self._builder.install().package('PHP').done()
if self._ctx['COMPOSER_VERSION'] == 'latest':
dependencies_path = os.path.join(self._ctx['BP_DIR'],
'dependencies')
if os.path.exists(dependencies_path):
raise RuntimeError('"COMPOSER_VERSION": "latest" ' \
'is not supported in the cached buildpack. Please vendor your preferred version of composer with your app, or use the provided default composer version.')
self._ctx['COMPOSER_DOWNLOAD_URL'] = \
'https://getcomposer.org/composer.phar'
self._builder.install()._installer.install_binary_direct(
self._ctx['COMPOSER_DOWNLOAD_URL'], None,
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
else:
self._builder.install()._installer._install_binary_from_manifest(
self._ctx['COMPOSER_DOWNLOAD_URL'],
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
def _github_oauth_token_is_valid(self, candidate_oauth_token):
stringio_writer = StringIO.StringIO()
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
return 'resources' in github_response_json
def _github_rate_exceeded(self, token_is_valid):
stringio_writer = StringIO.StringIO()
if token_is_valid:
candidate_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
else:
curl_command = 'curl https://api.github.com/rate_limit'
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
rate = github_response_json['rate']
num_remaining = rate['remaining']
return num_remaining <= 0
def setup_composer_github_token(self):
github_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
if self._github_oauth_token_is_valid(github_oauth_token):
print('-----> Using custom GitHub OAuth token in'
' $COMPOSER_GITHUB_OAUTH_TOKEN')
self.composer_runner.run('config', '-g',
'github-oauth.github.com',
'"%s"' % github_oauth_token)
return True
else:
print('-----> The GitHub OAuth token supplied from '
'$COMPOSER_GITHUB_OAUTH_TOKEN is invalid')
return False
def check_github_rate_exceeded(self, token_is_valid):
if self._github_rate_exceeded(token_is_valid):
print('-----> The GitHub api rate limit has been exceeded. '
'Composer will continue by downloading from source, which might result in slower downloads. '
'You can increase your rate limit with a GitHub OAuth token. '
'Please obtain a GitHub OAuth token by registering your application at '
'https://github.com/settings/applications/new. '
'Then set COMPOSER_GITHUB_OAUTH_TOKEN in your environment to the value of this token.')
def run(self):
# Move composer files into root directory
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None and os.path.dirname(json_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(json_path))
.where_name_is('composer.json')
.into('BUILD_DIR')
.done())
if lock_path is not None and os.path.dirname(lock_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(lock_path))
.where_name_is('composer.lock')
.into('BUILD_DIR')
.done())
# Sanity Checks
if not os.path.exists(os.path.join(self._ctx['BUILD_DIR'],
'composer.lock')):
msg = (
'PROTIP: Include a `composer.lock` file with your '
'application! This will make sure the exact same version '
'of dependencies are used when you deploy to CloudFoundry.')
self._log.warning(msg)
print msg
# dump composer version, if in debug mode
if self._ctx.get('BP_DEBUG', False):
self.composer_runner.run('-V')
if not os.path.exists(os.path.join(self._ctx['BP_DIR'], 'dependencies')):
token_is_valid = False
# config composer to use github token, if provided
if os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN', False):
token_is_valid = self.setup_composer_github_token()
# check that the api rate limit has not been exceeded, otherwise exit
self.check_github_rate_exceeded(token_is_valid)
# install dependencies w/Composer
self.composer_runner.run('install', '--no-progress',
*self._ctx['COMPOSER_INSTALL_OPTIONS'])
class ComposerCommandRunner(object):
def __init__(self, ctx, builder):
self._log = _log
self._ctx = ctx
self._strategy = HHVMComposerStrategy(ctx) \
if ctx['PHP_VM'] == 'hhvm' else PHPComposerStrategy(ctx)
self._php_path = self._strategy.binary_path()
self._composer_path = os.path.join(ctx['BUILD_DIR'], 'php',
'bin', 'composer.phar')
self._strategy.write_config(builder)
def _build_composer_environment(self):
env = {}
for key in os.environ.keys():
val = self._ctx.get(key, '')
env[key] = val if type(val) == str else json.dumps(val)
# add basic composer vars
env['COMPOSER_VENDOR_DIR'] = self._ctx['COMPOSER_VENDOR_DIR']
env['COMPOSER_BIN_DIR'] = self._ctx['COMPOSER_BIN_DIR']
env['COMPOSER_CACHE_DIR'] = self._ctx['COMPOSER_CACHE_DIR']
# prevent key system variables from being overridden
env['LD_LIBRARY_PATH'] = self._strategy.ld_library_path()
env['PHPRC'] = self._ctx['TMPDIR']
env['PATH'] = ':'.join(filter(None,
[env.get('PATH', ''),
os.path.dirname(self._php_path)]))
self._log.debug("ENV IS: %s",
'\n'.join(["%s=%s (%s)" % (key, val, type(val))
for (key, val) in env.iteritems()]))
return env
def run(self, *args):
try:
cmd = [self._php_path, self._composer_path]
cmd.extend(args)
self._log.debug("Running command [%s]", ' '.join(cmd))
stream_output(sys.stdout,
' '.join(cmd),
env=self._build_composer_environment(),
cwd=self._ctx['BUILD_DIR'],
shell=True)
except:
print "-----> Composer command failed"
raise
class HHVMComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'hhvm', 'usr', 'bin', 'hhvm')
def write_config(self, builder):
pass
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'hhvm', 'usr', 'lib', 'hhvm')
class PHPComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'bin', 'php')
def write_config(self, builder):
# rewrite a temp copy of php.ini for use by composer
(builder.copy()
.under('{BUILD_DIR}/php/etc')
.where_name_is('php.ini')
.into('TMPDIR')
.done())
utils.rewrite_cfgs(os.path.join(self._ctx['TMPDIR'], 'php.ini'),
{'TMPDIR': self._ctx['TMPDIR'],
'HOME': self._ctx['BUILD_DIR']},
delim='@')
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'lib')
# Extension Methods
def configure(ctx):
config = ComposerConfiguration(ctx)
config.configure()
def preprocess_commands(ctx):
composer = ComposerExtension(ctx)
return composer.preprocess_commands()
def service_commands(ctx):
composer = ComposerExtension(ctx)
return composer.service_commands()
def service_environment(ctx):
composer = ComposerExtension(ctx)
return composer.service_environment()
def compile(install):
composer = ComposerExtension(install.builder._ctx)
return composer.compile(install)
| apache-2.0 | -3,752,372,668,106,999,300 | 37.644706 | 174 | 0.555102 | false |
pipermerriam/django | django/contrib/auth/migrations/0001_initial.py | 108 | 4524 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('content_type', models.ForeignKey(
to='contenttypes.ContentType',
on_delete=models.CASCADE,
to_field='id',
verbose_name='content type',
)),
('codename', models.CharField(max_length=100, verbose_name='codename')),
],
options={
'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
'unique_together': set([('content_type', 'codename')]),
'verbose_name': 'permission',
'verbose_name_plural': 'permissions',
},
managers=[
('objects', django.contrib.auth.models.PermissionManager()),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
],
options={
'verbose_name': 'group',
'verbose_name_plural': 'groups',
},
managers=[
('objects', django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user')),
('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user')),
],
options={
'swappable': 'AUTH_USER_MODEL',
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| bsd-3-clause | -5,594,721,615,096,761,000 | 54.851852 | 277 | 0.580239 | false |
blackye/luscan-devel | thirdparty_libs/yaml/parser.py | 409 | 25542 |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
    # Since writing a recursive-descent parser is a straightforward task, we
    # do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle.encode('utf-8'),
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle.encode('utf-8'),
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == u'!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == u'!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == u'!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), u'',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# Note that while production rules for both flow_sequence_entry and
# flow_mapping_entry are equal, their interpretations are different.
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
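    # Illustrative sketch (not part of the original file): parsing the flow
    # sequence "[ a, b: c ]" is expected to emit SequenceStart, Scalar('a'),
    # MappingStart, Scalar('b'), Scalar('c'), MappingEnd, SequenceEnd, i.e.
    # the "b: c" entry becomes an inline single-pair mapping.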
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), u'', mark, mark)
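# --- Hypothetical usage sketch (not part of the original file) ---
# The Parser is normally driven through the top-level PyYAML API, e.g.:
#     import yaml
#     for event in yaml.parse("a: [1, 2]"):
#         print(event)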
| gpl-2.0 | -5,762,860,175,286,385,000 | 42.365025 | 156 | 0.561467 | false |
allinpaybusiness/ACS | allinpay projects/creditscoreMLP/classMLP.py | 1 | 9585 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys;
import os;
sys.path.append("allinpay projects")
from creditscore.creditscore import CreditScore
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
class CreditScoreMLP(CreditScore):
def MLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
        # Split the dataset into a training set and a test set
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)
        # Coarse-bin and WOE-transform the training set, then apply the same binning/WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
        # Feature selection on the training set, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
        # Train the model and predict
        classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation,solver=solver,alpha=alpha, max_iter=max_iter) # other parameters keep their defaults
        # To guard against a single MLP run failing to converge, train 10 times and average the predicted probabilities
probability = 0
for i in range(10):
            # Fit the model
            classifier.fit(X_train1, y_train)
            # Accumulate predicted probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
predresult = pd.DataFrame({'target' : y_test, 'probability' : probability})
return predresult
def MLP_trainandtest_kfold(self, nsplit, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
        # Split the dataset into k folds; each fold in turn serves as the test set and the remaining data as the training set
kf = KFold(n_splits=nsplit, shuffle=True)
predresult = pd.DataFrame()
for train_index, test_index in kf.split(data_feature):
X_train, X_test = data_feature.iloc[train_index, ], data_feature.iloc[test_index, ]
y_train, y_test = data_target.iloc[train_index, ], data_target.iloc[test_index, ]
            # If random sampling leaves only one class in train or test, skip this fold
if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
continue
            # Coarse-bin and WOE-transform the training folds, then apply the same binning/WOE to the test fold
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
            # Feature selection on the training folds, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
            # Train the model and predict
            classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation,solver=solver, alpha=alpha,max_iter =max_iter) # other parameters keep their defaults
            # To guard against a single MLP run failing to converge, train 10 times and average the predicted probabilities
probability = 0
for i in range(10):
                # Fit the model
                classifier.fit(X_train1, y_train)
                # Accumulate predicted probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
temp = pd.DataFrame({'target' : y_test, 'probability' : probability})
predresult = pd.concat([predresult, temp], ignore_index = True)
return predresult
def loopMLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range (3 , 101,3):# loop over the number of hidden units
hidden_layer_sizes = (i,)
            # Split train/test and evaluate
predresult = self.MLP_trainandtest(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and save the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
def loopMLP_trainandtest_kfold(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range (3 , 101,3):# loop over the number of hidden units
hidden_layer_sizes = (i,)
            # Split train/test and evaluate
predresult = self.MLP_trainandtest_kfold(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and save the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
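# --- Hypothetical usage sketch (not part of the original module) ---
# Constructor arguments depend on the CreditScore base class (not shown here),
# so the values below are placeholders only:
#     model = CreditScoreMLP(dataname)
#     model.loopMLP_trainandtest(testsize=0.25, cv=10, feature_sel="RFECV",
#                                varthreshold=0.0, activation='relu',
#                                solver='adam', alpha=1e-4, max_iter=1000,
#                                nclusters=10, cmethod=None)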
| apache-2.0 | 361,407,939,134,608,700 | 46.547872 | 169 | 0.602305 | false |
silizium/ardupilot | Tools/scripts/frame_sizes.py | 351 | 1117 | #!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile(r"^\s*\d+:/")
frame_line = re.compile(r"^\s*\d+\s+/\* frame size = (\d+) \*/")
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
| gpl-3.0 | 6,439,051,330,673,832,000 | 24.386364 | 76 | 0.557744 | false |
mlperf/training_results_v0.7 | SIAT/benchmarks/resnet/implementations/tensorflow_open_src/resnet.py | 1 | 21852 | import tensorflow as tf
# from mlperf_compliance import mlperf_log
# from mlperf_compliance import resnet_log_helper
# from configs.res50.res50_config import res50_config
_BATCH_NORM_EPSILON = 1e-4
_BATCH_NORM_DECAY = 0.9
ML_PERF_LOG=False
class LayerBuilder(object):
def __init__(self, activation=None, data_format='channels_last',
training=False, use_batch_norm=False, batch_norm_config=None,
conv_initializer=None, bn_init_mode='adv_bn_init', bn_gamma_initial_value=1.0 ):
self.activation = activation
self.data_format = data_format
self.training = training
self.use_batch_norm = use_batch_norm
self.batch_norm_config = batch_norm_config
self.conv_initializer = conv_initializer
self.bn_init_mode = bn_init_mode
self.bn_gamma_initial_value = bn_gamma_initial_value
if self.batch_norm_config is None:
self.batch_norm_config = {
'decay': _BATCH_NORM_DECAY,
'epsilon': _BATCH_NORM_EPSILON,
'scale': True,
'zero_debias_moving_mean': False,
}
def _conv2d(self, inputs, activation, *args, **kwargs):
x = tf.layers.conv2d(
inputs, data_format=self.data_format,
use_bias=not self.use_batch_norm,
kernel_initializer=self.conv_initializer,
activation=None if self.use_batch_norm else activation,
*args, **kwargs)
if self.use_batch_norm:
param_initializers = {
'moving_mean': tf.zeros_initializer(),
'moving_variance': tf.ones_initializer(),
'beta': tf.zeros_initializer(),
}
if self.bn_init_mode == 'adv_bn_init':
param_initializers['gamma'] = tf.ones_initializer()
elif self.bn_init_mode == 'conv_bn_init':
param_initializers['gamma'] = tf.constant_initializer(self.bn_gamma_initial_value)
else:
raise ValueError("--bn_init_mode must be 'conv_bn_init' or 'adv_bn_init' ")
x = self.batch_norm(x)
x = activation(x) if activation is not None else x
return x
def conv2d_linear_last_bn(self, inputs, *args, **kwargs):
x = tf.layers.conv2d(
inputs, data_format=self.data_format,
use_bias=False,
kernel_initializer=self.conv_initializer,
activation=None, *args, **kwargs)
param_initializers = {
'moving_mean': tf.zeros_initializer(),
'moving_variance': tf.ones_initializer(),
'beta': tf.zeros_initializer(),
}
if self.bn_init_mode == 'adv_bn_init':
param_initializers['gamma'] = tf.zeros_initializer()
elif self.bn_init_mode == 'conv_bn_init':
param_initializers['gamma'] = tf.constant_initializer(self.bn_gamma_initial_value)
else:
raise ValueError("--bn_init_mode must be 'conv_bn_init' or 'adv_bn_init' ")
x = self.batch_norm(x, param_initializers=param_initializers)
return x
def conv2d_linear(self, inputs, *args, **kwargs):
return self._conv2d(inputs, None, *args, **kwargs)
def conv2d(self, inputs, *args, **kwargs):
return self._conv2d(inputs, self.activation, *args, **kwargs)
def pad2d(self, inputs, begin, end=None):
if end is None:
end = begin
try:
_ = begin[1]
except TypeError:
begin = [begin, begin]
try:
_ = end[1]
except TypeError:
end = [end, end]
if self.data_format == 'channels_last':
padding = [[0, 0], [begin[0], end[0]], [begin[1], end[1]], [0, 0]]
else:
padding = [[0, 0], [0, 0], [begin[0], end[0]], [begin[1], end[1]]]
return tf.pad(inputs, padding)
def max_pooling2d(self, inputs, *args, **kwargs):
return tf.layers.max_pooling2d(
inputs, data_format=self.data_format, *args, **kwargs)
def average_pooling2d(self, inputs, *args, **kwargs):
return tf.layers.average_pooling2d(
inputs, data_format=self.data_format, *args, **kwargs)
def dense_linear(self, inputs, units, **kwargs):
return tf.layers.dense(inputs, units, activation=None)
def dense(self, inputs, units, **kwargs):
return tf.layers.dense(inputs, units, activation=self.activation)
def activate(self, inputs, activation=None):
activation = activation or self.activation
return activation(inputs) if activation is not None else inputs
def batch_norm(self, inputs, **kwargs):
all_kwargs = dict(self.batch_norm_config)
all_kwargs.update(kwargs)
data_format = 'NHWC' if self.data_format == 'channels_last' else 'NCHW'
bn_inputs = inputs
outputs = tf.contrib.layers.batch_norm(
inputs, is_training=self.training, data_format=data_format,
fused=True, **all_kwargs)
if ML_PERF_LOG:
resnet_log_helper.log_batch_norm(
input_tensor=bn_inputs, output_tensor=outputs, momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=self.training)
return outputs
def spatial_average2d(self, inputs):
shape = inputs.get_shape().as_list()
if self.data_format == 'channels_last':
n, h, w, c = shape
else:
n, c, h, w = shape
n = -1 if n is None else n
# x = tf.layers.average_pooling2d(inputs, (h, w), (1, 1),
# data_format=self.data_format)
axis = [1,2]
x = tf.reduce_mean( inputs, axis, keepdims=True )
x = tf.reshape(x, [n, c])
return tf.reshape(x, [n, c])
def flatten2d(self, inputs):
x = inputs
if self.data_format != 'channels_last':
# Note: This ensures the output order matches that of NHWC networks
x = tf.transpose(x, [0, 2, 3, 1])
input_shape = x.get_shape().as_list()
num_inputs = 1
for dim in input_shape[1:]:
num_inputs *= dim
return tf.reshape(x, [-1, num_inputs], name='flatten')
def residual2d(self, inputs, network, units=None, scale=1.0, activate=False):
outputs = network(inputs)
c_axis = -1 if self.data_format == 'channels_last' else 1
h_axis = 1 if self.data_format == 'channels_last' else 2
w_axis = h_axis + 1
ishape, oshape = [y.get_shape().as_list() for y in [inputs, outputs]]
ichans, ochans = ishape[c_axis], oshape[c_axis]
strides = ((ishape[h_axis] - 1) // oshape[h_axis] + 1,
(ishape[w_axis] - 1) // oshape[w_axis] + 1)
with tf.name_scope('residual'):
if (ochans != ichans or strides[0] != 1 or strides[1] != 1):
inputs = self.conv2d_linear(inputs, units, 1, strides, 'SAME')
x = inputs + scale * outputs
if activate:
x = self.activate(x)
return x
def Squeeze_excitation_layer(self, inputs):
se_ratio = 4
shape = inputs.get_shape().as_list()
n, h, w, c = shape # for channels last
with tf.name_scope('SE_block'):
x = self.spatial_average2d(inputs)
x = tf.layers.dense(inputs=x, units=c/se_ratio)
x = tf.nn.relu(x)
x = tf.layers.dense(inputs=x, units=c)
x = tf.nn.sigmoid(x)
x = tf.reshape(x, [-1,1,1,c]) # for channels last
scaled_outputs = inputs * x
return scaled_outputs
def resnet_bottleneck_v1_with_senet(builder, inputs, depth, depth_bottleneck, stride, filters, arch_type,
basic=False):
num_inputs = inputs.get_shape().as_list()[3]
x = inputs
print ('=== inputs and num_inputs:', num_inputs, inputs)
with tf.name_scope('resnet_v1'):
if ML_PERF_LOG:
resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
if depth == num_inputs:
if stride == 1:#v1.5
shortcut = x
else:#v1
shortcut = builder.max_pooling2d(x, 1, stride)
else:
if 'D1' in arch_type:
shortcut = builder.average_pooling2d(x, stride, stride)
shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
else:
shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
conv_input = x
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=shortcut, stride=stride,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
if basic:
x = builder.pad2d(x, 1)
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
else:
conv_input = x
x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
conv_input = x
if stride == 1:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
else:
if 'E2' in arch_type:
x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
x = builder.average_pooling2d(x, stride, stride)
else:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=stride,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
# x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
conv_input = x
x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
            x = builder.Squeeze_excitation_layer(x) # append an SE (squeeze-and-excitation) block at the end of the bottleneck
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
x = tf.nn.relu(x + shortcut)
if ML_PERF_LOG:
resnet_log_helper.log_end_block(output_tensor=x)
return x
# num_inputs = inputs.get_shape().as_list()[3]
# x = inputs
# print ('=== inputs and num_inputs:', num_inputs, inputs)
# with tf.name_scope('resnet_v1'):
# if ML_PERF_LOG:
# resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
# if depth == num_inputs:
# if stride == 1:#v1.5
# shortcut = x
# else:#v1
# shortcut = builder.max_pooling2d(x, 1, stride)
# else:
# if 'D1' in arch_type:
# shortcut = builder.average_pooling2d(x, stride, stride)
# shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
# else:
# shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
#
# conv_input = x
# if ML_PERF_LOG:
# resnet_log_helper.log_conv2d(
# input_tensor=conv_input, output_tensor=shortcut, stride=stride,
# filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
# resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
# if basic:
# x = builder.pad2d(x, 1)
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
# x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
# else:
# conv_input = x
# x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
# conv_input = x
#
# if stride == 1:
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
# else:
# if 'E2' in arch_type:
# x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
# x = builder.average_pooling2d(x, stride, stride)
# else:
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
#
# # x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
# conv_input = x
# x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
#            x = builder.Squeeze_excitation_layer(x) # append SE block at the end of the bottleneck
#
#
#
# x = tf.nn.relu(x + shortcut)
# if ML_PERF_LOG:
# resnet_log_helper.log_end_block(output_tensor=x)
# return x
def resnet_bottleneck_v1(builder, inputs, depth, depth_bottleneck, stride, filters, arch_type,
basic=False):
num_inputs = inputs.get_shape().as_list()[3]
x = inputs
print ('=== inputs and num_inputs:', num_inputs, inputs)
with tf.name_scope('resnet_v1'):
if ML_PERF_LOG:
resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
if depth == num_inputs:
if stride == 1:#v1.5
shortcut = x
else:#v1
shortcut = builder.max_pooling2d(x, 1, stride)
else:
if 'D1' in arch_type:
shortcut = builder.average_pooling2d(x, stride, stride)
shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
else:
shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
conv_input = x
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=shortcut, stride=stride,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
if basic:
x = builder.pad2d(x, 1)
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
else:
conv_input = x
x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
conv_input = x
if stride == 1:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
else:
if 'E2' in arch_type:
x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
x = builder.average_pooling2d(x, stride, stride)
else:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=stride,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
# x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
conv_input = x
x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
x = tf.nn.relu(x + shortcut)
if ML_PERF_LOG:
resnet_log_helper.log_end_block(output_tensor=x)
return x
def inference_resnet_v1_impl(builder, inputs, layer_counts, arch_type='ori+C1+D1+E2+4se', resnet_version='v1.5', basic=False):
#def inference_resnet_v1_impl(builder, inputs, layer_counts, arch_type='ori', resnet_version='v1.5', basic=False):
x = inputs
if 'C1' in arch_type:
x = builder.conv2d( x, 32, 3, 2, 'SAME' )
x = builder.conv2d( x, 32, 3, 1, 'SAME' )
x = builder.conv2d( x, 64, 3, 1, 'SAME' )
else:
x = builder.conv2d(x, 64, 7, 2, 'SAME')
num_filters=64
x, argmax = tf.nn.max_pool_with_argmax( input=x, ksize=(1,3,3,1), strides=(1,2,2,1), padding='SAME' )
if '4se' in arch_type:
with tf.name_scope('block_1'):
for i in range(layer_counts[0]):
# if i ==layer_counts[0]-1:
# x = resnet_bottleneck_v1_with_senet(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
# else:
x = resnet_bottleneck_v1(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_2'):
for i in range(layer_counts[1]):
num_filters=num_filters*2
# if i ==layer_counts[1]-1:
# x = resnet_bottleneck_v1_with_senet(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
# else:
x = resnet_bottleneck_v1(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_3'):
for i in range(layer_counts[2]):
num_filters=num_filters*2
if i ==layer_counts[2]-1:
x = resnet_bottleneck_v1_with_senet(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
x = resnet_bottleneck_v1(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_4'):
for i in range(layer_counts[3]):
num_filters=num_filters*2
if i ==2:
x = resnet_bottleneck_v1_with_senet(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
x = resnet_bottleneck_v1(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
for i in range(layer_counts[0]):
x = resnet_bottleneck_v1(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[1]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[2]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[3]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
# x = builder.spatial_average2d(x)
# same function as spatial average
axis = [1,2]
x = tf.reduce_mean( x, axis, keepdims=True )
x = tf.reshape(x, [-1,2048])
logits = tf.layers.dense(x, 1001,
kernel_initializer=tf.random_normal_initializer(stddev=0.01, seed=1))
return logits
def inference_resnet_v1(config, inputs, nlayer, arch_type, data_format='channels_last',
training=False, conv_initializer=None, bn_init_mode='adv_bn_init', bn_gamma_initial_value=1.0 ):
"""Deep Residual Networks family of models
https://arxiv.org/abs/1512.03385
"""
if ML_PERF_LOG:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_INITIAL_SHAPE,
value=inputs.shape.as_list()[1:])
builder = LayerBuilder(tf.nn.relu, data_format, training, use_batch_norm=True,
conv_initializer=conv_initializer, bn_init_mode=bn_init_mode, bn_gamma_initial_value=bn_gamma_initial_value)
if nlayer == 18:
return inference_resnet_v1_impl(builder, inputs, [2, 2, 2, 2], arch_type, basic=True)
elif nlayer == 34:
return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3], basic=True)
elif nlayer == 50:
return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3])
elif nlayer == 101:
return inference_resnet_v1_impl(builder, inputs, [3, 4, 23, 3])
elif nlayer == 152:
return inference_resnet_v1_impl(builder, inputs, [3, 8, 36, 3])
else:
raise ValueError("Invalid nlayer (%i); must be one of: 18,34,50,101,152" %
nlayer)
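# --- Hypothetical usage sketch (not part of the original file) ---
# `config` is not used inside inference_resnet_v1, so None is passed here;
# the input layout assumes channels_last.
#     images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#     logits = inference_resnet_v1(None, images, nlayer=50,
#                                  arch_type='ori+C1+D1+E2+4se', training=True)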
| apache-2.0 | 8,897,350,612,938,748,000 | 43.571429 | 141 | 0.556227 | false |
surchs/brainbox | visu/base.py | 1 | 8414 | __author__ = 'surchs'
import sys
import numpy as np
from matplotlib import gridspec
from nilearn import plotting as nlp
from matplotlib import pyplot as plt
from matplotlib import colors as mpc
def add_subplot_axes(ax, rect, axisbg='w'):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
trans_figure = fig.transFigure.inverted()
infig_position = trans_figure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3]
subax = fig.add_axes([x, y, width, height], axisbg=axisbg)
return subax
def add_four_grid(ax, dist=0.05, ticks=False, border=False, titles=None):
"""
Function that creates a symmetric four grid inside a subplot
:param ax: Axis handle of parent subplot
:param dist: Distance between neighbouring fields of the grd
:param ticks: True if ticks shall be visible
:param border: True if border shall be visible
:param titles: Iterable with length 4 in this order:
0) top left
1) bottom left
2) top right
3) bottom right
If set, distance the fields will be made narrower to
accommodate the title
:return: Axis handles for the four subfields in this order:
0) top left
1) bottom left
2) top right
3) bottom right
"""
# See if titles are provided for all subplots
if titles and len(titles) == 4:
title = True
else:
title = False
# Make left top plot
lt = add_subplot_axes(ax, [0, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lt.set_title(titles[0])
if not ticks:
lt.set_xticks([])
lt.set_yticks([])
if not border:
lt.spines["top"].set_visible(False)
lt.spines["right"].set_visible(False)
lt.spines["left"].set_visible(False)
lt.spines["bottom"].set_visible(False)
# Make left bottom plot
lb = add_subplot_axes(ax, [0, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lb.set_title(titles[1])
if not ticks:
lb.set_xticks([])
lb.set_yticks([])
if not border:
lb.spines["top"].set_visible(False)
lb.spines["right"].set_visible(False)
lb.spines["left"].set_visible(False)
lb.spines["bottom"].set_visible(False)
# Make right top plot
rt = add_subplot_axes(ax, [0.5+dist/2, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rt.set_title(titles[2])
if not border:
rt.set_xticks([])
rt.set_yticks([])
if not border:
rt.spines["top"].set_visible(False)
rt.spines["right"].set_visible(False)
rt.spines["left"].set_visible(False)
rt.spines["bottom"].set_visible(False)
# Make right bottom plot
rb = add_subplot_axes(ax, [0.5+dist/2, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rb.set_title(titles[3])
if not ticks:
rb.set_xticks([])
rb.set_yticks([])
if not border:
rb.spines["top"].set_visible(False)
rb.spines["right"].set_visible(False)
rb.spines["left"].set_visible(False)
rb.spines["bottom"].set_visible(False)
return lt, lb, rt, rb
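# Illustrative usage sketch for add_four_grid (hypothetical, not in the original):
#     fig, ax = plt.subplots()
#     lt, lb, rt, rb = add_four_grid(ax, dist=0.05,
#                                    titles=('tl', 'bl', 'tr', 'br'))
#     lt.plot([0, 1], [0, 1])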
def make_montage(vol, axis='coronal', x_step=5, y_step=6):
"""
Makes a montage of a 3D volume
"""
n_steps = x_step * y_step
if axis == 'coronal':
it_dim = vol.shape[1]
x_dim = vol.shape[0]
y_dim = vol.shape[2]
elif axis == 'axial':
it_dim = vol.shape[0]
x_dim = vol.shape[1]
y_dim = vol.shape[2]
vis_mat = np.zeros((x_step*x_dim, y_step*y_dim))
it_slc = np.linspace(0, it_dim-1, n_steps)
itc = 0
for y in np.arange(y_step):
for x in np.arange(x_step):
slc_ind = it_slc[itc]
            get_slc = int(np.floor(slc_ind))
if axis == 'coronal':
slc = vol[:, get_slc, :]
elif axis == 'axial':
slc = vol[get_slc, ...]
vis_mat[x_dim * x:x_dim * (x + 1), y_dim * y:y_dim * (y + 1)] = slc
itc += 1
out_mat = np.fliplr(np.rot90(vis_mat))
return out_mat
def montage(img, thr=0, mode='coronal', rows=5, columns=6, fsz=(10, 20)):
"""
Make a montage using nilearn for the background
The output figure will be 5 slices wide and 6
slices deep
:param img: nilearn image containing the data
:param thr: threshold for the image
:param mode: view mode. saggital, coronal, axial
:param rows: number of rows in the figure
:param cloumns: number of columns in the figure
:param fsz: size of the figure
:return fig: figure handle for saving or whatnot
"""
# Hardwired view range
sag_rng = [-65, 65]
cor_rng = [-100, 65]
axi_rng = [-71, 85]
# Get the number of slices
    n_slices = rows * columns
if mode == 'coronal':
# Get the slice indices
view_range = np.floor(np.linspace(cor_rng[0], cor_rng[1], n_slices))
view_mode = 'y'
if mode == 'axial':
# Get the slice indices
view_range = np.floor(np.linspace(axi_rng[0], axi_rng[1], n_slices))
view_mode = 'z'
if mode == 'saggital':
# Get the slice indices
view_range = np.floor(np.linspace(sag_rng[0], sag_rng[1], n_slices))
view_mode = 'x'
# Prepare the figure
fig = plt.figure(figsize=fsz)
    gs = gridspec.GridSpec(columns, 1, hspace=0, wspace=0)
    # Loop through the rows of the image
    for row_id in range(columns):
# Create the axis to show
ax = fig.add_subplot(gs[row_id, 0])
# Get the slices in the column direction
row_range = view_range[row_id*rows:(row_id+1)*rows]
# Display the thing
nlp.plot_stat_map(img, cut_coords=row_range,
display_mode=view_mode, threshold=thr,
axes=ax, black_bg=True)
return fig
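# Illustrative usage sketch for montage (hypothetical, not in the original);
# any nibabel/nilearn image object should work as input:
#     img = nibabel.load('stat_map.nii.gz')
#     fig = montage(img, thr=2.3, mode='axial', rows=5, columns=6)
#     fig.savefig('montage.png')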
def make_cmap(colors, position=None, bit=False):
"""
make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
"""
bit_rgb = np.linspace(0,1,256)
    if position is None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]])
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(position, colors):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpc.LinearSegmentedColormap('my_colormap',cdict,256)
return cmap
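# Illustrative usage sketch for make_cmap (hypothetical, not in the original):
#     blue_white_red = make_cmap([(0, 0, 1), (1, 1, 1), (1, 0, 0)],
#                                position=[0, 0.5, 1])
#     plt.imshow(np.random.rand(10, 10), cmap=blue_white_red)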
def hot_cold():
"""
This generates a niak-like colormap of hot cold
:return:
"""
# Define a new colormap
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(0.25, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 1.0, 1.0),
(0.25, 1.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
hotcold = mpc.LinearSegmentedColormap('hotcold', cdict)
return hotcold
| mit | -8,054,908,546,902,702,000 | 31.996078 | 79 | 0.54445 | false |
jvs/sourcer | sourcer/expressions/str.py | 1 | 1444 | from outsourcer import Code
from . import utils
from .base import Expression
from .constants import POS, RESULT, STATUS, TEXT
class Str(Expression):
is_commented = False
def __init__(self, value):
if not isinstance(value, (bytes, str)):
raise TypeError(f'Expected bytes or str. Received: {type(value)}.')
self.value = value
self.skip_ignored = False
self.num_blocks = 0 if not self.value else 1
def __str__(self):
return repr(self.value)
def always_succeeds(self):
return not self.value
def can_partially_succeed(self):
return False
def argumentize(self, out):
wrap = Code('_wrap_string_literal')
value = Expression.argumentize(self, out)
return out.var('arg', wrap(self.value, value))
def _compile(self, out):
if not self.value:
out += STATUS << True
out += RESULT << ''
return
value = out.var('value', self.value)
end = out.var('end', POS + len(self.value))
with out.IF(TEXT[POS : end] == value):
out += RESULT << value
out += POS << (utils.skip_ignored(end) if self.skip_ignored else end)
out += STATUS << True
with out.ELSE():
out += RESULT << self.error_func()
out += STATUS << False
def complain(self):
return f'Expected to match the string {self.value!r}'
| mit | 1,280,094,630,958,975,700 | 27.313725 | 81 | 0.572715 | false |
rhiever/bokeh | sphinx/source/docs/tutorials/exercises/unemployment.py | 23 | 2160 | import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource, figure, output_file, show
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
# EXERCISE: create a `ColumnDataSource` with columns: month, year, color, rate
source = ColumnDataSource(
data=dict(
month=month,
year=year,
color=color,
rate=rate,
)
)
# EXERCISE: output to static HTML file
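# For example, one way to complete this exercise:
# output_file('unemployment.html')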
# create a new figure
p = figure(title="US Unemployment (1948 - 2013)", tools="resize,hover",
x_range=years, y_range=list(reversed(months)),
plot_width=900, plot_height=400, x_axis_location="above")
# EXERCISE: use the `rect renderer with the following attributes:
# - x_range is years, y_range is months (reversed)
# - fill color for the rectangles is the 'color' field
# - line_color for the rectangles is None
# - tools are resize and hover tools
# - add a nice title, and set the plot_width and plot_height
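# For example, one possible solution:
# p.rect("year", "month", 1, 1, source=source, color="color", line_color=None)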
# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
# EXERCISE: configure the hover tool to display the month, year and rate
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
# fill me in
]
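# For example, the hover could show the month, year and rate:
# hover.tooltips = [('month', '@month'), ('year', '@year'), ('rate', '@rate')]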
show(p)
| bsd-3-clause | 8,508,792,494,792,177,000 | 30.304348 | 82 | 0.662963 | false |
heiher/libreoffice-core | scripting/examples/python/HelloWorld.py | 12 | 1539 | # HelloWorld python script for the scripting framework
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
def HelloWorldPython( ):
"""Prints the string 'Hello World(in Python)' into the current document"""
#get the doc from the scripting context which is made available to all scripts
desktop = XSCRIPTCONTEXT.getDesktop()
model = desktop.getCurrentComponent()
#check whether there's already an opened document. Otherwise, create a new one
if not hasattr(model, "Text"):
model = desktop.loadComponentFromURL(
"private:factory/swriter","_blank", 0, () )
#get the XText interface
text = model.Text
#create an XTextRange at the end of the document
tRange = text.End
#and set the string
tRange.String = "Hello World (in Python)"
return None
| gpl-3.0 | -6,799,308,032,610,818,000 | 41.75 | 78 | 0.730344 | false |
abrowning80/solutions-geoevent-java | data/packages/geoprocessing/visibility/Toolshare/scripts/makefeature.py | 6 | 1382 | #-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: patr5136
#
# Created: 23/08/2013
# Copyright: (c) patr5136 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import os, json
def makeFeature(geo, wkid):
sr = arcpy.SpatialReference(wkid);
    arcpy.CreateFeatureclass_management('in_memory', 'tmpPoly', 'POLYGON', '#', '#', '#', sr)
fc = os.path.join('in_memory', 'tmpPoly')
fields = ["SHAPE@"]
insert = arcpy.da.InsertCursor(fc, fields)
    insert.insertRow([geo])
return fc
def makePolygon(jsonStr):
    jsonPoly = json.loads(jsonStr)
rings=arcpy.Array()
for ring in jsonPoly['rings']:
points = arcpy.Array();
for coord in ring:
x=coord[0]
y=coord[1]
z=None
if len(coord)>2:
z=coord[2]
#z=coord[3]
p=arcpy.Point()
p.X=x
p.Y=y
if z:
p.Z=z
points.add(p)
rings.add(points)
return arcpy.Polygon(rings)
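# Example of the Esri JSON expected by makePolygon (hypothetical coordinates):
#     '{"rings": [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]]}'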
if __name__ == '__main__':
    jsonPolygon = arcpy.GetParameterAsText(0)
wkid = arcpy.GetParameter(1)
polygon = makePolygon(jsonPolygon)
fc = makeFeature(polygon, wkid)
arcpy.SetParameter(2, fc)
| apache-2.0 | 2,700,602,174,642,429,400 | 26.098039 | 87 | 0.48987 | false |
ccarouge/cwsl-mas | cwsl/core/file_creator.py | 4 | 11088 | """
Authors: Tim Bedin, Tim Erwin
Copyright 2014 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contains the FileCreator class.
"""
import os
import re
import itertools
import logging
from cwsl.core.dataset import DataSet
from cwsl.core.constraint import Constraint
from cwsl.core.metafile import MetaFile
module_logger = logging.getLogger('cwsl.core.file_creator')
class FileCreator(DataSet):
''' This class is a DataSet that creates the output MockClimateFiles
objects, given an output pattern and a set of Constraints.
A FileCreator has a 'output_pattern' attribute which defines
what the filename of any created output files should be.
The ArgumentCreator class compares these possible created files
and throws away any that cannot be created as they do not have
matching files in the input DataSet.
The output pattern has a particular syntax. The pattern is given as a
string with attribute names surrounded by % signs.
eg:
"/projects/ua6/CAWCR_CVC_processed/%variable%_%modeling_realm%_%model%.nc"
This class will raise an error if it is instantiated with a pattern with
empty constraints - it does not make sense to have a file creator that
has 'empty' or 'all' constraints - they must be in canonical form.
It will also raise an error if instantiated with 'extra' constraints
that do not match those from its output pattern.
'''
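    # Illustrative sketch (hypothetical values, not part of the original class):
    #     pattern = "/data/%variable%_%model%.nc"
    #     creator = FileCreator(pattern,
    #                           [Constraint('variable', ['tas']),
    #                            Constraint('model', ['ACCESS1-0'])])
    #     creator.get_files({'variable': 'tas'})  # -> MetaFile "tas_ACCESS1-0.nc"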
def __init__(self, output_pattern, extra_constraints):
''' This constructor sets up the FileCreator from the
pattern of its output and 'extra' constraints that set the value of
its attributes.
'''
self.output_pattern = output_pattern
# Construct the initial constraints from the output pattern.
self.constraints = FileCreator.constraints_from_pattern(output_pattern)
# Add the extra constraints to the self.constraints, strip out any that
# are not part of the output pattern.
self.merge_constraints(extra_constraints)
# This object must create files, so after merging all constraints must
# be in canonical form.
# "extra" and "info" are keywords for non-compulsory constraints that
# are replaced by a placeholder value.
for constraint in self.constraints:
if not constraint.values:
split_key = constraint.key.split('_')
if 'extra' in split_key:
constraint.values = set(['noextras'])
elif 'info' in split_key:
constraint.values = set(['orig'+split_key[0]])
else:
module_logger.error("Constraint {0} is empty - should be in canonical form!"
.format(constraint))
raise EmptyConstraintError("Constraint {0} is empty - should be in canonical form!"
.format(constraint))
# A set to hold all the valid combinations of attributes.
self.valid_combinations = set()
# One to hold the valid_hashes
self.valid_hashes = set()
self.cons_names = [cons.key for cons in self.constraints]
def get_files(self, att_dict, check=False, update=True):
""" This method returns all possible MockClimateFiles from the
FileCreator that match an input attribute dictionary.
        If check is True, then we check that the hash for the file is in
        the 'valid_hashes' hash list. This is used when the FileCreator is
        used as an input, where we only want to return files that
        actually exist.
"""
# Get the keys of the input dictionary.
search_keys = [att for att in att_dict.keys()]
cons_names = [cons.key for cons in self.constraints]
to_loop = []
# We do this for every constraint in the FileCreator
for key in cons_names:
if key not in search_keys:
# If a key is not in the att_dict, grab the existing constraint.
existing_cons = self.get_constraint(key)
to_loop.append((existing_cons.key, existing_cons.values))
assert(type(existing_cons.values == set))
else:
new_cons = Constraint(key, [att_dict[key]])
to_loop.append((new_cons.key, new_cons.values))
keys = [cons[0] for cons in to_loop]
values = [cons[1] for cons in to_loop]
new_iter = itertools.product(*values)
outfiles = []
for combination in new_iter:
new_file = self.climate_file_from_combination(keys, combination,
check=check, update=update)
if new_file:
outfiles.append(new_file)
return outfiles
@property
def files(self):
""" This property returns all the real files
that exist in this file_creator.
"""
huge_iterator = itertools.product(*[cons.values
for cons in self.constraints])
cons_names = [cons.key for cons in self.constraints]
for combination in huge_iterator:
# Create a set of constraints for this combination.
climate_file = self.climate_file_from_combination(cons_names, combination,
check=True, update=False)
if climate_file:
yield climate_file
def get_constraint(self, attribute_name):
""" Get a particular constraint by name."""
for constraint in self.constraints:
if constraint.key == attribute_name:
return constraint
# If it can't be found, return None.
return None
def merge_constraints(self, new_constraints):
""" This function adds the constraint values to the constraints from
a pattern.
"""
existing_cons_names = [cons.key for cons in self.constraints]
# Now add the constraints - only if they are in the pattern!
for cons in new_constraints:
if cons.key in existing_cons_names:
self.constraints.add(cons)
attribute_names = [cons.key for cons in self.constraints]
repeated_atts = []
for name in attribute_names:
if attribute_names.count(name) > 1:
repeated_atts.append(name)
to_remove = [cons for cons in self.constraints
if cons.key in repeated_atts]
new_cons_dict = {}
for cons in to_remove:
new_cons_dict[cons.key] = set([])
for cons in to_remove:
new_cons_dict[cons.key] = new_cons_dict[cons.key].union(cons.values)
self.constraints.remove(cons)
for key in new_cons_dict:
self.constraints.add(Constraint(key, new_cons_dict[key]))
def climate_file_from_combination(self, keys, next_combination,
check, update):
""" Make a possible output MetaFile object from
a combination of attributes.
"""
# Turn the combination tuple into a dictionary with
# attribute names.
sub_dict = {}
cons_list = []
for key, value in zip(keys, next_combination):
sub_dict[key] = value
cons_list.append(Constraint(key, [value]))
new_file = self.output_pattern
for key in sub_dict:
att_sub = "%" + key + "%"
new_file = re.sub(att_sub, sub_dict[key], new_file)
new_path = os.path.dirname(new_file)
file_name = os.path.basename(new_file)
new_climate_file = MetaFile(path_dir=new_path,
filename=file_name,
all_atts=sub_dict)
if check:
# Check that this combination is valid for the FileCreator
# If it is not, return None.
module_logger.debug("Checking cons_list: {}".format(cons_list))
if frozenset(cons_list) not in self.valid_combinations:
module_logger.debug("This combination: {0} is not found in {1}"
.format(cons_list, self.valid_combinations))
return None
if update:
# Add the hash to the 'valid_hashes' set.
file_hash = hash(new_climate_file)
self.valid_hashes.add(file_hash)
self.valid_combinations.add(frozenset(cons_list))
module_logger.debug("Returning climate file: {}".format(new_climate_file))
return new_climate_file
@staticmethod
def default_pattern(out_constraints, temp=False):
""" Creates a default pattern from a set of constraints.
Mostly for testing - we could extend this to use real patterns.
"""
out_pattern = ''
for cons in out_constraints:
out_pattern += '%' + cons.key + '%_'
output = out_pattern[:-1]
if temp:
# Try some different temp directories.
if "TMPDIR" in os.environ:
output = os.path.join(os.environ["TMPDIR"], output)
elif "TEMP" in os.environ:
output = os.path.join(os.environ["TEMP"], output)
elif "TMP" in os.environ:
output = os.path.join(os.environ["TMP"], output)
else:
output = os.path.join("/tmp", output)
return output
@staticmethod
def constraints_from_pattern(pattern_string):
""" This function builds a set of constraint objects from
an output pattern.
"""
regex_pattern = r"%(\S+?)%"
attribute_names = re.findall(regex_pattern, pattern_string)
constraint_list = [Constraint(att_name, [])
for att_name in attribute_names]
return set(constraint_list)
class EmptyConstraintError(Exception):
def __init__(self, constraint):
self.constraint = constraint
module_logger.error("Constraint {} is empty but must contain values"
.format(self.constraint))
def __repr__(self):
return repr(self.constraint)
class ExtraConstraintError(Exception):
def __init__(self, constraint):
self.constraint = constraint
module_logger.error("Constraint {} passed to FileCreator is not found in the output pattern!"
.format(self.constraint))
def __repr__(self):
return repr(self.constraint)
| apache-2.0 | 3,285,513,506,962,864,000 | 34.652733 | 103 | 0.602273 | false |
knxd/PyKNyX | tests/core/dptXlator/dptXlator2ByteFloat.py | 2 | 2328 | # -*- coding: utf-8 -*-
from pyknyx.core.dptXlator.dptXlator2ByteFloat import *
import unittest
# Mute logger
from pyknyx.services.logger import logging
logger = logging.getLogger(__name__)
logging.getLogger("pyknyx").setLevel(logging.ERROR)
class DPTXlator2ByteFloatTestCase(unittest.TestCase):
def setUp(self):
self.testTable = (
( 0., 0x0000, b"\x00\x00"),
( 0.01, 0x0001, b"\x00\x01"),
( -0.01, 0x87ff, b"\x87\xff"),
( -1., 0x879c, b"\x87\x9c"),
( 1., 0x0064, b"\x00\x64"),
( -272.96, 0xa156, b"\xa1\x56"),
(670760.96, 0x7fff, b"\x7f\xff"),
)
self.dptXlator = DPTXlator2ByteFloat("9.xxx")
def tearDown(self):
pass
#def test_constructor(self):
#print self.dptXlator.handledDPT
def test_typeSize(self):
self.assertEqual(self.dptXlator.typeSize, 2)
def testcheckValue(self):
with self.assertRaises(DPTXlatorValueError):
self.dptXlator.checkValue(self.dptXlator._dpt.limits[1] + 1)
def test_dataToValue(self):
for value, data, frame in self.testTable:
value_ = self.dptXlator.dataToValue(data)
self.assertEqual(value_, value, "Conversion failed (converted value for %s is %.2f, should be %.2f)" %
(hex(data), value_, value))
def test_valueToData(self):
for value, data, frame in self.testTable:
data_ = self.dptXlator.valueToData(value)
self.assertEqual(data_, data, "Conversion failed (converted data for %.2f is %s, should be %s)" %
(value, hex(data_), hex(data)))
def test_dataToFrame(self):
for value, data, frame in self.testTable:
frame_ = self.dptXlator.dataToFrame(data)
self.assertEqual(frame_, frame, "Conversion failed (converted frame for %s is %r, should be %r)" %
(hex(data), frame_, frame))
def test_frameToData(self):
for value, data, frame in self.testTable:
data_ = self.dptXlator.frameToData(frame)
self.assertEqual(data_, data, "Conversion failed (converted data for %r is %s, should be %s)" %
(frame, hex(data_), hex(data)))
| gpl-3.0 | -735,284,505,910,309,400 | 37.163934 | 114 | 0.57646 | false |
wweiradio/django | django/conf/locale/zh_Hant/formats.py | 1008 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
'%Y年%n月%j日 %H:%n:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
| bsd-3-clause | 7,908,479,657,532,321,000 | 36.6 | 77 | 0.536643 | false |
ycl2045/nova-master | nova/tests/integrated/v3/test_security_groups.py | 29 | 2233 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.network.security_group import neutron_driver
from nova.tests.integrated.v3 import test_servers
def fake_get(*args, **kwargs):
nova_group = {}
nova_group['id'] = 'fake'
nova_group['description'] = ''
nova_group['name'] = 'test'
nova_group['project_id'] = 'fake'
nova_group['rules'] = []
return nova_group
def fake_get_instances_security_groups_bindings(self, context, servers):
result = {}
for s in servers:
result[s.get('id')] = [{'name': 'test'}]
return result
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-security-groups'
def setUp(self):
self.flags(security_group_api=('neutron'))
super(SecurityGroupsJsonTest, self).setUp()
self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
fake_get_instances_security_groups_bindings)
def test_server_create(self):
self._post_server()
def test_server_get(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_server_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
| apache-2.0 | 5,530,289,195,131,717,000 | 34.444444 | 78 | 0.648455 | false |
dannyboi104/SickRage | sickbeard/providers/t411.py | 2 | 9980 | # -*- coding: latin-1 -*-
# Author: djoole <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import traceback
import re
import datetime
import time
from requests.auth import AuthBase
import sickbeard
import generic
import requests
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
from sickbeard.exceptions import ex
class T411Provider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "T411")
self.supportsBacklog = True
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.token = None
self.tokenLastUpdate = None
self.cache = T411Cache(self)
self.urls = {'base_url': 'http://www.t411.io/',
'search': 'https://api.t411.io/torrents/search/%s?cid=%s&limit=100',
'login_page': 'https://api.t411.io/auth',
'download': 'https://api.t411.io/torrents/download/%s',
}
self.url = self.urls['base_url']
self.subcategories = [433, 637, 455, 639]
def isEnabled(self):
return self.enabled
def imageName(self):
return 't411.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _doLogin(self):
if self.token is not None:
if time.time() < (self.tokenLastUpdate + 30 * 60):
logger.log('T411 Authentication token is still valid', logger.DEBUG)
return True
login_params = {'username': self.username,
'password': self.password}
logger.log('Performing authentication to T411', logger.DEBUG)
response = helpers.getURL(self.urls['login_page'], post_data=login_params, timeout=30, json=True)
if not response:
logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.WARNING)
return False
if response and 'token' in response:
self.token = response['token']
self.tokenLastUpdate = time.time()
self.uid = response['uid'].encode('ascii', 'ignore')
self.session.auth = T411Auth(self.token)
logger.log('Using T411 Authorization token : ' + self.token, logger.DEBUG)
return True
else:
logger.log('T411 token not found in authentication response', logger.WARNING)
return False
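    # Editorial note (not part of the original provider): judging from the key accesses above, a
    # successful reply from the auth endpoint is assumed to look roughly like
    #     {"token": "<api token>", "uid": "<user id>"}
    # while a failed login simply lacks the 'token' key; the exact schema is an assumption here.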
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
if not ep_obj:
return [search_string]
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + '.S%02d' % int(ep_obj.scene_season) # 1) showName.SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return [search_string]
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.'
if self.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', '|')
elif self.show.sports:
ep_string += str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
elif self.show.anime:
ep_string += "%i" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string += ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', '.', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
logger.log(u"_doSearch started with ..." + str(search_params), logger.DEBUG)
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
for sc in self.subcategories:
searchURL = self.urls['search'] % (search_string, sc)
logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL, json=True)
if not data:
continue
try:
if 'torrents' not in data:
logger.log(
u"The Data returned from " + self.name + " do not contains any torrent : " + str(data),
logger.DEBUG)
continue
torrents = data['torrents']
if not torrents:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
for torrent in torrents:
try:
torrent_name = torrent['name']
torrent_id = torrent['id']
torrent_download_url = (self.urls['download'] % torrent_id).encode('utf8')
if not torrent_name or not torrent_download_url:
continue
item = torrent_name, torrent_download_url
logger.log(u"Found result: " + torrent_name + " (" + torrent_download_url + ")",
logger.DEBUG)
items[mode].append(item)
except Exception as e:
logger.log(u"Invalid torrent data, skipping results: {0}".format(str(torrent)), logger.DEBUG)
continue
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.ERROR)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = str(url).replace('&', '&')
return title, url
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
searchResults = self._doSearch(searchString[0])
for item in searchResults:
title, url = self._get_title_and_url(item)
if title and url:
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class T411Auth(AuthBase):
"""Attaches HTTP Authentication to the given Request object."""
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = self.token
return r
class T411Cache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# Only poll T411 every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = T411Provider()
| gpl-3.0 | 121,364,437,317,147,620 | 35.691176 | 125 | 0.547395 | false |
444thLiao/VarappX | tests/data_models/test_users.py | 2 | 3481 | #!/usr/bin/env python3
import unittest
import django.test
from varapp.data_models.users import *
from varapp.models.users import *
from django.conf import settings
class TestUser(unittest.TestCase):
def test_user_constructor(self):
s = User('A', 'a@a', 'code', '', 1, Person(firstname='A'), Role('guest'))
self.assertEqual(s.username, 'A')
self.assertEqual(s.person.firstname, 'A')
self.assertEqual(s.role.name, 'guest')
def test_expose(self):
u = User('A', 'a@a', 'code', '', 1, Person(firstname='A'), Role('guest'))
self.assertIsInstance(u.expose(), dict)
self.assertEqual(u.expose()['username'], 'A')
class TestDatabase(unittest.TestCase):
def test_database_constructor(self):
d = Database('db', 'path', 'filename', 'sha1', 'desc', 1, 'size', ['A','B'])
self.assertEqual(d.name, 'db')
self.assertEqual(d.users[1], 'B')
def test_expose(self):
d = Database('db', 'path', 'filename', 'sha1', 'desc', 1, 'size', ['A','B'])
self.assertIsInstance(d.expose(), dict)
self.assertEqual(d.expose()['users'][0], 'A')
class TestFactories(django.test.TestCase):
def test_role_factory(self):
R = Roles(rank=6)
r = role_factory(R)
self.assertIsInstance(r, Role)
self.assertEqual(r.rank, R.rank)
def test_person_factory(self):
P = People(firstname='asdf')
p = person_factory(P)
self.assertIsInstance(p, Person)
self.assertEqual(p.firstname, P.firstname)
def test_database_factory(self):
D = VariantsDb.objects.get(filename=settings.DB_TEST)
d = database_factory(D)
self.assertIsInstance(d, Database)
self.assertEqual(d.name, D.name)
self.assertGreaterEqual(len(d.users), 1)
def test_user_factory(self):
R = Roles.objects.create(rank=6)
P = People.objects.create(firstname='asdf')
U = Users.objects.create(username='adsf', role=R, person=P, is_active=1)
D = VariantsDb.objects.get(filename=settings.DB_TEST)
u = user_factory(U)
self.assertIsInstance(u, User)
self.assertEqual(u.username, U.username)
self.assertGreaterEqual(len(u.databases), 0)
# Add access to test db - it should reflect in User.databases
DbAccess.objects.create(user=U, variants_db=D, is_active=1)
u = user_factory(U)
self.assertGreaterEqual(len(u.databases), 1)
# Make the db inactive - it should get ignored again
D.is_active = 0
D.save()
u = user_factory(U)
self.assertGreaterEqual(len(u.databases), 0)
class TestLists(unittest.TestCase):
def test_users_list_from_users_db(self):
L = users_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], User)
def test_roles_list_from_users_db(self):
L = roles_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Role)
def test_persons_list_from_db(self):
L = persons_list_from_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Person)
def test_databases_list_from_users_db(self):
L = databases_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Database)
self.assertEqual(L[0].filename, settings.DB_TEST)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -908,909,783,271,142,300 | 33.127451 | 84 | 0.620511 | false |
bmerry/mlsgpu | utils/simulate.py | 1 | 12858 | #!/usr/bin/env python
# mlsgpu: surface reconstruction from point clouds
# Copyright (C) 2013 University of Cape Town
#
# This file is part of mlsgpu.
#
# mlsgpu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
import sys
import heapq
import timeplot
from optparse import OptionParser
class QItem(object):
def __init__(self, parent, parent_get, parent_push):
self.parent = parent
self.size = 1
self.finish = 0.0
self.parent_get = parent_get
self.parent_push = parent_push
self.children = []
def total_time(self):
ans = self.finish
for x in self.children:
ans += x.parent_get
ans += x.parent_push
return ans
class EndQItem(object):
def __init__(self):
pass
def process_worker(worker, pq):
pqid = 0
item = None
cq = []
get_size = None
for action in worker.actions:
if action.name in ['bbox', 'pop']:
if pqid == len(pq):
break
item = pq[pqid]
pqid += 1
base = action.stop
elif action.name == 'get':
parent_get = action.start - base
base = action.stop
get_size = action.value
elif action.name == 'push':
parent_push = action.start - base
base = action.stop
child = QItem(item, parent_get, parent_push)
if get_size is not None:
child.size = get_size
get_size = None
item.children.append(child)
cq.append(child)
item.finish = 0.0
elif action.name in ['compute', 'load', 'write']:
if worker.name != 'main' or action.name != 'write':
# Want to exclude phase 3
item.finish += action.stop - action.start
elif action.name in ['init']:
pass
else:
raise ValueError('Unhandled action "' + action.name + '"')
if pqid != len(pq):
raise ValueError('Parent queue was not exhausted')
return cq
def get_worker(group, name):
for worker in group:
if worker.name == name:
return worker
return None
class SimPool(object):
def __init__(self, simulator, size, inorder = True):
self._size = size
self._waiters = []
self._watchers = []
self._allocs = []
self._spare = size
self._inorder = inorder
self._simulator = simulator
def spare(self):
return self._spare
def _biggest(self):
"""Maximum possible allocation without blocking"""
if not self._inorder:
return self._spare
elif not self._allocs:
return self._size
else:
start = self._allocs[0][0]
end = self._allocs[-1][1]
if end > start:
return max(self._size - end, start)
else:
return start - end
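    # Worked example (editorial sketch, not in the original source): with size == 10 and
    # self._allocs == [(2, 5), (5, 9)], start is 2 and end is 9, so the largest request that can
    # be granted without blocking is max(10 - 9, 2) == 2; once the allocations wrap so that
    # end <= start, the free region is the gap between them and _biggest() returns start - end.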
def get(self, worker, size):
if not self._inorder:
size = 1
assert size > 0
assert size <= self._size
self._waiters.append((worker, size))
self._do_wakeups()
def can_get(self, size):
if not self._inorder:
size = 1
return size <= self._biggest()
    def watch(self, worker):
        '''Request to be woken up when free space increases'''
        self._watchers.append(worker)
    def unwatch(self, worker):
        '''Cancel a previous watch request'''
        self._watchers.remove(worker)
def _do_wakeups(self):
while self._waiters:
(w, size) = self._waiters[0]
if size > self._biggest():
break
elif not self._allocs:
start = 0
elif not self._inorder:
start = self._allocs[-1][1]
else:
cur_start = self._allocs[0][0]
cur_end = self._allocs[-1][1]
cur_limit = self._size
if cur_end <= cur_start:
                    cur_limit = cur_start
if cur_limit - cur_end >= size:
start = cur_end
else:
start = 0
a = (start, start + size)
self._allocs.append(a)
self._spare -= size
del self._waiters[0]
self._simulator.wakeup(w, value = a)
while self._watchers:
w = self._watchers.pop(0)
self._simulator.wakeup(w)
def done(self, alloc):
self._allocs.remove(alloc)
self._spare += alloc[1] - alloc[0]
self._do_wakeups()
class SimSimpleQueue(object):
"""
Queue without associated pool. Just accepts objects and provides
a blocking pop.
"""
def __init__(self, simulator):
self._queue = []
self._waiters = []
self._simulator = simulator
self._running = True
def _do_wakeups(self):
while self._waiters and self._queue:
item = self._queue.pop(0)
worker = self._waiters.pop(0)
self._simulator.wakeup(worker, value = item)
while self._waiters and not self._running:
worker = self._waiters.pop(0)
self._simulator.wakeup(worker, value = EndQItem())
def pop(self, worker):
self._waiters.append(worker)
self._do_wakeups()
def push(self, item):
self._queue.append(item)
self._do_wakeups()
def stop(self):
self._running = False
self._do_wakeups()
class SimQueue(object):
def __init__(self, simulator, pool_size, inorder = True):
self._pool = SimPool(simulator, pool_size, inorder)
self._queue = SimSimpleQueue(simulator)
def spare(self):
return self._pool.spare()
def pop(self, worker):
self._queue.pop(worker)
def get(self, worker, size):
self._pool.get(worker, size)
    def can_get(self, size):
        return self._pool.can_get(size)
def watch(self, worker):
self._pool.watch(worker)
def unwatch(self, worker):
self._pool.unwatch(worker)
def push(self, item, alloc):
self._queue.push(item)
def done(self, alloc):
self._pool.done(alloc)
def stop(self):
self._queue.stop()
class SimWorker(object):
def __init__(self, simulator, name, inq, outqs, options):
self.simulator = simulator
self.name = name
self.inq = inq
self.outqs = outqs
self.generator = self.run()
def best_queue(self, size):
if len(self.outqs) > 1:
valid_queues = [q for q in self.outqs if q.can_get(size)]
if valid_queues:
return max(valid_queues, key = lambda x: x.spare())
else:
return None
else:
return self.outqs[0]
def run(self):
yield
while True:
self.inq.pop(self)
item = yield
if isinstance(item, EndQItem):
if self.simulator.count_running_workers(self.name) == 1:
# We are the last worker from the set
for q in self.outqs:
q.stop()
break
print(self.name, self.simulator.time, item.total_time())
for child in item.children:
size = child.size
yield child.parent_get
while True:
outq = self.best_queue(size)
if outq is not None:
break
for q in self.outqs:
q.watch(self)
yield
for q in self.outqs:
q.unwatch(self)
outq.get(self, size)
child.alloc = yield
yield child.parent_push
outq.push(child, child.alloc)
if item.finish > 0:
yield item.finish
if hasattr(item, 'alloc'):
self.inq.done(item.alloc)
class Simulator(object):
def __init__(self):
self.workers = []
self.wakeup_queue = []
self.time = 0.0
self.running = set()
def add_worker(self, worker):
self.workers.append(worker)
worker.generator.send(None)
self.wakeup(worker)
def wakeup(self, worker, time = None, value = None):
if time is None:
time = self.time
assert time >= self.time
for (t, w, v) in self.wakeup_queue:
assert w != worker
heapq.heappush(self.wakeup_queue, (time, worker, value))
def count_running_workers(self, name):
ans = 0
for w in self.running:
if w.name == name:
ans += 1
return ans
def run(self):
self.time = 0.0
self.running = set(self.workers)
while self.wakeup_queue:
(self.time, worker, value) = heapq.heappop(self.wakeup_queue)
assert worker in self.running
try:
compute_time = worker.generator.send(value)
if compute_time is not None:
assert compute_time >= 0
self.wakeup(worker, self.time + compute_time)
except StopIteration:
self.running.remove(worker)
if self.running:
print("Workers still running: possible deadlock", file = sys.stderr)
for w in self.running:
print(" " + w.name, file = sys.stderr)
sys.exit(1)
def load_items(group):
copy_worker = get_worker(group, 'bucket.fine.0')
if copy_worker is None:
copy_worker = get_worker(group, 'copy.0')
all_queue = [QItem(None, 0.0, 0.0)]
coarse_queue = process_worker(get_worker(group, 'main'), all_queue)
copy_queue = process_worker(copy_worker, coarse_queue)
mesh_queue = process_worker(get_worker(group, 'device.0'), copy_queue)
process_worker(get_worker(group, 'mesher.0'), mesh_queue)
return all_queue[0]
def simulate(root, options):
simulator = Simulator()
gpus = options.gpus
if options.infinite:
big = 10**30
coarse_cap = big
copy_cap = big
mesher_cap = big
else:
coarse_cap = options.coarse_cap * 1024 * 1024
copy_cap = 2
mesher_cap = options.mesher_cap * 1024 * 1024
all_queue = SimQueue(simulator, 1)
coarse_queue = SimQueue(simulator, coarse_cap)
copy_queues = [SimQueue(simulator, copy_cap, inorder = False) for i in range(gpus)]
mesh_queue = SimQueue(simulator, mesher_cap)
simulator.add_worker(SimWorker(simulator, 'coarse', all_queue, [coarse_queue], options))
simulator.add_worker(SimWorker(simulator, 'copy', coarse_queue, copy_queues, options))
for i in range(gpus):
simulator.add_worker(SimWorker(simulator, 'device', copy_queues[i], [mesh_queue], options))
simulator.add_worker(SimWorker(simulator, 'mesher', mesh_queue, [], options))
all_queue.push(root, None)
all_queue.stop()
simulator.run()
print(simulator.time)
def main():
parser = OptionParser()
parser.add_option('--infinite', action = 'store_true')
parser.add_option('--gpus', type = 'int', default = 1)
parser.add_option('--coarse-cap', type = 'int', metavar = 'MiB', default = 512)
parser.add_option('--bucket-cap', type = 'int', metavar = 'MiB', default = 128)
parser.add_option('--mesher-cap', type = 'int', metavar = 'MiB', default = 512)
(options, args) = parser.parse_args()
groups = []
if args:
for fname in args:
with open(fname, 'r') as f:
groups.append(timeplot.load_data(f))
else:
groups.append(timeplot.load_data(sys.stdin))
if len(groups) != 1:
print("Only one group is supported", file = sys.stderr)
sys.exit(1)
group = groups[0]
for worker in group:
if worker.name.endswith('.1'):
print("Only one worker of each type is supported", file = sys.stderr)
sys.exit(1)
root = load_items(group)
simulate(root, options)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,755,193,426,946,831,000 | 30.208738 | 99 | 0.549697 | false |
kc4271/batch_downloader | requests/packages/urllib3/contrib/pyopenssl.py | 304 | 15086 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
encryption in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from cStringIO import StringIO
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class fileobject(_fileobject):
def _wait_for_sock(self):
rd, wd, ed = select.select([self._sock], [], [],
self._sock.gettimeout())
if not rd:
raise timeout()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
# Disable TLS compression to migitate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
| mit | 3,865,868,602,345,258,000 | 34.748815 | 79 | 0.56052 | false |
elahejalalpour/ELRyu | ryu/services/protocols/bgp/operator/commands/show/memory.py | 27 | 2971 | import gc
import sys
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
class Memory(Command):
help_msg = 'show memory information'
command = 'memory'
def __init__(self, *args, **kwargs):
super(Memory, self).__init__(*args, **kwargs)
self.subcommands = {
'summary': self.Summary}
class Summary(Command):
help_msg = 'shows total memory used and how it is getting used'
command = 'summary'
def action(self, params):
count = {}
size = {}
total_size = 0
unreachable = gc.collect()
for obj in gc.get_objects():
inst_name = type(obj).__name__
c = count.get(inst_name, None)
if not c:
count[inst_name] = 0
s = size.get(inst_name, None)
if not s:
size[inst_name] = 0
count[inst_name] += 1
s = sys.getsizeof(obj)
size[inst_name] += s
total_size += s
# Total size in MB
total_size = total_size // 1000000
ret = {
'unreachable': unreachable,
'total': total_size,
'summary': []}
for class_name, s in size.items():
# Calculate size in MB
size_mb = s // 1000000
# We are only interested in class which take-up more than a MB
if size_mb > 0:
ret['summary'].append(
{
'class': class_name,
'instances': count.get(class_name, None),
'size': size_mb
}
)
return CommandsResponse(STATUS_OK, ret)
@classmethod
def cli_resp_formatter(cls, resp):
if resp.status == STATUS_ERROR:
return Command.cli_resp_formatter(resp)
val = resp.value
ret = 'Unreachable objects: {0}\n'.format(
val.get('unreachable', None)
)
ret += 'Total memory used (MB): {0}\n'.format(
val.get('total', None)
)
ret += 'Classes with instances that take-up more than one MB:\n'
ret += '{0:<20s} {1:>16s} {2:>16s}\n'.format(
'Class',
'#Instance',
'Size(MB)'
)
for s in val.get('summary', []):
ret += '{0:<20s} {1:>16d} {2:>16d}\n'.format(
s.get('class', None), s.get('instances', None),
s.get('size', None)
)
return ret
| apache-2.0 | -55,127,114,808,981,940 | 32.382022 | 78 | 0.464827 | false |
mastizada/pontoon | pontoon/base/migrations/0049_create_translation_memory_entries.py | 3 | 1418 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_translation_memory_entries(apps, schema):
Translation = apps.get_model('base', 'Translation')
TranslationMemoryEntry = apps.get_model('base', 'TranslationMemoryEntry')
def get_memory_entry(translation):
return TranslationMemoryEntry(
entity_id=translation['entity_id'],
source=translation['entity__string'],
target=translation['string'],
locale_id=translation['locale_id'],
translation_id=translation['pk'],
)
translations = (
Translation.objects.filter(approved=True, fuzzy=False)
.filter(models.Q(plural_form__isnull=True) | models.Q(plural_form=0))
.prefetch_related('entity')
.values('pk', 'entity_id', 'entity__string', 'string', 'locale_id')
)
TranslationMemoryEntry.objects.bulk_create(map(get_memory_entry, translations), 1000)
def remove_translation_memory_entries(apps, schema):
TranslationMemoryEntry = apps.get_model('base', 'TranslationMemoryEntry')
TranslationMemoryEntry.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('base', '0048_translationmemoryentry'),
]
operations = [
migrations.RunPython(create_translation_memory_entries, remove_translation_memory_entries)
]
| bsd-3-clause | 4,044,819,193,914,254,000 | 35.358974 | 98 | 0.672073 | false |
kosgroup/odoo | addons/note/models/note.py | 2 | 5954 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.tools import html2plaintext
class Stage(models.Model):
_name = "note.stage"
_description = "Note Stage"
_order = 'sequence'
name = fields.Char('Stage Name', translate=True, required=True)
sequence = fields.Integer(help="Used to order the note stages", default=1)
user_id = fields.Many2one('res.users', string='Owner', required=True, ondelete='cascade', default=lambda self: self.env.uid, help="Owner of the note stage")
fold = fields.Boolean('Folded by Default')
class Tag(models.Model):
_name = "note.tag"
_description = "Note Tag"
name = fields.Char('Tag Name', required=True)
color = fields.Integer('Color Index')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class Note(models.Model):
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
_order = 'sequence'
def _get_default_stage_id(self):
return self.env['note.stage'].search([('user_id', '=', self.env.uid)], limit=1)
name = fields.Text(compute='_compute_name', string='Note Summary', store=True)
user_id = fields.Many2one('res.users', string='Owner', default=lambda self: self.env.uid)
memo = fields.Html('Note Content')
sequence = fields.Integer('Sequence')
stage_id = fields.Many2one('note.stage', compute='_compute_stage_id',
inverse='_inverse_stage_id', string='Stage')
stage_ids = fields.Many2many('note.stage', 'note_stage_rel', 'note_id', 'stage_id',
string='Stages of Users', default=_get_default_stage_id)
open = fields.Boolean(string='Active', track_visibility='onchange', default=True)
date_done = fields.Date('Date done')
color = fields.Integer(string='Color Index')
tag_ids = fields.Many2many('note.tag', 'note_tags_rel', 'note_id', 'tag_id', string='Tags')
@api.depends('memo')
def _compute_name(self):
""" Read the first line of the memo to determine the note name """
for note in self:
text = html2plaintext(note.memo) if note.memo else ''
note.name = text.strip().replace('*', '').split("\n")[0]
@api.multi
def _compute_stage_id(self):
for note in self:
for stage in note.stage_ids.filtered(lambda stage: stage.user_id == self.env.user):
note.stage_id = stage
@api.multi
def _inverse_stage_id(self):
for note in self.filtered('stage_id'):
note.stage_ids = note.stage_id + note.stage_ids.filtered(lambda stage: stage.user_id != self.env.user)
@api.model
def name_create(self, name):
return self.create({'memo': name}).name_get()[0]
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
if groupby and groupby[0] == "stage_id":
stages = self.env['note.stage'].search([('user_id', '=', self.env.uid)])
if stages: # if the user has some stages
result = [{ # notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search_count(domain + [('stage_ids', '=', stage.id)]),
'__fold': stage.fold,
} for stage in stages]
# note without user's stage
nb_notes_ws = self.search_count(domain + [('stage_ids', 'not in', stages.ids)])
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', stages.ids)
if result and result[0]['stage_id'][0] == stages[0].id:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count': nb_notes_ws,
                            '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty, get note without user's stage
nb_notes_ws = self.search_count(domain)
if nb_notes_ws:
result = [{ # notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count': nb_notes_ws
}]
else:
result = []
return result
return super(Note, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
@api.multi
def _notification_recipients(self, message, groups):
""" All users can create a new note. """
groups = super(Note, self)._notification_recipients(message, groups)
new_action_id = self.env['ir.model.data'].xmlid_to_res_id('note.action_note_note')
new_action = self._notification_link_helper('new', action_id=new_action_id)
groups['user']['actions'] = [{'url': new_action, 'title': _('New Note')}]
return groups
@api.multi
def action_close(self):
return self.write({'open': False, 'date_done': fields.date.today()})
@api.multi
def action_open(self):
return self.write({'open': True})
| gpl-3.0 | 7,629,107,794,595,674,000 | 42.144928 | 160 | 0.549547 | false |
magicrub/MissionPlanner | Lib/distutils/version.py | 59 | 11732 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(string.atoi, [major, minor, patch]))
else:
self.version = tuple(map(string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi(prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join(map(str, self.version[0:2]), '.')
else:
vstring = string.join(map(str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance(other, StringType):
other = StrictVersion(other)
compare = cmp(self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp(self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
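# A short illustration of the ordering described above (editorial addition, not part of the
# original module):
#
#     >>> StrictVersion('1.0.4a3') < StrictVersion('1.0.4b1') < StrictVersion('1.0.4')
#     True
#     >>> StrictVersion('0.4') == StrictVersion('0.4.0')
#     True
#
# i.e. pre-release tags sort before the untagged release with the same numeric components.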
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
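# A concrete illustration of the tuple/lexical comparison (editorial addition, not part of the
# original module):
#
#     >>> LooseVersion('1.5.2b2').version
#     [1, 5, 2, 'b', 2]
#     >>> LooseVersion('0.99') < LooseVersion('0.99pl14') < LooseVersion('1.0')
#     True
#
# which matches the "post-release" intuition, while '1.5.2a2' still sorts after '1.5.2' under
# this scheme -- exactly the caveat described above.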
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter(lambda x: x and x != '.',
self.component_re.split(vstring))
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str(self)
def __cmp__ (self, other):
if isinstance(other, StringType):
other = LooseVersion(other)
return cmp(self.version, other.version)
# end class LooseVersion
| gpl-3.0 | 8,511,797,182,796,536,000 | 37.237458 | 79 | 0.636123 | false |
ukanga/SickRage | sickbeard/search.py | 3 | 30233 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
import os
import re
import threading
import traceback
import sickbeard
from sickbeard import clients, common, db, failed_history, helpers, history, logger, notifiers, nzbSplitter, nzbget, \
sab, show_name_helpers, ui
from sickbeard.common import MULTI_EP_RESULT, Quality, SEASON_RESULT, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import AuthException, ex
from sickrage.providers.GenericProvider import GenericProvider
def _downloadResult(result):
"""
Downloads a result to the appropriate black hole folder.
:param result: SearchResult instance to download.
:return: boolean, True on success
"""
resProvider = result.provider
if resProvider is None:
logger.log("Invalid provider name - this is a coding error, report it please", logger.ERROR)
return False
# nzbs with an URL can just be downloaded from the provider
if result.resultType == "nzb":
newResult = resProvider.download_result(result)
# if it's an nzb data result
elif result.resultType == "nzbdata":
# get the final file path to the nzb
fileName = ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
logger.log("Saving NZB to " + fileName)
newResult = True
# save the data to disk
try:
with ek(open, fileName, 'w') as fileOut:
fileOut.write(result.extraInfo[0])
helpers.chmodAsParent(fileName)
except EnvironmentError as e:
logger.log("Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
newResult = False
elif result.resultType == "torrent":
newResult = resProvider.download_result(result)
else:
logger.log("Invalid provider type - this is a coding error, report it please", logger.ERROR)
newResult = False
return newResult
def snatchEpisode(result, endStatus=SNATCHED): # pylint: disable=too-many-branches, too-many-statements
"""
Contains the internal logic necessary to actually "snatch" a result that
has been found.
:param result: SearchResult instance to be snatched.
:param endStatus: the episode status that should be used for the episode object once it's snatched.
:return: boolean, True on success
"""
if result is None:
return False
result.priority = 0 # -1 = low, 0 = normal, 1 = high
if sickbeard.ALLOW_HIGH_PRIORITY:
# if it aired recently make it high priority
for curEp in result.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
result.priority = 1
endStatus = SNATCHED_PROPER if re.search(r'\b(proper|repack|real)\b', result.name, re.I) else endStatus
if result.url.startswith('magnet') or result.url.endswith('torrent'):
result.resultType = 'torrent'
# NZBs can be sent straight to SAB or saved to disk
if result.resultType in ("nzb", "nzbdata"):
if sickbeard.NZB_METHOD == "blackhole":
dlResult = _downloadResult(result)
elif sickbeard.NZB_METHOD == "sabnzbd":
dlResult = sab.sendNZB(result)
elif sickbeard.NZB_METHOD == "nzbget":
is_proper = True if endStatus == SNATCHED_PROPER else False
dlResult = nzbget.sendNZB(result, is_proper)
elif sickbeard.NZB_METHOD == "download_station":
client = clients.getClientInstance(sickbeard.NZB_METHOD)(
sickbeard.SYNOLOGY_DSM_HOST, sickbeard.SYNOLOGY_DSM_USERNAME, sickbeard.SYNOLOGY_DSM_PASSWORD)
dlResult = client.sendNZB(result)
else:
logger.log("Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
dlResult = False
# Torrents can be sent to clients or saved to disk
elif result.resultType == "torrent":
# torrents are saved to disk when blackhole mode
if sickbeard.TORRENT_METHOD == "blackhole":
dlResult = _downloadResult(result)
else:
if not result.content and not result.url.startswith('magnet'):
if result.provider.login():
result.content = result.provider.get_url(result.url, returns='content')
if result.content or result.url.startswith('magnet'):
client = clients.getClientInstance(sickbeard.TORRENT_METHOD)()
dlResult = client.sendTORRENT(result)
else:
logger.log("Torrent file content is empty", logger.WARNING)
dlResult = False
else:
logger.log("Unknown result type, unable to download it ({0!r})".format(result.resultType), logger.ERROR)
dlResult = False
if not dlResult:
return False
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.logSnatch(result)
ui.notifications.message('Episode snatched', result.name)
history.logSnatch(result)
# don't notify when we re-download an episode
sql_l = []
trakt_data = []
for curEpObj in result.episodes:
with curEpObj.lock:
if isFirstBestMatch(result):
curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
else:
curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
sql_l.append(curEpObj.get_sql())
if curEpObj.status not in Quality.DOWNLOADED:
try:
notifiers.notify_snatch("{0} from {1}".format(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN'), result.provider.name)) # pylint: disable=protected-access
except Exception:
                    # Without this, when a notification fails, it crashes the snatch thread and SR will
                    # keep snatching until a notification is sent
logger.log("Failed to send snatch notification", logger.DEBUG)
trakt_data.append((curEpObj.season, curEpObj.episode))
data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log("Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
if data:
notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
return True
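# Illustrative pairing (hypothetical driver code, not part of the original module):
# snatchEpisode() is normally handed the winner of pickBestResult() below, e.g.
#
#   best = pickBestResult(candidate_results, show)
#   if best and snatchEpisode(best):
#       logger.log("Snatched " + best.name)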
def pickBestResult(results, show): # pylint: disable=too-many-branches
"""
Find the best result out of a list of search results for a show
:param results: list of result objects
:param show: Shows we check for
:return: best result object
"""
results = results if isinstance(results, list) else [results]
logger.log("Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
bestResult = None
# find the best result for the current episode
for cur_result in results:
if show and cur_result.show is not show:
continue
        # build the black and white list
if show.is_anime:
if not show.release_groups.is_valid(cur_result):
continue
logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
anyQualities, bestQualities = Quality.splitQuality(show.quality)
if cur_result.quality not in anyQualities + bestQualities:
logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
continue
if not show_name_helpers.filter_bad_releases(cur_result.name, parse=False, show=show):
continue
if hasattr(cur_result, 'size'):
if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
cur_result.provider.name):
logger.log(cur_result.name + " has previously failed, rejecting it")
continue
if not bestResult:
bestResult = cur_result
elif cur_result.quality in bestQualities and (bestResult.quality < cur_result.quality or bestResult.quality not in bestQualities):
bestResult = cur_result
elif cur_result.quality in anyQualities and bestResult.quality not in bestQualities and bestResult.quality < cur_result.quality:
bestResult = cur_result
elif bestResult.quality == cur_result.quality:
if "proper" in cur_result.name.lower() or "real" in cur_result.name.lower() or "repack" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (repack/proper/real over nuked)")
bestResult = cur_result
elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (normal instead of internal)")
bestResult = cur_result
elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (x264 over xvid)")
bestResult = cur_result
if bestResult:
logger.log("Picked " + bestResult.name + " as the best", logger.DEBUG)
else:
logger.log("No result picked.", logger.DEBUG)
return bestResult
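# Sketch of the selection rule above (explanatory only, quality values are
# hypothetical): Quality.splitQuality(show.quality) yields the allowed and
# preferred quality lists, e.g.
#
#   anyQualities, bestQualities = Quality.splitQuality(show.quality)
#   # anyQualities  -> e.g. [Quality.SDTV, Quality.HDTV]
#   # bestQualities -> e.g. [Quality.FULLHDBLURAY]
#
# A result whose quality is in bestQualities beats one that is not; between
# equal qualities, proper/repack/real, non-internal and x264 releases win.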
def isFinalResult(result):
"""
Checks if the given result is good enough quality that we can stop searching for other ones.
:param result: quality to check
:return: True if the result is the highest quality in both the any/best quality lists else False
"""
logger.log("Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a re-download that's higher than this then we definitely need to keep looking
if best_qualities and result.quality < max(best_qualities):
return False
# if it does not match the shows black and white list its no good
elif show_obj.is_anime and show_obj.release_groups.is_valid(result):
return False
# if there's no re-download that's higher (above) and this is the highest initial download then we're good
elif any_qualities and result.quality in any_qualities:
return True
elif best_qualities and result.quality == max(best_qualities):
return True
    # if we got here then it's either not on the lists, they're empty, or it's lower than the highest required
else:
return False
def isFirstBestMatch(result):
"""
Checks if the given result is a best quality match and if we want to stop searching providers here.
:param result: to check
:return: True if the result is the best quality match else False
"""
logger.log("Checking if we should stop searching for a better quality for for episode " + result.name,
logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities_, best_qualities = Quality.splitQuality(show_obj.quality)
return result.quality in best_qualities if best_qualities else False
def wantedEpisodes(show, fromDate):
"""
Get a list of episodes that we want to download
:param show: Show these episodes are from
:param fromDate: Search from a certain date
:return: list of wanted episodes
"""
wanted = []
if show.paused:
logger.log("Not checking for episodes of {0} because the show is paused".format(show.name), logger.DEBUG)
return wanted
allowed_qualities, preferred_qualities = common.Quality.splitQuality(show.quality)
all_qualities = list(set(allowed_qualities + preferred_qualities))
logger.log("Seeing if we need anything from " + show.name, logger.DEBUG)
con = db.DBConnection()
sql_results = con.select(
"SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
[show.indexerid, fromDate.toordinal()]
)
# check through the list of statuses to see if we want any
for result in sql_results:
cur_status, cur_quality = common.Quality.splitCompositeStatus(int(result[b"status"] or -1))
if cur_status not in {common.WANTED, common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER}:
continue
if cur_status != common.WANTED:
if preferred_qualities:
if cur_quality in preferred_qualities:
continue
elif cur_quality in allowed_qualities:
continue
epObj = show.getEpisode(result[b"season"], result[b"episode"])
epObj.wantedQuality = [i for i in all_qualities if i > cur_quality and i != common.Quality.UNKNOWN]
wanted.append(epObj)
return wanted
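# Note on the status handling above (explanatory only, not part of the original
# module): episode rows store a composite status, i.e. the status and the
# quality packed into a single integer, which splitCompositeStatus() unpacks:
#
#   cur_status, cur_quality = common.Quality.splitCompositeStatus(composite)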
def searchForNeededEpisodes():
"""
Check providers for details on wanted episodes
:return: episodes we have a search hit for
"""
foundResults = {}
didSearch = False
show_list = sickbeard.showList
fromDate = datetime.date.fromordinal(1)
episodes = []
for curShow in show_list:
if not curShow.paused:
sickbeard.name_cache.buildNameCache(curShow)
episodes.extend(wantedEpisodes(curShow, fromDate))
if not episodes:
        # nothing wanted so early out, i.e. avoid whatever arbitrarily
# complex thing a provider cache update entails, for example,
# reading rss feeds
logger.log("No episodes needed.", logger.INFO)
return foundResults.values()
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_daily]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curFoundResults = {}
try:
curFoundResults = curProvider.search_rss(episodes)
except AuthException as e:
logger.log("Authentication error: " + ex(e), logger.WARNING)
continue
except Exception as e:
logger.log("Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
# pick a single result for each episode, respecting existing results
for curEp in curFoundResults:
if not curEp.show or curEp.show.paused:
logger.log("Skipping {0} because the show is paused ".format(curEp.prettyName()), logger.DEBUG)
continue
bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
# if all results were rejected move on to the next episode
if not bestResult:
logger.log("All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
continue
# if it's already in the list (from another provider) and the newly found quality is no better then skip it
if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
continue
foundResults[curEp] = bestResult
threading.currentThread().name = origThreadName
if not didSearch:
logger.log(
"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
logger.INFO)
return foundResults.values()
def searchProviders(show, episodes, manualSearch=False, downCurQuality=False): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
"""
Walk providers for information on shows
:param show: Show we are looking for
:param episodes: Episodes we hope to find
:param manualSearch: Boolean, is this a manual search?
:param downCurQuality: Boolean, should we re-download currently available quality file
:return: results for search
"""
foundResults = {}
finalResults = []
didSearch = False
# build name cache for show
sickbeard.name_cache.buildNameCache(show)
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_backlog]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
threading.currentThread().name = origThreadName
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
if curProvider.anime_only and not show.is_anime:
logger.log("" + str(show.name) + " is not an anime, skipping", logger.DEBUG)
continue
foundResults[curProvider.name] = {}
searchCount = 0
search_mode = curProvider.search_mode
        # Always do a per-episode search when manually searching in sponly mode
if search_mode == 'sponly' and manualSearch is True:
search_mode = 'eponly'
while True:
searchCount += 1
if search_mode == 'eponly':
logger.log("Performing episode search for " + show.name)
else:
logger.log("Performing season pack search for " + show.name)
try:
searchResults = curProvider.find_search_results(show, episodes, search_mode, manualSearch, downCurQuality)
except AuthException as error:
logger.log("Authentication error: {0!r}".format(error), logger.WARNING)
break
except Exception as error:
logger.log("Exception while searching {0}. Error: {1!r}".format(curProvider.name, error), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
break
didSearch = True
if len(searchResults):
# make a list of all the results for this provider
for curEp in searchResults:
if curEp in foundResults[curProvider.name]:
foundResults[curProvider.name][curEp] += searchResults[curEp]
else:
foundResults[curProvider.name][curEp] = searchResults[curEp]
break
elif not curProvider.search_fallback or searchCount == 2:
break
if search_mode == 'sponly':
logger.log("Fallback episode search initiated", logger.DEBUG)
search_mode = 'eponly'
else:
logger.log("Fallback season pack search initiate", logger.DEBUG)
search_mode = 'sponly'
# skip to next provider if we have no results to process
if not foundResults[curProvider.name]:
continue
# pick the best season NZB
bestSeasonResult = None
if SEASON_RESULT in foundResults[curProvider.name]:
bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show)
highest_quality_overall = 0
for cur_episode in foundResults[curProvider.name]:
for cur_result in foundResults[curProvider.name][cur_episode]:
if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
highest_quality_overall = cur_result.quality
logger.log("The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
logger.DEBUG)
# see if every episode is wanted
if bestSeasonResult:
searchedSeasons = {str(x.season) for x in episodes}
# get the quality of the season nzb
seasonQual = bestSeasonResult.quality
logger.log(
"The quality of the season " + bestSeasonResult.provider.provider_type + " is " + Quality.qualityStrings[
seasonQual], logger.DEBUG)
main_db_con = db.DBConnection()
allEps = [int(x[b"episode"])
for x in main_db_con.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
[show.indexerid])]
logger.log(
"Executed query: [SELECT episode FROM tv_episodes WHERE showid = {0} AND season in {1}]".format(show.indexerid, ','.join(searchedSeasons)))
logger.log("Episode list: " + str(allEps), logger.DEBUG)
allWanted = True
anyWanted = False
for curEpNum in allEps:
for season in {x.season for x in episodes}:
if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
allWanted = False
else:
anyWanted = True
# if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
if allWanted and bestSeasonResult.quality == highest_quality_overall:
logger.log(
"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.provider_type + " " + bestSeasonResult.name)
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return [bestSeasonResult]
elif not anyWanted:
logger.log(
"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
logger.DEBUG)
else:
if bestSeasonResult.provider.provider_type == GenericProvider.NZB:
logger.log("Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
# if not, break it apart and add them as the lowest priority results
individualResults = nzbSplitter.split_result(bestSeasonResult)
for curResult in individualResults:
if len(curResult.episodes) == 1:
epNum = curResult.episodes[0].episode
elif len(curResult.episodes) > 1:
epNum = MULTI_EP_RESULT
if epNum in foundResults[curProvider.name]:
foundResults[curProvider.name][epNum].append(curResult)
else:
foundResults[curProvider.name][epNum] = [curResult]
            # If this is a torrent all we can do is leech the entire torrent; the user will have to select which eps not to download in their torrent client
else:
# Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
logger.log(
"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
if MULTI_EP_RESULT in foundResults[curProvider.name]:
foundResults[curProvider.name][MULTI_EP_RESULT].append(bestSeasonResult)
else:
foundResults[curProvider.name][MULTI_EP_RESULT] = [bestSeasonResult]
# go through multi-ep results and see if we really want them or not, get rid of the rest
multiResults = {}
if MULTI_EP_RESULT in foundResults[curProvider.name]:
for _multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
logger.log("Seeing if we want to bother with multi-episode result " + _multiResult.name, logger.DEBUG)
# Filter result by ignore/required/whitelist/blacklist/quality, etc
multiResult = pickBestResult(_multiResult, show)
if not multiResult:
continue
# see how many of the eps that this result covers aren't covered by single results
neededEps = []
notNeededEps = []
for epObj in multiResult.episodes:
# if we have results for the episode
if epObj.episode in foundResults[curProvider.name] and len(foundResults[curProvider.name][epObj.episode]) > 0:
notNeededEps.append(epObj.episode)
else:
neededEps.append(epObj.episode)
logger.log(
"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
logger.DEBUG)
if not neededEps:
logger.log("All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
continue
# check if these eps are already covered by another multi-result
multiNeededEps = []
multiNotNeededEps = []
for epObj in multiResult.episodes:
if epObj.episode in multiResults:
multiNotNeededEps.append(epObj.episode)
else:
multiNeededEps.append(epObj.episode)
logger.log(
"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
multiNotNeededEps), logger.DEBUG)
if not multiNeededEps:
logger.log(
"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
logger.DEBUG)
continue
# don't bother with the single result if we're going to get it with a multi result
for epObj in multiResult.episodes:
multiResults[epObj.episode] = multiResult
if epObj.episode in foundResults[curProvider.name]:
logger.log(
"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
epObj.episode) + ", removing the single-episode results from the list", logger.DEBUG)
del foundResults[curProvider.name][epObj.episode]
# of all the single ep results narrow it down to the best one for each episode
finalResults += set(multiResults.values())
for curEp in foundResults[curProvider.name]:
if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
continue
if not foundResults[curProvider.name][curEp]:
continue
# if all results were rejected move on to the next episode
bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
if not bestResult:
continue
        # add the result unless an overlapping result of equal or better quality is already in finalResults
found = False
for i, result in enumerate(finalResults):
for bestResultEp in bestResult.episodes:
if bestResultEp in result.episodes:
if result.quality < bestResult.quality:
finalResults.pop(i)
else:
found = True
if not found:
finalResults += [bestResult]
# check that we got all the episodes we wanted first before doing a match and snatch
wantedEpCount = 0
for wantedEp in episodes:
for result in finalResults:
if wantedEp in result.episodes and isFinalResult(result):
wantedEpCount += 1
# make sure we search every provider for results unless we found everything we wanted
if wantedEpCount == len(episodes):
break
if not didSearch:
logger.log("No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
logger.INFO)
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return finalResults
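# Typical call pattern (hypothetical, not part of the original module): a search
# queue item drives a backlog search roughly like
#
#   results = searchProviders(show, wanted_episodes, manualSearch=False)
#   for result in results:
#       snatchEpisode(result)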
| gpl-3.0 | -6,609,789,846,448,220,000 | 41.462079 | 174 | 0.621473 | false |
ghchinoy/tensorflow | tensorflow/python/tools/freeze_graph_test.py | 4 | 13446 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase):
def _testFreezeGraph(self, saver_write_version):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that has a single variable containing 1.0,
# and that then multiplies it by 2.
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver(write_version=saver_write_version)
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# We save out the graph to disk, and then call the const conversion
# routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_saver_def_path = ""
input_binary = False
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
clear_devices = False
freeze_graph.freeze_graph(
input_graph_path,
input_saver_def_path,
input_binary,
checkpoint_path,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph_path,
clear_devices,
"",
"",
"",
checkpoint_version=saver_write_version)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def _createTFExampleString(self, feature_name, feature_value):
"""Create a serialized tensorflow example."""
example = example_pb2.Example()
example.features.feature[feature_name].float_list.value.extend([
feature_value])
return example.SerializeToString()
def _writeDummySavedModel(self, path, feature_name):
"""Writes a classifier with two input features to the given path."""
with ops.Graph().as_default():
examples = array_ops.placeholder(dtypes.string, name="input_node")
feature_configs = {
feature_name: parsing_ops.FixedLenFeature(shape=[],
dtype=dtypes.float32),
}
features = parsing_ops.parse_example(examples, feature_configs)
feature = features[feature_name]
variable_node = variables.VariableV1(1.0, name="variable_node")
scores = math_ops.multiply(variable_node, feature, name="output_node")
class_feature = array_ops.fill(array_ops.shape(feature),
"class_%s" % feature_name)
classes = array_ops.transpose(class_feature)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
signature = (
signature_def_utils.classification_signature_def(
examples=examples,
classes=classes,
scores=scores,))
builder = saved_model_builder.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
sess,
[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature,
},)
builder.save(as_text=True)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV1(self):
self._testFreezeGraph(saver_pb2.SaverDef.V1)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV2(self):
self._testFreezeGraph(saver_pb2.SaverDef.V2)
def testFreezeMetaGraph(self):
tmp_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
checkpoint_state_name = "checkpoint_state"
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
input_saver_def_path = ""
input_binary = True
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
clear_devices = False
input_meta_graph = checkpoint_path + ".meta"
freeze_graph.freeze_graph(
"", input_saver_def_path, input_binary, checkpoint_path,
output_node_names, restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "", input_meta_graph)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def testFreezeSavedModel(self):
tmp_dir = self.get_temp_dir()
saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
feature_name = "feature"
self._writeDummySavedModel(saved_model_dir, feature_name)
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
input_saved_model_dir = saved_model_dir
output_node_names = "output_node"
input_binary = False
input_saver_def_path = False
restore_op_name = None
filename_tensor_name = None
clear_devices = False
input_meta_graph = False
checkpoint_path = None
input_graph_filename = None
saved_model_tags = tag_constants.SERVING
freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
input_binary, checkpoint_path, output_node_names,
restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "",
input_meta_graph, input_saved_model_dir,
saved_model_tags)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(8, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
feature_value = 2.0
example = self._createTFExampleString(feature_name, feature_value)
with session.Session() as sess:
input_node = sess.graph.get_tensor_by_name("input_node:0")
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node, feed_dict={input_node: [example]})
self.assertNear(feature_value, output, 0.00001)
def testSinglePartitionedVariable(self):
"""Ensures partitioned variables fail cleanly with freeze graph."""
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by an identity ->
# identity (an additional identity node).
partitioner = partitioned_variables.fixed_size_partitioner(1)
with ops.Graph().as_default():
with variable_scope.variable_scope("part", partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros(
(batch_size, height, width, depth), name="input1")
input2 = array_ops.zeros(
(batch_size, height, width, depth), name="input2")
num_nodes = depth
filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
conv = nn.conv2d(
input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
node = math_ops.add(conv, input2, name="test/add")
node = nn.relu6(node, name="test/relu6")
# Save graph and checkpoints.
sess = session.Session()
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# Ensure this graph has partition variables.
self.assertTrue([
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
])
# Test freezing graph doesn't make it crash.
output_node_names = "save/restore_all"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
with self.assertRaises(ValueError):
freeze_graph.freeze_graph_with_def_protos(
input_graph_def=sess.graph_def,
input_saver_def=None,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name="save/restore_all", # default value
filename_tensor_name="save/Const:0", # default value
output_graph=output_graph_path,
clear_devices=False,
initializer_nodes="")
if __name__ == "__main__":
test.main()
| apache-2.0 | -328,287,658,419,026,600 | 39.378378 | 80 | 0.65856 | false |
BeZazz/lamebench | nb_third_party/dns/rdtypes/IN/WKS.py | 248 | 4116 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import socket
import struct
import dns.ipv4
import dns.rdata
_proto_tcp = socket.getprotobyname('tcp')
_proto_udp = socket.getprotobyname('udp')
class WKS(dns.rdata.Rdata):
"""WKS record
@ivar address: the address
@type address: string
@ivar protocol: the protocol
@type protocol: int
@ivar bitmap: the bitmap
@type bitmap: string
@see: RFC 1035"""
__slots__ = ['address', 'protocol', 'bitmap']
def __init__(self, rdclass, rdtype, address, protocol, bitmap):
super(WKS, self).__init__(rdclass, rdtype)
self.address = address
self.protocol = protocol
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
bits = []
for i in xrange(0, len(self.bitmap)):
byte = ord(self.bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(str(i * 8 + j))
text = ' '.join(bits)
return '%s %d %s' % (self.address, self.protocol, text)
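    # Illustrative note (not part of the original source): the bitmap stores one
    # bit per port, most significant bit first, so a WKS record offering ports
    # 25 and 80 over protocol 6 (TCP) renders as something like "10.0.0.1 6 25 80".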
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
protocol = tok.get_string()
if protocol.isdigit():
protocol = int(protocol)
else:
protocol = socket.getprotobyname(protocol)
bitmap = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
serv = int(token.value)
else:
if protocol != _proto_udp and protocol != _proto_tcp:
raise NotImplementedError("protocol must be TCP or UDP")
if protocol == _proto_udp:
protocol_text = "udp"
else:
protocol_text = "tcp"
serv = socket.getservbyname(token.value, protocol_text)
i = serv // 8
l = len(bitmap)
if l < i + 1:
for j in xrange(l, i + 1):
bitmap.append('\x00')
bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (serv % 8)))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, address, protocol, bitmap)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
protocol = struct.pack('!B', self.protocol)
file.write(protocol)
file.write(self.bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + 4])
protocol, = struct.unpack('!B', wire[current + 4 : current + 5])
current += 5
rdlen -= 5
bitmap = wire[current : current + rdlen]
return cls(rdclass, rdtype, address, protocol, bitmap)
from_wire = classmethod(from_wire)
def _cmp(self, other):
sa = dns.ipv4.inet_aton(self.address)
oa = dns.ipv4.inet_aton(other.address)
v = cmp(sa, oa)
if v == 0:
sp = struct.pack('!B', self.protocol)
op = struct.pack('!B', other.protocol)
v = cmp(sp, op)
if v == 0:
v = cmp(self.bitmap, other.bitmap)
return v
| apache-2.0 | 446,422,037,357,367,600 | 35.424779 | 79 | 0.584305 | false |
kod3r/neon | neon/backends/tests/test_batched_dot.py | 10 | 3638 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
"""
test batched_dot behaviors of the NervanaCPU and NervanaGPU backends
against numpy.
In NervanaGPU, batched_dot supports N as either the inner or the outer dimension.
In NervanaCPU, only N as the inner dimension is supported, since this is what we use.
"""
import numpy as np
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.nervanacpu import NervanaCPU
from neon.backends.tests.utils import assert_tensors_allclose
size = 32 # size input for GPU - 32, 64, 128, None=auto
def setup_test_data(X, N, C, K, dtype):
dimW = (K, C)
dimI = (X, C, N)
dimO = (X, K, N)
cpuI = np.random.uniform(-1.0, 1.0, dimI).astype(dtype)
cpuE = np.random.uniform(-1.0, 1.0, dimO).astype(dtype)
cpuW = np.random.uniform(-1.0, 1.0, dimW).astype(dtype)
# set_trace()
return cpuI, cpuE, cpuW
def run_batched_dot(lib, I, E, W, X, dtype):
devI = lib.array(I, dtype=dtype)
devE = lib.array(E, dtype=dtype)
devW = lib.array(W, dtype=dtype)
devO = lib.zeros(E.shape, dtype=dtype)
devB = lib.zeros(I.shape, dtype=dtype)
devU = lib.zeros(W.shape, dtype=dtype)
if isinstance(lib, NervanaCPU):
lib.batched_dot(devW, devI, devO) # fprop
lib.batched_dot(devW.T, devE, devB) # bprop
lib.batched_dot(devE, devI.T, devU) # update
elif isinstance(lib, NervanaGPU):
lib.batched_dot(devW, devI, devO, size=size) # fprop
lib.batched_dot(devW.T, devE, devB, size=size) # bprop
lib.batched_dot(devE, devI.T, devU, size=size) # update
else:
# set_trace()
for i in range(X):
devO[i] = np.dot(W, I[i]) # fprop
devB[i] = np.dot(W.T, E[i]) # bprop
devU += np.dot(E[i], I[i].T) # update
return devO, devB, devU
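# Shape sketch for the three batched products above (explanatory only): with
# W of shape (K, C), I[i] of shape (C, N) and E[i] of shape (K, N),
#   fprop:  O[i] = W . I[i]      -> (K, N)
#   bprop:  B[i] = W.T . E[i]    -> (C, N)
#   update: U   += E[i] . I[i].T -> (K, C)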
def test_batched_dot():
np.set_printoptions(threshold=8192 * 4, linewidth=600,
formatter={'int': lambda x: "%2d" % x, 'float': lambda x: "%2.0f" % x})
ng = NervanaGPU(stochastic_round=False, bench=1)
nc = NervanaCPU()
dtype = np.float32 # np.float16 or np.float32
X = 100 # Batch Size
N = 32 # Minibatch Size
C = 1536 # Input Features
K = 768 # Output Features
cpuI, cpuE, cpuW = setup_test_data(X, N, C, K, dtype)
ngO, ngB, ngU = run_batched_dot(ng, cpuI, cpuE, cpuW, X, dtype)
ncO, ncB, ncU = run_batched_dot(nc, cpuI, cpuE, cpuW, X, dtype)
npO, npB, npU = run_batched_dot(np, cpuI, cpuE, cpuW, X, dtype)
# set_trace()
assert_tensors_allclose(npO, ngO, rtol=0, atol=1e-3)
assert_tensors_allclose(npB, ngB, rtol=0, atol=1e-3)
assert_tensors_allclose(npU, ngU, rtol=0, atol=1e-3)
assert_tensors_allclose(npO, ncO, rtol=0, atol=1e-3)
assert_tensors_allclose(npB, ncB, rtol=0, atol=1e-3)
assert_tensors_allclose(npU, ncU, rtol=0, atol=1e-3)
ng.ctx.detach()
del(ng)
| apache-2.0 | -6,557,122,120,701,619,000 | 33.980769 | 95 | 0.615173 | false |
cntnboys/410Lab6 | build/django/build/lib.linux-x86_64-2.7/django/db/backends/mysql/validation.py | 65 | 1311 | from django.core import checks
from django.db.backends import BaseDatabaseValidation
class DatabaseValidation(BaseDatabaseValidation):
def check_field(self, field, **kwargs):
"""
MySQL has the following field length restriction:
No character (varchar) fields can have a length exceeding 255
characters if they have a unique index on them.
"""
from django.db import connection
errors = super(DatabaseValidation, self).check_field(field, **kwargs)
# Ignore any related fields.
if getattr(field, 'rel', None) is None:
field_type = field.db_type(connection)
# Ignore any non-concrete fields
if field_type is None:
return errors
if (field_type.startswith('varchar') # Look for CharFields...
and field.unique # ... that are unique
and (field.max_length is None or int(field.max_length) > 255)):
errors.append(
checks.Error(
('MySQL does not allow unique CharFields to have a max_length > 255.'),
hint=None,
obj=field,
id='mysql.E001',
)
)
return errors
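# Illustrative trigger (hypothetical model field, not part of the original
# module): a declaration such as
#
#   models.CharField(max_length=300, unique=True)
#
# would be reported with id 'mysql.E001' by the check above when run against
# the MySQL backend.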
| apache-2.0 | -1,364,551,610,489,862,000 | 36.457143 | 95 | 0.55225 | false |
jostep/tensorflow | tensorflow/python/kernel_tests/string_to_number_op_test.py | 104 | 4041 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(test.TestCase):
def _test(self, tf_type, good_pairs, bad_pairs):
with self.test_session():
# Build a small testing graph.
input_string = array_ops.placeholder(dtypes.string)
output = parsing_ops.string_to_number(
input_string, out_type=tf_type)
# Check all the good input/output pairs.
for instr, outnum in good_pairs:
result, = output.eval(feed_dict={input_string: [instr]})
self.assertAllClose([outnum], [result])
# Check that the bad inputs produce the right errors.
for instr, outstr in bad_pairs:
with self.assertRaisesOpError(outstr):
output.eval(feed_dict={input_string: [instr]})
def testToFloat(self):
self._test(dtypes.float32,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", float("INF")),
("-3.40283e+38", float("-INF")),
# Less than min value of float.
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToDouble(self):
self._test(dtypes.float64,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", 3.40283e+38),
# Less than min value of float.
("-3.40283e+38", -3.40283e+38),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt32(self):
self._test(dtypes.int32,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647)],
[ # Less than min value of int32.
("-2147483649", _ERROR_MESSAGE + "-2147483649"),
# Greater than max value of int32.
("2147483648", _ERROR_MESSAGE + "2147483648"),
("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt64(self):
self._test(dtypes.int64,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647),
("-2147483649", -2147483649), # Less than min value of int32.
("2147483648", 2147483648)], # Greater than max value of int32.
[("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,205,451,113,576,852,000 | 39.41 | 80 | 0.539223 | false |
zhwei/cabric | cabric/perm.py | 1 | 2522 | # -*- coding: utf-8 -*-
from fabric.api import *
from cabric.cmd import cmd_expanduser,cmd_su
import os
def put_public_key(path=None, user=None):
"""
    Upload a public key to the remote server.
    Limit: standard OpenSSH key; it must end with a comment containing the user's email.
    :param path: local path of the public key
    :param user: remote username
:return:
"""
if os.path.exists(os.path.expanduser(path)) is False:
abort("public key not exist")
else:
        # Read the trailing comment to decide whether the key already exists; a key without a comment is treated as invalid.
fp = open(os.path.expanduser(path))
pub_key = fp.read()
pos = pub_key.rfind(" ")
mail = pub_key[pos + 1:].strip()
if mail.find('@') == -1:
abort('please add comment WHO YOU ARE.')
if user:
user_path = cmd_expanduser(user)
else:
user_path = '~'
remote_root = '%s/.ssh' % user_path
remote_path = '%s/authorized_keys' % remote_root
with settings(warn_only=True):
if run('test -d %s' % remote_root).failed:
cmd_su('mkdir %s' % remote_root, user)
if user:
run('chown %s.%s %s' % (user, user, remote_root))
put(path, '/tmp/tmp.pub', mode=0644)
cmd_su('grep %s %s | cat /tmp/tmp.pub >> %s' % (mail, remote_path, remote_path),user)
if user:
run('chown %s.%s %s' % (user, user, remote_path))
pass
def put_private_key(path=None, user=None):
"""
Upload private key to remote server
    Limit: Must be a standard key generated with ssh-keygen
:param path:local path
:param user:remote username
"""
if os.path.exists(os.path.expanduser(path)) is False:
abort("private key not exist")
else:
        # determine the key type (DSA vs RSA)
fp = open(os.path.expanduser(path))
private_key = fp.read()
pos = private_key.find("\n")
if private_key[0:pos].find('DSA') > -1:
dsa = True
else:
dsa = False
user_path = cmd_expanduser(user)
remote_root = '%s/.ssh' % user_path
if dsa:
remote_path = '%s/id_dsa' % remote_root
else:
remote_path = '%s/id_rsa' % remote_root
with settings(warn_only=True):
if run('test -d %s' % remote_root).failed:
if user:
run('chown -Rf %s.%s %s' % (user, user, user_path))
cmd_su('mkdir %s' % remote_root,user)
put(path, remote_path, mode=0600)
if user:
run('chown %s.%s %s' % (user, user, remote_path))
pass
| mit | 6,725,413,645,272,268,000 | 26.662921 | 89 | 0.559708 | false |
vvv1559/intellij-community | python/lib/Lib/site-packages/django/utils/_os.py | 71 | 2011 | import os
from os.path import join, normcase, normpath, abspath, isabs, sep
from django.utils.encoding import force_unicode
# Define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII
# characters in it. This isn't necessary on Windows since the
# Windows version of abspath handles this correctly. The Windows
# abspath also handles drive letters differently than the pure
# Python implementation, so it's best not to replace it.
if os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
# We need to use normcase to ensure we don't false-negative on case
# insensitive operating systems (like Windows).
base = force_unicode(base)
paths = [force_unicode(p) for p in paths]
final_path = normcase(abspathu(join(base, *paths)))
base_path = normcase(abspathu(base))
base_path_len = len(base_path)
# Ensure final_path starts with base_path and that the next character after
# the final path is os.sep (or nothing, in which case final_path must be
# equal to base_path).
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', sep):
raise ValueError('The joined path (%s) is located outside of the base '
'path component (%s)' % (final_path, base_path))
return final_path
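# Usage sketch (illustrative only, not part of the original module):
#
#   safe_join(u"/var/www", "static", "logo.png")   # -> u"/var/www/static/logo.png"
#   safe_join(u"/var/www", "../etc/passwd")        # raises ValueError
#
# (the exact casing of the result depends on normcase on the current platform)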
| apache-2.0 | 5,173,750,275,702,239,000 | 42.717391 | 79 | 0.688712 | false |
sparkslabs/kamaelia_ | Sketches/DK/Kamaelia-Paint/App/XYPad.py | 3 | 22735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# * Convert to vectors?
"""
=============
XY Pad Widget
=============
An XY pad widget with a draggable, bouncing puck. Pick up data on the
"outbox" outbox to receive the position of the puck and messages indicating
when it has touched one of the sides.
Example Usage
-------------
Create an XY pad which redraws 60 times per second:
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
clock = Clock(float(1)/60).activate()
xyPad = XYPad().activate()
clock.link((clock, "outbox"), (xyPad, "newframe"))
How Does it Work?
-----------------
The component requests a display surface from the Pygame Display service
component. This is used as the surface of the XY pad. It binds listeners for
mouse click and motion to the service.
The component works in one of two different modes, bouncing and non-bouncing.
This is specified upon initialization by the bouncingPuck argument.
In the bouncing mode the puck will continue to move once it has been set into
motion by a mouse drag. If the mouse button remains down for longer than 0.1
seconds it is deemed to be a drag. In the bouncing mode the component sends a
(message, 1) tuple to the "outbox" outbox each time the puck collides with one
of the sides. The messages can be changed using the collisionMsg argument.
They default to "top", "right", "bottom", "left".
In the non-bouncing mode the puck remains stationary after it has been dragged.
Both modes send a (positionMsg, (x, y)) tuple to the "outbox" outbox if the
puck moves.
If the editable argument to the constructor is set to be false the pad will not
respond to mouse presses.
As well as being controlled by the mouse an XY pad can be controlled externally,
for example by a second XY pad. Position and velocity messages received on the
"remoteChanges" inbox are used to change the motion of the puck. Position
messages are of the form ("Position", (xPos, yPos)), and velocity messages are
of the form ("Velocity", (xVel, yVel)).
In order to allow communication between two XY pads the component outputs
position and velocity messages to the "localChanges" outbox. By connecting the
"localChanges" outbox of one XY pad to the "remoteChanges" inbox of another,
the second pad can duplicate the motion of the first.
The XY pad only redraws the surface and updates the puck position when it
receives a message on its "newframe" inbox. Note that although providing
messages more frequently here will lead to more frequent updates, it will also
lead to higher CPU usage.
The visual appearance of the pad can be specified by arguments to the
constructor. The size, position and colours are all adjustable.
If a producerFinished or shutdownMicroprocess message is received on its
"control" inbox, it is passed on out of its "signal" outbox and the component
terminates.
"""
import time
import pygame
import Axon
from Axon.Ipc import producerFinished, WaitComplete
from Kamaelia.UI.Pygame.Display import PygameDisplay
from Kamaelia.UI.Pygame.Button import Button
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
class XYPad(Axon.Component.component):
"""\
XYPad([bouncingPuck, position, bgcolour, fgcolour, positionMsg,
collisionMsg, size]) -> new XYPad component.
Create an XY pad widget using the Pygame Display service. Sends messages
for position and direction changes out of its "outbox" outbox.
Keyword arguments (all optional):
bouncingPuck -- whether the puck will continue to move after it has been
dragged (default=True)
position -- (x,y) position of top left corner in pixels
bgcolour -- (r,g,b) fill colour (default=(255,255,255))
    fgcolour -- (r, g, b) colour of the puck and border
messagePrefix -- string to be prepended to all messages
positionMsg -- sent as the first element of a (positionMsg, 1) tuple when
the puck moves
collisionMsg -- (t, r, b, l) sent as the first element of a
(collisionMsg[i], 1) tuple when the puck hits a side
(default = ("top", "right", "bottom", "left"))
size -- (w,h) in pixels (default=(100, 100))
"""
Inboxes = {"inbox" : "Receive events from Pygame Display",
"remoteChanges" : "Receive messages to alter the state of the XY pad",
"control" : "For shutdown messages",
"callback" : "Receive callbacks from Pygame Display",
"newframe" : "Recieve messages indicating a new frame is to be drawn",
"buttons" : "Recieve interrupts from the buttons"
}
Outboxes = {"outbox" : "XY positions emitted here",
"localChanges" : "Messages indicating change in the state of the XY pad emitted here",
"signal" : "For shutdown messages",
"display_signal" : "Outbox used for communicating to the display surface"
}
def __init__(self, bouncingPuck=True, position=None,
bgcolour=(255, 255, 255), fgcolour=(0, 0, 0),
messagePrefix = "",
positionMsg="Position",
colours="RG",
selectedColour = (0,0,0),
saturator=False,
slider = False,
alpha = False,
colourSelector = False,
collisionMsg = ("Top", "Right", "Bottom", "Left"),
size=(100, 100), editable=True):
"""
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
"""
super(XYPad, self).__init__()
self.size = size
# Does the puck bounce around
self.bouncingPuck = bouncingPuck
# Is the puck currently bouncing around
self.isBouncing = False
self.selectedColour = selectedColour
self.mouseDown = False
self.clickTime = None
self.mousePositions = []
self.lastMousePos = (0, 0)
self.colourSelector = colourSelector
self.saturator = saturator
self.puckRadius = 10
self.puckPos = [self.size[0]/2, self.size[1]/2]
self.puckVel = [0, 0]
self.alpha = alpha
self.selectedAlpha = 255
self.slider = slider
self.selectedSize = 3
self.borderWidth = 5
self.bgcolour = bgcolour
self.fgcolour = fgcolour
self.colours = colours
self.messagePrefix = messagePrefix
self.positionMsg = positionMsg
self.collisionMsg = collisionMsg
self.editable = editable
self.dispRequest = {"DISPLAYREQUEST" : True,
"callback" : (self,"callback"),
"events" : (self, "inbox"),
"size": self.size,
}
if position:
self.dispRequest["position"] = position
def waitBox(self, boxName):
"""Wait for a message on boxName inbox"""
while 1:
if self.dataReady(boxName):
return
else:
yield 1
def main(self):
"""Main loop."""
# pgd = PygameDisplay( width=300, height=550 ).activate()
# PygameDisplay.setDisplayService(pgd)
displayservice = PygameDisplay.getDisplayService()
self.link((self,"display_signal"), displayservice)
self.send( self.dispRequest,
"display_signal")
for _ in self.waitBox("callback"): yield 1
self.display = self.recv("callback")
# colour buttons
if self.colourSelector:
rgbutton = Button(caption="Red/Green",position=(10,170), msg = ("Colour", "RG")).activate()
rbbutton = Button(caption="Red/Blue",position=(80,170), msg = ("Colour", "RB")).activate()
gbbutton = Button(caption="Green/Blue",position=(145,170), msg = ("Colour", "GB")).activate()
self.link( (rgbutton,"outbox"), (self,"buttons") )
self.link( (rbbutton,"outbox"), (self,"buttons") )
self.link( (gbbutton,"outbox"), (self,"buttons") )
# tool buttons
circleb = Button(caption="Circle",position=(10,10), msg = (("Tool", "Circle"),)).activate()
eraseb = Button(caption="Eraser",position=(100,10), msg = (("Tool", "Eraser"),)).activate()
lineb = Button(caption="Line",position=(10,50), msg = (("Tool", "Line"),)).activate()
bucketb = Button(caption="Bucket",position=(10,90), msg = (("Tool", "Bucket"),)).activate()
eyeb = Button(caption="Eyedropper",position=(10,130), msg = (("Tool", "Eyedropper"),)).activate()
addlayerb = Button(caption="Add Layer",position=(10,540), msg = (("Layer", "Add"),)).activate()
prevlayerb = Button(caption="<-",position=(80,540), msg = (("Layer", "Prev"),)).activate()
nextlayerb = Button(caption="->",position=(110,540), msg = (("Layer", "Next"),)).activate()
dellayerb = Button(caption="Delete",position=(140,540), msg = (("Layer", "Delete"),)).activate()
self.link( (circleb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (eraseb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (lineb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (bucketb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (eyeb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (addlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (prevlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (nextlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (dellayerb,"outbox"), (self,"outbox"), passthrough = 2 )
SizePicker = XYPad(size=(255, 50), bouncingPuck = False, position = (10, 480),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255), slider = True).activate()
self.link( (SizePicker,"outbox"), (self,"outbox"), passthrough = 2 )
AlphaPicker = XYPad(size=(255, 20), bouncingPuck = False, position = (10, 575),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255), slider = True, alpha = True).activate()
self.link( (AlphaPicker,"outbox"), (self,"outbox"), passthrough = 2 )
#clock - don't really need this
FPS = 60
clock = Clock(float(1)/FPS).activate()
self.link((clock, "outbox"), (self, "newframe"))
# Initial render so we don't see a blank screen
self.drawBG()
# self.render()
if self.editable:
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONUP,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEMOTION,
"surface" : self.display},
"display_signal")
done = False
while not done:
if not self.anyReady():
self.pause()
yield 1
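            # Palette-switch messages from the Red/Green, Red/Blue and Green/Blue buttons.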
while self.dataReady("buttons"):
bmsg = self.recv("buttons")
if bmsg[0]=="Colour":
self.colours = bmsg[1]
self.drawBG()
while self.dataReady("control"):
cmsg = self.recv("control")
if (isinstance(cmsg, producerFinished)):
self.send(cmsg, "signal")
done = True
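            # Raw pygame mouse events forwarded to us by the display service.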
while self.dataReady("inbox"):
for event in self.recv("inbox"):
if event.type == pygame.MOUSEBUTTONDOWN:
self.clickTime = time.time()
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if self.display.get_rect().collidepoint(*event.pos):
self.mouseDown = True
self.isBouncing = False
self.mousePositions = []
self.puckVel = [0, 0]
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
if event.type == pygame.MOUSEBUTTONUP:
if self.mouseDown:
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if (self.bouncingPuck and
time.time() - self.clickTime > 0.1):
# Click and drag
self.isBouncing = True
if len(self.mousePositions):
for i in xrange(2):
# Use the average of the last 50
# relative mouse positions
positions = [x[i] for x in self.mousePositions]
self.puckVel[i] = sum(positions)
self.puckVel[i] /= float(len(positions))
else:
# Just a click
self.puckVel = [0, 0]
self.render()
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
self.mouseDown = False
if event.type == pygame.MOUSEMOTION and self.mouseDown:
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if self.display.get_rect().collidepoint(*event.pos):
# We are dragging inside the display
# Keep a buffer of 50 mouse positions
if len(self.mousePositions) > 50:
del self.mousePositions[0]
relPos = []
for i in xrange(2):
relPos.append(event.pos[i] -
self.lastMousePos[i])
self.mousePositions.append(relPos)
# Move the puck to where the mouse is and remember
# where it is
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.render()
if self.dataReady("remoteChanges"):
bundle = self.recv("remoteChanges")
# The action to take is given by the last section of the
# OSC address - this should maybe be done by a component and
# we just listen for ("Velocity", (xVel, yVel)) tuples
action = bundle[0].split("/")[-1]
if action == "Velocity":
if self.bouncingPuck:
self.puckVel = bundle[1]
self.isBouncing = 1
elif action == "Position":
for i in xrange(2):
self.puckPos[i] = self.size[i] * bundle[1][i]
self.render()
if self.dataReady("newframe"):
# Time to render a new frame
# Clear any backlog of render messages
while self.dataReady("newframe"):
self.recv("newframe")
# Change the direction of the puck if it hits a wall
if self.isBouncing:
self.processCollisions()
if self.isBouncing:
# Update the position
for i in xrange(2):
self.puckPos[i] += self.puckVel[i]
self.render()
def processCollisions(self):
"""
Detect whether the puck has collided with a wall, and change its
direction appropriately
"""
if self.puckPos[0] <= 0:
# Left wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[3], 1), "outbox")
if self.puckPos[0] >= self.size[0]:
# Right wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[1], 1), "outbox")
if self.puckPos[1] <= 0:
# Top wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[0], 1), "outbox")
if self.puckPos[1] >= self.size[1]:
# Bottom wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[2], 1), "outbox")
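    # drawBG paints the static background: a plain slider track, a saturation
    # strip, or one of three two-dimensional colour gradients (RG, RB or GB).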
def drawBG(self):
if self.slider:
self.display.fill( (255,255,255) )
pygame.draw.rect(self.display, (0,0,0),
self.display.get_rect(), 2)
elif self.saturator:
for y in range(0, self.size[0], self.size[0]/25):
box = pygame.Rect(self.size[0]/2, y, 10, 10)
pygame.draw.rect(self.display, (self.selectedColour[0],self.selectedColour[1],self.selectedColour[2],y), box, 0)
elif self.colourSelector:
if (self.colours == "RG"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (x,y,0), box, 0)
elif (self.colours == "RB"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (x,0,y), box, 0)
elif (self.colours == "GB"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (0,x,y), box, 0)
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
def render(self):
"""Draw the border and puck onto the surface"""
# self.display.get_rect(), self.borderWidth)
if self.colourSelector:
if (self.colours == "RG"):
self.selectedColour = (self.puckPos[0], self.puckPos[1], 0)
elif (self.colours == "RB"):
self.selectedColour = (self.puckPos[0], 0, self.puckPos[1])
elif (self.colours == "GB"):
self.selectedColour = (0, self.puckPos[0], self.puckPos[1])
pygame.draw.rect(self.display, self.selectedColour,
self.display.get_rect(), self.borderWidth)
self.send((("colour",self.selectedColour),), "outbox")
if self.slider and not self.alpha:
# print float(self.size[1])/float(self.size[0])*self.sliderPos
self.selectedSize = float(self.size[1])/float(self.size[0])*self.sliderPos
self.send((("Size",self.selectedSize),), "outbox")
box = pygame.Rect(self.sliderPos, 0, 5, self.selectedSize)
pygame.draw.rect(self.display, (0,0,0),
box, 0)
if self.slider and self.alpha:
# print self.sliderPos
self.selectedAlpha = self.sliderPos
self.send((("Alpha",self.selectedAlpha),), "outbox")
box = pygame.Rect(self.sliderPos, 0, 5, 20)
pygame.draw.rect(self.display, (0,0,0),
box, 0)
# Puck
# pygame.draw.circle(self.display, self.fgcolour,
# [int(x) for x in self.puckPos], self.puckRadius)
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
if __name__ == "__main__":
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
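    # Stand-alone demo: wire the pad's outbox to a ConsoleEchoer so the
    # messages it emits can be inspected on the console.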
# xyPad = XYPad().activate()
xyPad2 = XYPad(size=(255, 255), bouncingPuck = False, position = (70, 0),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255),
positionMsg="p2").activate()
ce = ConsoleEchoer().activate()
# clock.link((clock, "outbox"), (xyPad, "newframe"))
# xyPad.link((xyPad, "outbox"), (ce,"inbox"))
xyPad2.link((xyPad2, "outbox"), (ce,"inbox"))
Axon.Scheduler.scheduler.run.runThreads()
# Licensed to the BBC under a Contributor Agreement: JT/DK
| apache-2.0 | 431,071,562,639,047,940 | 44.47 | 128 | 0.537717 | false |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/list_tests.py | 106 | 17676 | """
Tests common to list and UserList.UserList
"""
import sys
import os
from functools import cmp_to_key
from test import support, seq_tests
class CommonTest(seq_tests.CommonTest):
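    # self.type2test is supplied by the concrete subclass (list or
    # collections.UserList) and is used to build every fixture below.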
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(repr(a2), repr(l2))
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
l0 = []
for i in range(sys.getrecursionlimit() + 100):
l0 = [l0]
self.assertRaises(RuntimeError, repr, l0)
def test_print(self):
d = self.type2test(range(200))
d.append(d)
d.extend(range(200,400))
d.append(d)
d.append(400)
try:
with open(support.TESTFN, "w") as fo:
fo.write(str(d))
with open(support.TESTFN, "r") as fo:
self.assertEqual(fo.read(), repr(d))
finally:
os.remove(support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, next, r)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
# Bug 3689: make sure list-reversed-iterator doesn't have __len__
self.assertRaises(TypeError, len, reversed([1,2,3]))
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0] = 1
a[1] = 2
a[2] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2] = 88
a[-1] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
self.assertRaises(TypeError, a.__setitem__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertIs(x, y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in range(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_clear(self):
u = self.type2test([2, 3, 4])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.append(1)
u.clear()
u.append(2)
self.assertEqual(u, [2])
self.assertRaises(TypeError, u.clear, None)
def test_copy(self):
u = self.type2test([1, 2, 3])
v = u.copy()
self.assertEqual(v, [1, 2, 3])
u = self.type2test([])
v = u.copy()
self.assertEqual(v, [])
# test that it's indeed a copy and not a reference
u = self.type2test(['a', 'b'])
v = u.copy()
v.append('i')
self.assertEqual(u, ['a', 'b'])
self.assertEqual(v, u + ['i'])
# test that it's a shallow, not a deep copy
u = self.type2test([1, 2, [3, 4], 5])
v = u.copy()
self.assertEqual(u, v)
self.assertIs(v[3], u[3])
self.assertRaises(TypeError, u.copy, None)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
if a == b:
return 0
elif a < b:
return 1
else: # a > b
return -1
u.sort(key=cmp_to_key(revcmp))
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
xmod, ymod = x%3, y%7
if xmod == ymod:
return 0
elif xmod < ymod:
return -1
else: # xmod > ymod
return 1
z = self.type2test(range(12))
z.sort(key=cmp_to_key(myComparison))
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
if x == y:
return 0
elif x < y:
return -1
else: # x > y
return 1
self.assertRaises(ValueError, z.sort,
key=cmp_to_key(selfmodifyingComparison))
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super().test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assertIs(u, u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
# test issue7788
a = self.type2test(range(10))
del a[9::1<<333]
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
| lgpl-3.0 | 1,550,433,381,947,286,000 | 29.215385 | 81 | 0.500905 | false |
jakesyl/pychess | testing/draw.py | 21 | 1665 | import unittest
from pychess.Savers import pgn
from pychess.Utils.lutils import ldraw
class DrawTestCase(unittest.TestCase):
def setUp(self):
with open('gamefiles/3fold.pgn') as f1:
self.PgnFile1 = pgn.load(f1)
with open('gamefiles/bilbao.pgn') as f2:
self.PgnFile2 = pgn.load(f2)
with open('gamefiles/material.pgn') as f3:
self.PgnFile3 = pgn.load(f3)
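    # Each PGN fixture reaches its draw condition only on the final move, so
    # boards[-2] must not yet satisfy the rule while boards[-1] must.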
def test1(self):
"""Testing the same position, for the third time"""
for i, game in enumerate(self.PgnFile1.games):
model = self.PgnFile1.loadToModel(i)
lboard = model.boards[-2].board
self.assertTrue(lboard.repetitionCount() < 3)
lboard = model.boards[-1].board
self.assertEqual(lboard.repetitionCount(), 3)
def test2(self):
"""Testing the 50 move rule"""
for i, game in enumerate(self.PgnFile2.games):
model = self.PgnFile2.loadToModel(i)
lboard = model.boards[-2].board
self.assertEqual(ldraw.testFifty(lboard), False)
lboard = model.boards[-1].board
self.assertEqual(ldraw.testFifty(lboard), True)
def test3(self):
"""Testing too few material"""
for i, game in enumerate(self.PgnFile3.games):
model = self.PgnFile3.loadToModel(i)
lboard = model.boards[-2].board
self.assertEqual(ldraw.testMaterial(lboard), False)
lboard = model.boards[-1].board
self.assertEqual(ldraw.testMaterial(lboard), True)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 8,872,808,236,522,490,000 | 29.735849 | 63 | 0.587387 | false |
chirilo/kitsune | kitsune/search/tests/test_plugin.py | 15 | 1279 | from django.contrib.sites.models import Site
import mock
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
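# Site.objects.get_current is mocked below so the tests never hit the
# django_site table and always see the domain 'testserver'.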
class OpenSearchTestCase(TestCase):
"""Test the SUMO OpenSearch plugin."""
@mock.patch.object(Site.objects, 'get_current')
def test_plugin(self, get_current):
"""The plugin loads with the correct mimetype."""
get_current.return_value.domain = 'testserver'
response = self.client.get(reverse('search.plugin',
locale='en-US'))
eq_(200, response.status_code)
assert 'expires' in response
eq_('application/opensearchdescription+xml', response['content-type'])
@mock.patch.object(Site.objects, 'get_current')
def test_localized_plugin(self, get_current):
"""Every locale gets its own plugin!"""
get_current.return_value.domain = 'testserver'
response = self.client.get(reverse('search.plugin',
locale='en-US'))
assert '/en-US/search' in response.content
response = self.client.get(reverse('search.plugin',
locale='fr'))
assert '/fr/search' in response.content
| bsd-3-clause | 6,138,544,491,154,991,000 | 35.542857 | 78 | 0.615324 | false |
CognetTestbed/COGNET_CODE | LIB_NETLINK/libnl-3-android-nogit/python/netlink/route/links/vlan.py | 13 | 1943 | #
# Copyright (c) 2011 Thomas Graf <[email protected]>
#
"""VLAN network link
"""
from __future__ import absolute_import
from ... import core as netlink
from .. import capi as capi
class VLANLink(object):
def __init__(self, link):
self._link = link
@property
@netlink.nlattr(type=int)
def id(self):
"""vlan identifier"""
return capi.rtnl_link_vlan_get_id(self._link)
@id.setter
def id(self, value):
capi.rtnl_link_vlan_set_id(self._link, int(value))
@property
@netlink.nlattr(type=str)
def flags(self):
""" VLAN flags
        Setting this property will *not* reset the existing flags; the value you
        supply is applied incrementally (flags are added or removed).
Examples:
link.flags = '+xxx' # add xxx flag
link.flags = 'xxx' # exactly the same
link.flags = '-xxx' # remove xxx flag
link.flags = [ '+xxx', '-yyy' ] # list operation
"""
flags = capi.rtnl_link_vlan_get_flags(self._link)
return capi.rtnl_link_vlan_flags2str(flags, 256)[0].split(',')
def _set_flag(self, flag):
if flag.startswith('-'):
i = capi.rtnl_link_vlan_str2flags(flag[1:])
capi.rtnl_link_vlan_unset_flags(self._link, i)
elif flag.startswith('+'):
i = capi.rtnl_link_vlan_str2flags(flag[1:])
capi.rtnl_link_vlan_set_flags(self._link, i)
else:
i = capi.rtnl_link_vlan_str2flags(flag)
capi.rtnl_link_vlan_set_flags(self._link, i)
@flags.setter
def flags(self, value):
if type(value) is list:
for flag in value:
self._set_flag(flag)
else:
self._set_flag(value)
###################################################################
# TODO:
# - ingress map
# - egress map
def brief(self):
return 'vlan-id {0}'.format(self.id)
def init(link):
link.vlan = VLANLink(link._link)
return link.vlan
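# Rough usage sketch (illustrative only -- in the real bindings init() is run
# for links whose type is "vlan", exposing the helper object as link.vlan):
#
#     vlan = init(link)              # or simply: link.vlan
#     vlan.id = 100                  # tag traffic with VLAN id 100
#     vlan.flags = '+loose_binding'  # flag names follow rtnl_link_vlan_str2flags()
#     print(vlan.brief())            # -> 'vlan-id 100'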
| gpl-3.0 | 2,117,081,151,696,543,000 | 26.366197 | 75 | 0.545548 | false |
pixelrebel/st2 | st2actions/st2actions/resultstracker/resultstracker.py | 5 | 4285 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import six
from collections import defaultdict
from kombu import Connection
from st2common.query.base import QueryContext
from st2common import log as logging
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.persistence.executionstate import ActionExecutionState
from st2common.transport import actionexecutionstate, consumers, publishers
from st2common.transport import utils as transport_utils
from st2common.util.loader import register_query_module
LOG = logging.getLogger(__name__)
ACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',
routing_key=publishers.CREATE_RK)
class ResultsTracker(consumers.MessageHandler):
message_type = ActionExecutionStateDB
def __init__(self, connection, queues):
super(ResultsTracker, self).__init__(connection, queues)
self._queriers = {}
self._query_threads = []
self._failed_imports = set()
def start(self, wait=False):
self._bootstrap()
super(ResultsTracker, self).start(wait=wait)
def wait(self):
super(ResultsTracker, self).wait()
for thread in self._query_threads:
thread.wait()
def shutdown(self):
super(ResultsTracker, self).shutdown()
LOG.info('Stats from queriers:')
self._print_stats()
def _print_stats(self):
for _, querier in six.iteritems(self._queriers):
if querier:
querier.print_stats()
def _bootstrap(self):
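        # Re-register query contexts persisted in the DB (e.g. from before a
        # restart) so that their queries resume running.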
all_states = ActionExecutionState.get_all()
LOG.info('Found %d pending states in db.' % len(all_states))
query_contexts_dict = defaultdict(list)
for state_db in all_states:
try:
context = QueryContext.from_model(state_db)
except:
LOG.exception('Invalid state object: %s', state_db)
continue
query_module_name = state_db.query_module
querier = self.get_querier(query_module_name)
if querier is not None:
query_contexts_dict[querier].append(context)
for querier, contexts in six.iteritems(query_contexts_dict):
LOG.info('Found %d pending actions for query module %s', len(contexts), querier)
querier.add_queries(query_contexts=contexts)
def process(self, query_context):
querier = self.get_querier(query_context.query_module)
context = QueryContext.from_model(query_context)
querier.add_queries(query_contexts=[context])
return
def get_querier(self, query_module_name):
if (query_module_name not in self._queriers and
query_module_name not in self._failed_imports):
try:
query_module = register_query_module(query_module_name)
except:
LOG.exception('Failed importing query module: %s', query_module_name)
self._failed_imports.add(query_module_name)
self._queriers[query_module_name] = None
else:
querier = query_module.get_instance()
self._queriers[query_module_name] = querier
self._query_threads.append(eventlet.spawn(querier.start))
return self._queriers[query_module_name]
def get_tracker():
with Connection(transport_utils.get_messaging_urls()) as conn:
return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])
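# get_tracker() is the factory intended for the results-tracker service entry
# point; the returned consumer is started with .start() and stopped with .shutdown().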
| apache-2.0 | 7,826,983,016,845,986,000 | 37.603604 | 92 | 0.665578 | false |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/walla.py | 27 | 2976 | # coding: utf-8
from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
xpath_text,
int_or_none,
)
class WallaIE(SubtitlesInfoExtractor):
_VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
_TEST = {
'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
'info_dict': {
'id': '2642630',
'display_id': 'one-direction-all-for-one',
'ext': 'flv',
'title': 'וואן דיירקשן: ההיסטריה',
'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
}
}
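    # Maps the subtitle language label used on the site to a language code
    # ('עברית' is Hebrew).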
_SUBTITLE_LANGS = {
'עברית': 'heb',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
video = self._download_xml(
'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
display_id)
item = video.find('./items/item')
title = xpath_text(item, './title', 'title')
description = xpath_text(item, './synopsis', 'description')
thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
duration = int_or_none(xpath_text(item, './duration', 'duration'))
subtitles = {}
for subtitle in item.findall('./subtitles/subtitle'):
lang = xpath_text(subtitle, './title')
subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src')
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
formats = []
for quality in item.findall('./qualities/quality'):
format_id = xpath_text(quality, './title')
fmt = {
'url': 'rtmp://wafla.walla.co.il/vod',
'play_path': xpath_text(quality, './src'),
'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
'page_url': url,
'ext': 'flv',
'format_id': xpath_text(quality, './title'),
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 | 3,063,681,558,481,189,400 | 32.168539 | 101 | 0.521341 | false |