code | repo_name | path | language | license | size
---|---|---|---|---|---
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Category
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
class CategoryLinksConfigForm(GenericPluginForm):
"""
A configuration form for the CategoryLinksPlugin
"""
def populate(self):
"""
A custom populate method to display category choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
self.fields["categories"] = forms.ModelMultipleChoiceField(
queryset=Category.objects.all_visible(customer=None),
required=False,
initial=self.plugin.config.get("categories", None),
)
def clean(self):
"""
A custom clean method to save category configuration information in a serializable form
"""
cleaned_data = super(CategoryLinksConfigForm, self).clean()
categories = cleaned_data.get("categories", [])
cleaned_data["categories"] = [category.pk for category in categories if hasattr(category, "pk")]
return cleaned_data
class CategoryLinksPlugin(TemplatedPlugin):
"""
A plugin for displaying links to visible categories on the shop front
"""
identifier = "category_links"
name = _("Category Links")
template_name = "shuup/xtheme/plugins/category_links.jinja"
editor_form_class = CategoryLinksConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("show_all_categories", forms.BooleanField(
label=_("Show all categories"),
required=False,
initial=True,
help_text=_("All categories are shown, even if not selected"),
)),
"categories",
]
def get_context_data(self, context):
"""
        A custom get_context_data method to return only the categories
        visible to the request's customer.
"""
selected_categories = self.config.get("categories", [])
show_all_categories = self.config.get("show_all_categories", True)
request = context.get("request")
categories = Category.objects.all_visible(
            customer=getattr(request, "customer", None),
            shop=getattr(request, "shop", None)
)
if not show_all_categories:
categories = categories.filter(id__in=selected_categories)
return {
"title": self.get_translated_value("title"),
"categories": categories,
}
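
# Added note (not part of the original plugin): plugin config is persisted
# as JSON, so clean() must reduce Category instances to serializable values;
# hence the primary-key list comprehension above. A framework-free sketch of
# the same round-trip:
#
#     class FakeCategory(object):
#         def __init__(self, pk):
#             self.pk = pk
#
#     chosen = [FakeCategory(1), FakeCategory(2)]
#     config = {"categories": [c.pk for c in chosen if hasattr(c, "pk")]}
#     assert config == {"categories": [1, 2]}   # safe to JSON-serialize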
| shawnadelic/shuup | shuup/xtheme/plugins/category_links.py | Python | agpl-3.0 | 2,994 |
# -*- coding: utf-8 -*-
from .base import BaseHandler
class TestRoute(BaseHandler):
def get(self, file):
return self.render(str(file) + '.jade', show_h1=1)
| web-izmerenie/avto-lux161 | avto-lux/app/core/routes/testroute.py | Python | agpl-3.0 | 162 |
import sys
import time
num = 1000
print_granularity = 1000
count = 0
first = True
start = 0
gran_start = 0
min = 0
max = 0
avg = 0
sum = 0
total = 0
def set_print_granularity(p):
global print_granularity
print_granularity = p
print("%s: print granularity = %s" % (sys.argv[0], print_granularity))
def loop_count():
    global min, max, avg, total, gran_start, sum, start, first, count
    now = round(time.time() * 1000)
    # Update per-iteration stats; skipped on the first call, which has no
    # previous timestamp to measure against.
    if not first:
        elapsed = now - start
        if elapsed < min: min = elapsed
        if elapsed > max: max = elapsed
        sum = sum + elapsed
    start = now
    count = count + 1
    total = total + 1
    # Every print_granularity iterations, report the window's stats.
    if count % print_granularity == 0 and not first:
        avg = sum / print_granularity
        print("%s: last %s run stats in msec \t\t elapsed = %s \t min = %s \t max = %s \t avg = %s \t\t total loops = %s" % (sys.argv[0], print_granularity, sum, min, max, avg, total))
        # sys.stdout.write("-")
        # sys.stdout.flush()
    # Reset the window stats (and initialize them on the first call).
    if first or count % print_granularity == 0:
        gran_start = now
        min = 10e10
        max = -10e10
        avg = 0
        sum = 0
    first = False
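
# Added usage sketch (assumption -- the original module ships no driver):
# the intended pattern is to call loop_count() once per iteration of the
# loop being measured, e.g.:
if __name__ == "__main__":
    set_print_granularity(100)
    for _ in range(1000):
        loop_count()
        time.sleep(0.001)  # stand-in for the work being timed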
| scalien/keyspace | test/concurrency/common.py | Python | agpl-3.0 | 1,105 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (c) 2015 Odoo S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class StockPicking(models.Model):
_inherit = 'stock.picking'
carrier_price = fields.Float(string="Shipping Cost", readonly=True)
delivery_type = fields.Selection(related='carrier_id.delivery_type', readonly=True)
@api.multi
def do_transfer(self):
res = super(StockPicking, self).do_transfer()
if self.carrier_id and self.carrier_id.delivery_type != 'grid':
self.send_to_shipper()
return res
# Signature due to strange old api methods
@api.model
def _prepare_shipping_invoice_line(self, picking, invoice):
picking.ensure_one()
invoice.ensure_one()
carrier = picking.carrier_id
# No carrier
if not carrier:
return None
# Carrier already invoiced on the sale order
if any(inv_line.product_id.id == carrier.product_id.id for inv_line in invoice.invoice_line_ids):
return None
# Classic carrier
if carrier.delivery_type == 'grid':
return super(StockPicking, self)._prepare_shipping_invoice_line(picking, invoice)
# Shipping provider
price = picking.carrier_price
account_id = carrier.product_id.property_account_income.id
if not account_id:
account_id = carrier.product_id.categ_id.property_account_income_categ.id
taxes = carrier.product_id.taxes_id
taxes_ids = taxes.ids
# Apply original SO fiscal position
if picking.sale_id.fiscal_position_id:
fpos = picking.sale_id.fiscal_position_id
account_id = fpos.map_account(account_id)
taxes_ids = fpos.map_tax(taxes).ids
res = {
'name': carrier.name,
'invoice_id': invoice.id,
'uos_id': carrier.product_id.uos_id.id,
'product_id': carrier.product_id.id,
'account_id': account_id,
'price_unit': price,
'quantity': 1,
'invoice_line_tax_ids': [(6, 0, taxes_ids)],
}
return res
@api.one
def send_to_shipper(self):
res = self.carrier_id.send_shipping(self)[0]
self.carrier_price = res['exact_price']
self.carrier_tracking_ref = res['tracking_number']
msg = "Shipment sent to carrier %s for expedition with tracking number %s" % (self.carrier_id.name, self.carrier_tracking_ref)
self.message_post(body=msg)
@api.multi
def open_website_url(self):
self.ensure_one()
client_action = {'type': 'ir.actions.act_url',
'name': "Shipment Tracking Page",
'target': 'new',
'url': self.carrier_id.get_tracking_link(self)[0]
}
return client_action
@api.one
def cancel_shipment(self):
self.carrier_id.cancel_shipment(self)
msg = "Shipment %s cancelled" % self.carrier_tracking_ref
self.message_post(body=msg)
self.carrier_tracking_ref = False
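
# Added usage sketch (assumes an Odoo 8-style environment; names below are
# illustrative, not defined in this module): the dict returned by
# _prepare_shipping_invoice_line() is shaped for account.invoice.line
# creation, so a caller would typically do something like:
#
#     vals = picking._prepare_shipping_invoice_line(picking, invoice)
#     if vals is not None:
#         invoice_line_model.create(vals)  # e.g. self.env['account.invoice.line']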
| tvtsoft/odoo8 | addons/delivery/models/stock_picking.py | Python | agpl-3.0 | 4,025 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from django.conf import settings
from shuup.core.models import Shipment, ShippingStatus, StockBehavior
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
from shuup.utils.excs import Problem
@pytest.mark.django_db
def test_shipment_identifier():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
shipment = order.create_shipment({line.product: 1}, supplier=supplier)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
assert order.shipping_status == ShippingStatus.FULLY_SHIPPED # Check that order is now fully shipped
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_creation_from_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
unsaved_shipment = Shipment(order=order, supplier=supplier)
shipment = order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
@pytest.mark.django_db
def test_shipment_creation_without_supplier_and_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
order.create_shipment({line.product: 1})
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_shipment_creation_with_invalid_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
second_order = create_empty_order(shop=shop)
second_order.full_clean()
second_order.save()
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
unsaved_shipment = Shipment(supplier=supplier, order=second_order)
order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_partially_shipped_order_status():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_delete():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
shipment = order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
# Test shipment delete
shipment.soft_delete()
assert order.shipments.all().count() == 1
assert order.shipments.all_except_deleted().count() == 0
# Check the shipping status update
assert order.shipping_status == ShippingStatus.NOT_SHIPPED
@pytest.mark.django_db
def test_shipment_with_insufficient_stock():
if "shuup.simple_supplier" not in settings.INSTALLED_APPS:
pytest.skip("Need shuup.simple_supplier in INSTALLED_APPS")
from shuup_tests.simple_supplier.utils import get_simple_supplier
shop = get_default_shop()
supplier = get_simple_supplier()
order = _get_order(shop, supplier, stocked=True)
product_line = order.lines.products().first()
product = product_line.product
assert product_line.quantity == 15
supplier.adjust_stock(product.pk, delta=10)
stock_status = supplier.get_stock_status(product.pk)
assert stock_status.physical_count == 10
order.create_shipment({product: 5}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
with pytest.raises(Problem):
order.create_shipment({product: 10}, supplier=supplier)
# Should be fine after adding more stock
supplier.adjust_stock(product.pk, delta=5)
order.create_shipment({product: 10}, supplier=supplier)
def _get_order(shop, supplier, stocked=False):
order = create_empty_order(shop=shop)
order.full_clean()
order.save()
for product_data in _get_product_data(stocked):
quantity = product_data.pop("quantity")
product = create_product(
sku=product_data.pop("sku"),
shop=shop,
supplier=supplier,
default_price=3.33,
**product_data)
add_product_to_order(order, supplier, product, quantity=quantity, taxless_base_unit_price=1)
order.cache_prices()
order.check_all_verified()
order.save()
return order
def _get_product_data(stocked=False):
return [
{
"sku": "sku1234",
"net_weight": decimal.Decimal("1"),
"gross_weight": decimal.Decimal("43.34257"),
"quantity": decimal.Decimal("15"),
"stock_behavior": StockBehavior.STOCKED if stocked else StockBehavior.UNSTOCKED
}
]
| hrayr-artunyan/shuup | shuup_tests/core/test_shipments.py | Python | agpl-3.0 | 6,394 |
"""
Test cases to cover Accounts-related behaviors of the User API application
"""
import datetime
import hashlib
import json
from copy import deepcopy
from unittest import mock
import ddt
import pytz
from django.conf import settings
from django.test.testcases import TransactionTestCase
from django.test.utils import override_settings
from django.urls import reverse
from edx_name_affirmation.api import create_verified_name
from edx_name_affirmation.statuses import VerifiedNameStatus
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from common.djangoapps.student.models import PendingEmailChange, UserProfile
from common.djangoapps.student.tests.factories import TEST_PASSWORD, RegistrationFactory, UserFactory
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_for_user
from openedx.core.djangoapps.user_api.accounts import ACCOUNT_VISIBILITY_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from .. import ALL_USERS_VISIBILITY, CUSTOM_VISIBILITY, PRIVATE_VISIBILITY
TEST_PROFILE_IMAGE_UPLOADED_AT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=pytz.UTC)
# this is used in one test to check the behavior of profile image url
# generation with a relative url in the config.
TEST_PROFILE_IMAGE_BACKEND = deepcopy(settings.PROFILE_IMAGE_BACKEND)
TEST_PROFILE_IMAGE_BACKEND['options']['base_url'] = '/profile-images/'
TEST_BIO_VALUE = "Tired mother of twins"
TEST_LANGUAGE_PROFICIENCY_CODE = "hi"
class UserAPITestCase(APITestCase):
"""
The base class for all tests of the User API
"""
VERIFIED_NAME = "Verified User"
def setUp(self):
super().setUp()
self.anonymous_client = APIClient()
self.different_user = UserFactory.create(password=TEST_PASSWORD)
self.different_client = APIClient()
self.staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
self.staff_client = APIClient()
self.user = UserFactory.create(password=TEST_PASSWORD) # will be assigned to self.client by default
def login_client(self, api_client, user):
"""Helper method for getting the client and user and logging in. Returns client. """
client = getattr(self, api_client)
user = getattr(self, user)
client.login(username=user.username, password=TEST_PASSWORD)
return client
def send_post(self, client, json_data, content_type='application/json', expected_status=201):
"""
Helper method for sending a post to the server, defaulting to application/json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.post(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def send_patch(self, client, json_data, content_type="application/merge-patch+json", expected_status=200):
"""
Helper method for sending a patch to the server, defaulting to application/merge-patch+json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.patch(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def post_search_api(self, client, json_data, content_type='application/json', expected_status=200):
"""
        Helper method for sending a post to the server, defaulting to application/json content_type.
Verifies the expected status and returns the response.
"""
# pylint: disable=no-member
response = client.post(self.search_api_url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
def send_get(self, client, query_parameters=None, expected_status=200):
"""
Helper method for sending a GET to the server. Verifies the expected status and returns the response.
"""
url = self.url + '?' + query_parameters if query_parameters else self.url # pylint: disable=no-member
response = client.get(url)
assert expected_status == response.status_code
return response
# pylint: disable=no-member
def send_put(self, client, json_data, content_type="application/json", expected_status=204):
"""
Helper method for sending a PUT to the server. Verifies the expected status and returns the response.
"""
response = client.put(self.url, data=json.dumps(json_data), content_type=content_type)
assert expected_status == response.status_code
return response
# pylint: disable=no-member
def send_delete(self, client, expected_status=204):
"""
Helper method for sending a DELETE to the server. Verifies the expected status and returns the response.
"""
response = client.delete(self.url)
assert expected_status == response.status_code
return response
def create_mock_profile(self, user):
"""
Helper method that creates a mock profile for the specified user
:return:
"""
legacy_profile = UserProfile.objects.get(id=user.id)
legacy_profile.country = "US"
legacy_profile.state = "MA"
legacy_profile.level_of_education = "m"
legacy_profile.year_of_birth = 2000
legacy_profile.goals = "world peace"
legacy_profile.mailing_address = "Park Ave"
legacy_profile.gender = "f"
legacy_profile.bio = TEST_BIO_VALUE
legacy_profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOADED_AT
legacy_profile.language_proficiencies.create(code=TEST_LANGUAGE_PROFICIENCY_CODE)
legacy_profile.phone_number = "+18005555555"
legacy_profile.save()
def create_mock_verified_name(self, user):
"""
Helper method to create an approved VerifiedName entry in name affirmation.
"""
legacy_profile = UserProfile.objects.get(id=user.id)
create_verified_name(user, self.VERIFIED_NAME, legacy_profile.name, status=VerifiedNameStatus.APPROVED)
def create_user_registration(self, user):
"""
Helper method that creates a registration object for the specified user
"""
RegistrationFactory(user=user)
def _verify_profile_image_data(self, data, has_profile_image):
"""
Verify the profile image data in a GET response for self.user
corresponds to whether the user has or hasn't set a profile
image.
"""
template = '{root}/{filename}_{{size}}.{extension}'
if has_profile_image:
url_root = 'http://example-storage.com/profile-images'
filename = hashlib.md5(('secret' + self.user.username).encode('utf-8')).hexdigest()
file_extension = 'jpg'
template += '?v={}'.format(TEST_PROFILE_IMAGE_UPLOADED_AT.strftime("%s"))
else:
url_root = 'http://testserver/static'
filename = 'default'
file_extension = 'png'
template = template.format(root=url_root, filename=filename, extension=file_extension)
assert data['profile_image'] == {'has_image': has_profile_image,
'image_url_full': template.format(size=50),
'image_url_small': template.format(size=10)}
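
# Added sketch (not part of the original tests): the filename asserted in
# _verify_profile_image_data above is simply the md5 hex digest of
# secret-key + username. A standalone illustration using the already
# imported hashlib module:
def _demo_profile_image_filename(secret, username):
    """Illustrative only; mirrors the convention the tests assert."""
    return hashlib.md5((secret + username).encode('utf-8')).hexdigest()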
@ddt.ddt
@skip_unless_lms
class TestOwnUsernameAPI(CacheIsolationTestCase, UserAPITestCase):
"""
Unit tests for the Accounts API.
"""
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.url = reverse("own_username_api")
def _verify_get_own_username(self, queries, expected_status=200):
"""
Internal helper to perform the actual assertion
"""
with self.assertNumQueries(queries):
response = self.send_get(self.client, expected_status=expected_status)
if expected_status == 200:
data = response.data
assert 1 == len(data)
assert self.user.username == data['username']
def test_get_username(self):
"""
Test that a client (logged in) can get her own username.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self._verify_get_own_username(17)
def test_get_username_inactive(self):
"""
Test that a logged-in client can get their
username, even if inactive.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.user.is_active = False
self.user.save()
self._verify_get_own_username(17)
def test_get_username_not_logged_in(self):
"""
Test that a client (not logged in) gets a 401
when trying to retrieve their username.
"""
# verify that the endpoint is inaccessible when not logged in
self._verify_get_own_username(13, expected_status=401)
@ddt.ddt
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.user_api.accounts.image_helpers._PROFILE_IMAGE_SIZES', [50, 10])
@mock.patch.dict(
'django.conf.settings.PROFILE_IMAGE_SIZES_MAP',
{'full': 50, 'small': 10},
clear=True
)
class TestAccountsAPI(CacheIsolationTestCase, UserAPITestCase):
"""
Unit tests for the Accounts API.
"""
ENABLED_CACHES = ['default']
TOTAL_QUERY_COUNT = 27
FULL_RESPONSE_FIELD_COUNT = 30
def setUp(self):
super().setUp()
self.url = reverse("accounts_api", kwargs={'username': self.user.username})
self.search_api_url = reverse("accounts_search_emails_api")
def _set_user_age_to_10_years(self, user):
"""
Sets the given user's age to 10.
Returns the calculated year of birth.
"""
legacy_profile = UserProfile.objects.get(id=user.id)
current_year = datetime.datetime.now().year
year_of_birth = current_year - 10
legacy_profile.year_of_birth = year_of_birth
legacy_profile.save()
return year_of_birth
def _verify_full_shareable_account_response(self, response, account_privacy=None, badges_enabled=False):
"""
Verify that the shareable fields from the account are returned
"""
data = response.data
assert 12 == len(data)
# public fields (3)
assert account_privacy == data['account_privacy']
self._verify_profile_image_data(data, True)
assert self.user.username == data['username']
# additional shareable fields (8)
assert TEST_BIO_VALUE == data['bio']
assert 'US' == data['country']
assert data['date_joined'] is not None
assert [{'code': TEST_LANGUAGE_PROFICIENCY_CODE}] == data['language_proficiencies']
assert 'm' == data['level_of_education']
assert data['social_links'] is not None
assert data['time_zone'] is None
assert badges_enabled == data['accomplishments_shared']
def _verify_private_account_response(self, response, requires_parental_consent=False):
"""
Verify that only the public fields are returned if a user does not want to share account fields
"""
data = response.data
assert 3 == len(data)
assert PRIVATE_VISIBILITY == data['account_privacy']
self._verify_profile_image_data(data, not requires_parental_consent)
assert self.user.username == data['username']
def _verify_full_account_response(self, response, requires_parental_consent=False, year_of_birth=2000):
"""
Verify that all account fields are returned (even those that are not shareable).
"""
data = response.data
assert self.FULL_RESPONSE_FIELD_COUNT == len(data)
# public fields (3)
expected_account_privacy = (
PRIVATE_VISIBILITY if requires_parental_consent else
UserPreference.get_value(self.user, 'account_privacy')
)
assert expected_account_privacy == data['account_privacy']
self._verify_profile_image_data(data, not requires_parental_consent)
assert self.user.username == data['username']
        # additional shareable fields (10)
assert TEST_BIO_VALUE == data['bio']
assert 'US' == data['country']
assert data['date_joined'] is not None
assert data['last_login'] is not None
assert [{'code': TEST_LANGUAGE_PROFICIENCY_CODE}] == data['language_proficiencies']
assert 'm' == data['level_of_education']
assert data['social_links'] is not None
assert UserPreference.get_value(self.user, 'time_zone') == data['time_zone']
assert data['accomplishments_shared'] is not None
assert ((self.user.first_name + ' ') + self.user.last_name) == data['name']
# additional admin fields (13)
assert self.user.email == data['email']
assert self.user.id == data['id']
assert self.VERIFIED_NAME == data['verified_name']
assert data['extended_profile'] is not None
assert 'MA' == data['state']
assert 'f' == data['gender']
assert 'world peace' == data['goals']
assert data['is_active']
assert 'Park Ave' == data['mailing_address']
assert requires_parental_consent == data['requires_parental_consent']
assert data['secondary_email'] is None
assert data['secondary_email_enabled'] is None
assert year_of_birth == data['year_of_birth']
def test_anonymous_access(self):
"""
Test that an anonymous client (not logged in) cannot call GET or PATCH.
"""
self.send_get(self.anonymous_client, expected_status=401)
self.send_patch(self.anonymous_client, {}, expected_status=401)
def test_unsupported_methods(self):
"""
Test that DELETE, POST, and PUT are not supported.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
assert 405 == self.client.put(self.url).status_code
assert 405 == self.client.post(self.url).status_code
assert 405 == self.client.delete(self.url).status_code
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_get_account_unknown_user(self, api_client, user):
"""
Test that requesting a user who does not exist returns a 404.
"""
client = self.login_client(api_client, user)
response = client.get(reverse("accounts_api", kwargs={'username': "does_not_exist"}))
assert 404 == response.status_code
@ddt.data(
("client", "user"),
)
@ddt.unpack
    def test_registration_activation_key(self, api_client, user):
"""
Test that registration activation key has a value.
UserFactory does not auto-generate registration object for the test users.
It is created only for users that signup via email/API. Therefore, activation key has to be tested manually.
"""
self.create_user_registration(self.user)
client = self.login_client(api_client, user)
response = self.send_get(client)
assert response.data["activation_key"] is not None
def test_successful_get_account_by_email(self):
"""
Test that request using email by a staff user successfully retrieves Account Info.
"""
api_client = "staff_client"
user = "staff_user"
client = self.login_client(api_client, user)
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY)
response = self.send_get(client, query_parameters=f'email={self.user.email}')
self._verify_full_account_response(response)
def test_unsuccessful_get_account_by_email(self):
"""
Test that request using email by a normal user fails to retrieve Account Info.
"""
api_client = "client"
user = "user"
client = self.login_client(api_client, user)
self.create_mock_profile(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY)
response = self.send_get(
client, query_parameters=f'email={self.user.email}', expected_status=status.HTTP_403_FORBIDDEN
)
assert response.data.get('detail') == 'You do not have permission to perform this action.'
def test_successful_get_account_by_user_id(self):
"""
Test that request using lms user id by a staff user successfully retrieves Account Info.
"""
api_client = "staff_client"
user = "staff_user"
url = reverse("accounts_detail_api")
client = self.login_client(api_client, user)
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY)
response = client.get(url + f'?lms_user_id={self.user.id}')
assert response.status_code == status.HTTP_200_OK
response.data = response.data[0]
self._verify_full_account_response(response)
def test_unsuccessful_get_account_by_user_id(self):
"""
Test that requesting using lms user id by a normal user fails to retrieve Account Info.
"""
api_client = "client"
user = "user"
url = reverse("accounts_detail_api")
client = self.login_client(api_client, user)
self.create_mock_profile(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY)
response = client.get(url + f'?lms_user_id={self.user.id}')
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data.get('detail') == 'You do not have permission to perform this action.'
@ddt.data('abc', '2f', '1.0', "2/8")
def test_get_account_by_user_id_non_integer(self, non_integer_id):
"""
Test that request using a non-integer lms user id by a staff user fails to retrieve Account Info.
"""
api_client = "staff_client"
user = "staff_user"
url = reverse("accounts_detail_api")
client = self.login_client(api_client, user)
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY)
response = client.get(url + f'?lms_user_id={non_integer_id}')
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_search_emails(self):
client = self.login_client('staff_client', 'staff_user')
json_data = {'emails': [self.user.email]}
response = self.post_search_api(client, json_data=json_data)
assert response.data == [{'email': self.user.email, 'id': self.user.id, 'username': self.user.username}]
def test_search_emails_with_non_staff_user(self):
client = self.login_client('client', 'user')
json_data = {'emails': [self.user.email]}
response = self.post_search_api(client, json_data=json_data, expected_status=404)
assert response.data == {
'developer_message': "not_found",
'user_message': "Not Found"
}
def test_search_emails_with_non_existing_email(self):
client = self.login_client('staff_client', 'staff_user')
json_data = {"emails": ['[email protected]']}
response = self.post_search_api(client, json_data=json_data)
assert response.data == []
def test_search_emails_with_invalid_param(self):
client = self.login_client('staff_client', 'staff_user')
json_data = {'invalid_key': [self.user.email]}
response = self.post_search_api(client, json_data=json_data, expected_status=400)
assert response.data == {
'developer_message': "'emails' field is required",
'user_message': "'emails' field is required"
}
# Note: using getattr so that the patching works even if there is no configuration.
# This is needed when testing CMS as the patching is still executed even though the
# suite is skipped.
@mock.patch.dict(getattr(settings, "ACCOUNT_VISIBILITY_CONFIGURATION", {}), {"default_visibility": "all_users"})
def test_get_account_different_user_visible(self):
"""
Test that a client (logged in) can only get the shareable fields for a different user.
This is the case when default_visibility is set to "all_users".
"""
self.different_client.login(username=self.different_user.username, password=TEST_PASSWORD)
self.create_mock_profile(self.user)
with self.assertNumQueries(self.TOTAL_QUERY_COUNT):
response = self.send_get(self.different_client)
self._verify_full_shareable_account_response(response, account_privacy=ALL_USERS_VISIBILITY)
# Note: using getattr so that the patching works even if there is no configuration.
# This is needed when testing CMS as the patching is still executed even though the
# suite is skipped.
@mock.patch.dict(getattr(settings, "ACCOUNT_VISIBILITY_CONFIGURATION", {}), {"default_visibility": "private"})
def test_get_account_different_user_private(self):
"""
Test that a client (logged in) can only get the shareable fields for a different user.
This is the case when default_visibility is set to "private".
"""
self.different_client.login(username=self.different_user.username, password=TEST_PASSWORD)
self.create_mock_profile(self.user)
with self.assertNumQueries(self.TOTAL_QUERY_COUNT):
response = self.send_get(self.different_client)
self._verify_private_account_response(response)
@mock.patch.dict(settings.FEATURES, {'ENABLE_OPENBADGES': True})
@ddt.data(
("client", "user", PRIVATE_VISIBILITY),
("different_client", "different_user", PRIVATE_VISIBILITY),
("staff_client", "staff_user", PRIVATE_VISIBILITY),
("client", "user", ALL_USERS_VISIBILITY),
("different_client", "different_user", ALL_USERS_VISIBILITY),
("staff_client", "staff_user", ALL_USERS_VISIBILITY),
)
@ddt.unpack
def test_get_account_private_visibility(self, api_client, requesting_username, preference_visibility):
"""
Test the return from GET based on user visibility setting.
"""
def verify_fields_visible_to_all_users(response):
"""
Confirms that private fields are private, and public/shareable fields are public/shareable
"""
if preference_visibility == PRIVATE_VISIBILITY:
self._verify_private_account_response(response)
else:
self._verify_full_shareable_account_response(response, ALL_USERS_VISIBILITY, badges_enabled=True)
client = self.login_client(api_client, requesting_username)
# Update user account visibility setting.
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, preference_visibility)
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
response = self.send_get(client)
if requesting_username == "different_user":
verify_fields_visible_to_all_users(response)
else:
self._verify_full_account_response(response)
# Verify how the view parameter changes the fields that are returned.
response = self.send_get(client, query_parameters='view=shared')
verify_fields_visible_to_all_users(response)
response = self.send_get(client, query_parameters=f'view=shared&username={self.user.username}')
verify_fields_visible_to_all_users(response)
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
("different_client", "different_user"),
)
@ddt.unpack
def test_custom_visibility_over_age(self, api_client, requesting_username):
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
# set user's custom visibility preferences
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, CUSTOM_VISIBILITY)
shared_fields = ("bio", "language_proficiencies", "name")
for field_name in shared_fields:
set_user_preference(self.user, f"visibility.{field_name}", ALL_USERS_VISIBILITY)
# make API request
client = self.login_client(api_client, requesting_username)
response = self.send_get(client)
# verify response
if requesting_username == "different_user":
data = response.data
assert 6 == len(data)
# public fields
assert self.user.username == data['username']
assert UserPreference.get_value(self.user, 'account_privacy') == data['account_privacy']
self._verify_profile_image_data(data, has_profile_image=True)
# custom shared fields
assert TEST_BIO_VALUE == data['bio']
assert [{'code': TEST_LANGUAGE_PROFICIENCY_CODE}] == data['language_proficiencies']
assert ((self.user.first_name + ' ') + self.user.last_name) == data['name']
else:
self._verify_full_account_response(response)
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
("different_client", "different_user"),
)
@ddt.unpack
def test_custom_visibility_under_age(self, api_client, requesting_username):
self.create_mock_profile(self.user)
self.create_mock_verified_name(self.user)
year_of_birth = self._set_user_age_to_10_years(self.user)
# set user's custom visibility preferences
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, CUSTOM_VISIBILITY)
shared_fields = ("bio", "language_proficiencies")
for field_name in shared_fields:
set_user_preference(self.user, f"visibility.{field_name}", ALL_USERS_VISIBILITY)
# make API request
client = self.login_client(api_client, requesting_username)
response = self.send_get(client)
# verify response
if requesting_username == "different_user":
self._verify_private_account_response(response, requires_parental_consent=True)
else:
self._verify_full_account_response(
response,
requires_parental_consent=True,
year_of_birth=year_of_birth,
)
def test_get_account_default(self):
"""
Test that a client (logged in) can get her own account information (using default legacy profile information,
as created by the test UserFactory).
"""
def verify_get_own_information(queries):
"""
Internal helper to perform the actual assertions
"""
with self.assertNumQueries(queries):
response = self.send_get(self.client)
data = response.data
assert self.FULL_RESPONSE_FIELD_COUNT == len(data)
assert self.user.username == data['username']
assert ((self.user.first_name + ' ') + self.user.last_name) == data['name']
for empty_field in ("year_of_birth", "level_of_education", "mailing_address", "bio"):
assert data[empty_field] is None
assert data['country'] is None
assert data['state'] is None
assert 'm' == data['gender']
assert 'Learn a lot' == data['goals']
assert self.user.email == data['email']
assert self.user.id == data['id']
assert data['date_joined'] is not None
assert data['last_login'] is not None
assert self.user.is_active == data['is_active']
self._verify_profile_image_data(data, False)
assert data['requires_parental_consent']
assert [] == data['language_proficiencies']
assert PRIVATE_VISIBILITY == data['account_privacy']
assert data['time_zone'] is None
# Badges aren't on by default, so should not be present.
assert data['accomplishments_shared'] is False
self.client.login(username=self.user.username, password=TEST_PASSWORD)
verify_get_own_information(25)
# Now make sure that the user can get the same information, even if not active
self.user.is_active = False
self.user.save()
verify_get_own_information(17)
def test_get_account_empty_string(self):
"""
Test the conversion of empty strings to None for certain fields.
"""
legacy_profile = UserProfile.objects.get(id=self.user.id)
legacy_profile.country = ""
legacy_profile.state = ""
legacy_profile.level_of_education = ""
legacy_profile.gender = ""
legacy_profile.bio = ""
legacy_profile.save()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
with self.assertNumQueries(25):
response = self.send_get(self.client)
for empty_field in ("level_of_education", "gender", "country", "state", "bio",):
assert response.data[empty_field] is None
@ddt.data(
("different_client", "different_user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_patch_account_disallowed_user(self, api_client, user):
"""
Test that a client cannot call PATCH on a different client's user account (even with
is_staff access).
"""
client = self.login_client(api_client, user)
self.send_patch(client, {}, expected_status=403)
@ddt.data(
("client", "user"),
("staff_client", "staff_user"),
)
@ddt.unpack
def test_patch_account_unknown_user(self, api_client, user):
"""
Test that trying to update a user who does not exist returns a 403.
"""
client = self.login_client(api_client, user)
response = client.patch(
reverse("accounts_api", kwargs={'username': "does_not_exist"}),
data=json.dumps({}), content_type="application/merge-patch+json"
)
assert 403 == response.status_code
@ddt.data(
("gender", "f", "not a gender", '"not a gender" is not a valid choice.'),
("level_of_education", "none", "ȻħȺɍłɇs", '"ȻħȺɍłɇs" is not a valid choice.'),
("country", "GB", "XY", '"XY" is not a valid choice.'),
("state", "MA", "PY", '"PY" is not a valid choice.'),
("year_of_birth", 2009, "not_an_int", "A valid integer is required."),
("name", "bob", "z" * 256, "Ensure this field has no more than 255 characters."),
("name", "ȻħȺɍłɇs", " ", "The name field must be at least 1 character long."),
("goals", "Smell the roses"),
("mailing_address", "Sesame Street"),
# Note that we store the raw data, so it is up to client to escape the HTML.
(
"bio", "<html>Lacrosse-playing superhero 壓是進界推日不復女</html>",
"z" * 301, "The about me field must be at most 300 characters long."
),
("account_privacy", ALL_USERS_VISIBILITY),
("account_privacy", PRIVATE_VISIBILITY),
# Note that email is tested below, as it is not immediately updated.
# Note that language_proficiencies is tested below as there are multiple error and success conditions.
)
@ddt.unpack
def test_patch_account(self, field, value, fails_validation_value=None, developer_validation_message=None):
"""
Test the behavior of patch, when using the correct content_type.
"""
client = self.login_client("client", "user")
if field == 'account_privacy':
# Ensure the user has birth year set, and is over 13, so
# account_privacy behaves normally
legacy_profile = UserProfile.objects.get(id=self.user.id)
legacy_profile.year_of_birth = 2000
legacy_profile.save()
response = self.send_patch(client, {field: value})
assert value == response.data[field]
if fails_validation_value:
error_response = self.send_patch(client, {field: fails_validation_value}, expected_status=400)
expected_user_message = 'This value is invalid.'
if field == 'bio':
expected_user_message = "The about me field must be at most 300 characters long."
assert expected_user_message == error_response.data['field_errors'][field]['user_message']
assert "Value '{value}' is not valid for field '{field}': {messages}".format(
value=fails_validation_value,
field=field,
messages=[developer_validation_message]
) == error_response.data['field_errors'][field]['developer_message']
elif field != "account_privacy":
# If there are no values that would fail validation, then empty string should be supported;
# except for account_privacy, which cannot be an empty string.
response = self.send_patch(client, {field: ""})
assert '' == response.data[field]
def test_patch_inactive_user(self):
""" Verify that a user can patch her own account, even if inactive. """
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.user.is_active = False
self.user.save()
response = self.send_patch(self.client, {"goals": "to not activate account"})
assert 'to not activate account' == response.data['goals']
    def test_patch_account_noneditable(self):
"""
Tests the behavior of patch when a read-only field is attempted to be edited.
"""
client = self.login_client("client", "user")
def verify_error_response(field_name, data):
"""
Internal helper to check the error messages returned
"""
assert 'This field is not editable via this API' == data['field_errors'][field_name]['developer_message']
assert "The '{}' field cannot be edited.".format(
field_name
) == data['field_errors'][field_name]['user_message']
for field_name in ["username", "date_joined", "is_active", "profile_image", "requires_parental_consent"]:
response = self.send_patch(client, {field_name: "will_error", "gender": "o"}, expected_status=400)
verify_error_response(field_name, response.data)
# Make sure that gender did not change.
response = self.send_get(client)
assert 'm' == response.data['gender']
# Test error message with multiple read-only items
response = self.send_patch(client, {"username": "will_error", "date_joined": "xx"}, expected_status=400)
assert 2 == len(response.data['field_errors'])
verify_error_response("username", response.data)
verify_error_response("date_joined", response.data)
def test_patch_bad_content_type(self):
"""
Test the behavior of patch when an incorrect content_type is specified.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_patch(self.client, {}, content_type="application/json", expected_status=415)
self.send_patch(self.client, {}, content_type="application/xml", expected_status=415)
def test_patch_account_empty_string(self):
"""
Tests the behavior of patch when attempting to set fields with a select list of options to the empty string.
Also verifies the behaviour when setting to None.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
for field_name in ["gender", "level_of_education", "country", "state"]:
response = self.send_patch(self.client, {field_name: ""})
# Although throwing a 400 might be reasonable, the default DRF behavior with ModelSerializer
# is to convert to None, which also seems acceptable (and is difficult to override).
assert response.data[field_name] is None
            # Verify that the behavior is the same for sending None.
            response = self.send_patch(self.client, {field_name: None})
assert response.data[field_name] is None
def test_patch_name_metadata(self):
"""
Test the metadata stored when changing the name field.
"""
def get_name_change_info(expected_entries):
"""
Internal method to encapsulate the retrieval of old names used
"""
legacy_profile = UserProfile.objects.get(id=self.user.id)
name_change_info = legacy_profile.get_meta()["old_names"]
assert expected_entries == len(name_change_info)
return name_change_info
def verify_change_info(change_info, old_name, requester, new_name):
"""
Internal method to validate name changes
"""
assert 3 == len(change_info)
assert old_name == change_info[0]
assert f'Name change requested through account API by {requester}' == change_info[1]
assert change_info[2] is not None
# Verify the new name was also stored.
get_response = self.send_get(self.client)
assert new_name == get_response.data['name']
self.client.login(username=self.user.username, password=TEST_PASSWORD)
legacy_profile = UserProfile.objects.get(id=self.user.id)
assert {} == legacy_profile.get_meta()
old_name = legacy_profile.name
# First change the name as the user and verify meta information.
self.send_patch(self.client, {"name": "Mickey Mouse"})
name_change_info = get_name_change_info(1)
verify_change_info(name_change_info[0], old_name, self.user.username, "Mickey Mouse")
# Now change the name again and verify meta information.
self.send_patch(self.client, {"name": "Donald Duck"})
name_change_info = get_name_change_info(2)
verify_change_info(name_change_info[0], old_name, self.user.username, "Donald Duck", )
verify_change_info(name_change_info[1], "Mickey Mouse", self.user.username, "Donald Duck")
@mock.patch.dict(
'django.conf.settings.PROFILE_IMAGE_SIZES_MAP',
{'full': 50, 'medium': 30, 'small': 10},
clear=True
)
def test_patch_email(self):
"""
Test that the user can request an email change through the accounts API.
Full testing of the helper method used (do_email_change_request) exists in the package with the code.
Here just do minimal smoke testing.
"""
client = self.login_client("client", "user")
old_email = self.user.email
new_email = "[email protected]"
response = self.send_patch(client, {"email": new_email, "goals": "change my email"})
# Since request is multi-step, the email won't change on GET immediately (though goals will update).
assert old_email == response.data['email']
assert 'change my email' == response.data['goals']
        # Now call the method that will be invoked when the user clicks the activation key in the received email.
# First we must get the activation key that was sent.
pending_change = PendingEmailChange.objects.filter(user=self.user)
assert 1 == len(pending_change)
activation_key = pending_change[0].activation_key
confirm_change_url = reverse(
"confirm_email_change", kwargs={'key': activation_key}
)
response = self.client.post(confirm_change_url)
assert 200 == response.status_code
get_response = self.send_get(client)
assert new_email == get_response.data['email']
@ddt.data(
("not_an_email",),
("",),
(None,),
)
@ddt.unpack
def test_patch_invalid_email(self, bad_email):
"""
Test a few error cases for email validation (full test coverage lives with do_email_change_request).
"""
client = self.login_client("client", "user")
# Try changing to an invalid email to make sure error messages are appropriately returned.
error_response = self.send_patch(client, {"email": bad_email}, expected_status=400)
field_errors = error_response.data["field_errors"]
assert "Error thrown from validate_new_email: 'Valid e-mail address required.'" == \
field_errors['email']['developer_message']
assert 'Valid e-mail address required.' == field_errors['email']['user_message']
@mock.patch('common.djangoapps.student.views.management.do_email_change_request')
def test_patch_duplicate_email(self, do_email_change_request):
"""
Test that same success response will be sent to user even if the given email already used.
"""
existing_email = "[email protected]"
UserFactory.create(email=existing_email)
client = self.login_client("client", "user")
# Try changing to an existing email to make sure no error messages returned.
response = self.send_patch(client, {"email": existing_email})
assert 200 == response.status_code
# Verify that no actual request made for email change
assert not do_email_change_request.called
def test_patch_language_proficiencies(self):
"""
Verify that patching the language_proficiencies field of the user
profile completely overwrites the previous value.
"""
client = self.login_client("client", "user")
        # Patching language_proficiencies exercises the
        # `LanguageProficiencySerializer.get_identity` method, which
        # identifies language proficiencies by their language code rather
        # than their django model id.
for proficiencies in ([{"code": "en"}, {"code": "fr"}, {"code": "es"}], [{"code": "fr"}], [{"code": "aa"}], []):
response = self.send_patch(client, {"language_proficiencies": proficiencies})
self.assertCountEqual(response.data["language_proficiencies"], proficiencies)
@ddt.data(
(
"not_a_list",
{'non_field_errors': ['Expected a list of items but got type "unicode".']}
),
(
["not_a_JSON_object"],
[{'non_field_errors': ['Invalid data. Expected a dictionary, but got unicode.']}]
),
(
[{}],
[{'code': ['This field is required.']}]
),
(
[{"code": "invalid_language_code"}],
[{'code': ['"invalid_language_code" is not a valid choice.']}]
),
(
[{"code": "kw"}, {"code": "el"}, {"code": "kw"}],
['The language_proficiencies field must consist of unique languages.']
),
)
@ddt.unpack
def test_patch_invalid_language_proficiencies(self, patch_value, expected_error_message):
"""
Verify we handle error cases when patching the language_proficiencies
field.
"""
expected_error_message = str(expected_error_message).replace('unicode', 'str')
client = self.login_client("client", "user")
response = self.send_patch(client, {"language_proficiencies": patch_value}, expected_status=400)
assert response.data['field_errors']['language_proficiencies']['developer_message'] == \
f"Value '{patch_value}' is not valid for field 'language_proficiencies': {expected_error_message}"
@mock.patch('openedx.core.djangoapps.user_api.accounts.serializers.AccountUserSerializer.save')
def test_patch_serializer_save_fails(self, serializer_save):
"""
Test that AccountUpdateErrors are passed through to the response.
"""
serializer_save.side_effect = [Exception("bummer"), None]
self.client.login(username=self.user.username, password=TEST_PASSWORD)
error_response = self.send_patch(self.client, {"goals": "save an account field"}, expected_status=400)
assert "Error thrown when saving account updates: 'bummer'" == error_response.data['developer_message']
assert error_response.data['user_message'] is None
@override_settings(PROFILE_IMAGE_BACKEND=TEST_PROFILE_IMAGE_BACKEND)
def test_convert_relative_profile_url(self):
"""
Test that when TEST_PROFILE_IMAGE_BACKEND['base_url'] begins
with a '/', the API generates the full URL to profile images based on
the URL of the request.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
response = self.send_get(self.client)
assert response.data['profile_image'] == \
{'has_image': False,
'image_url_full': 'http://testserver/static/default_50.png',
'image_url_small': 'http://testserver/static/default_10.png'}
@ddt.data(
("client", "user", True),
("different_client", "different_user", False),
("staff_client", "staff_user", True),
)
@ddt.unpack
def test_parental_consent(self, api_client, requesting_username, has_full_access):
"""
        Verifies that users under thirteen never get a public profile.
"""
client = self.login_client(api_client, requesting_username)
year_of_birth = self._set_user_age_to_10_years(self.user)
set_user_preference(self.user, ACCOUNT_VISIBILITY_PREF_KEY, ALL_USERS_VISIBILITY)
# Verify that the default view is still private (except for clients with full access)
response = self.send_get(client)
if has_full_access:
data = response.data
assert self.FULL_RESPONSE_FIELD_COUNT == len(data)
assert self.user.username == data['username']
assert ((self.user.first_name + ' ') + self.user.last_name) == data['name']
assert self.user.email == data['email']
assert self.user.id == data['id']
assert year_of_birth == data['year_of_birth']
for empty_field in ("country", "level_of_education", "mailing_address", "bio", "state",):
assert data[empty_field] is None
assert 'm' == data['gender']
assert 'Learn a lot' == data['goals']
assert data['is_active']
assert data['date_joined'] is not None
assert data['last_login'] is not None
self._verify_profile_image_data(data, False)
assert data['requires_parental_consent']
assert PRIVATE_VISIBILITY == data['account_privacy']
else:
self._verify_private_account_response(response, requires_parental_consent=True)
# Verify that the shared view is still private
response = self.send_get(client, query_parameters='view=shared')
self._verify_private_account_response(response, requires_parental_consent=True)
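
# Added sketch: several tests above PATCH with "application/merge-patch+json"
# (RFC 7396). In brief: keys present in the patch overwrite the target, and a
# null value removes the key. A shallow, pure-Python illustration (not the
# server's implementation):
def _demo_json_merge_patch(target, patch):
    """Apply a one-level JSON merge patch to a dict and return it."""
    for key, value in patch.items():
        if value is None:
            target.pop(key, None)
        else:
            target[key] = value
    return target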
@skip_unless_lms
class TestAccountAPITransactions(TransactionTestCase):
"""
Tests the transactional behavior of the account API
"""
def setUp(self):
super().setUp()
self.client = APIClient()
self.user = UserFactory.create(password=TEST_PASSWORD)
self.url = reverse("accounts_api", kwargs={'username': self.user.username})
@mock.patch('common.djangoapps.student.views.do_email_change_request')
def test_update_account_settings_rollback(self, mock_email_change):
"""
Verify that updating account settings is transactional when a failure happens.
"""
# Send a PATCH request with updates to both profile information and email.
# Throw an error from the method that is used to process the email change request
# (this is the last thing done in the api method). Verify that the profile did not change.
        mock_email_change.side_effect = ValueError("mock value error thrown")
self.client.login(username=self.user.username, password=TEST_PASSWORD)
old_email = self.user.email
json_data = {"email": "[email protected]", "gender": "o"}
response = self.client.patch(self.url, data=json.dumps(json_data), content_type="application/merge-patch+json")
assert 400 == response.status_code
# Verify that GET returns the original preferences
response = self.client.get(self.url)
data = response.data
assert old_email == data['email']
assert 'm' == data['gender']
@ddt.ddt
class NameChangeViewTests(UserAPITestCase):
""" NameChangeView tests """
def setUp(self):
super().setUp()
self.url = reverse('name_change')
def test_request_succeeds(self):
"""
Test that a valid name change request succeeds.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_post(self.client, {'name': 'New Name'})
def test_unauthenticated(self):
"""
Test that a name change request fails for an unauthenticated user.
"""
self.send_post(self.client, {'name': 'New Name'}, expected_status=401)
def test_empty_request(self):
"""
Test that an empty request fails.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_post(self.client, {}, expected_status=400)
def test_blank_name(self):
"""
Test that a blank name string fails.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_post(self.client, {'name': ''}, expected_status=400)
@ddt.data('<html>invalid name</html>', 'https://invalid.com')
def test_fails_validation(self, invalid_name):
"""
Test that an invalid name will return an error.
"""
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.send_post(
self.client,
{'name': invalid_name},
expected_status=400
)
@ddt.ddt
@mock.patch('django.conf.settings.USERNAME_REPLACEMENT_WORKER', 'test_replace_username_service_worker')
class UsernameReplacementViewTests(APITestCase):
""" Tests UsernameReplacementView """
SERVICE_USERNAME = 'test_replace_username_service_worker'
def setUp(self):
super().setUp()
self.service_user = UserFactory(username=self.SERVICE_USERNAME)
self.url = reverse("username_replacement")
def build_jwt_headers(self, user):
"""
Helper function for creating headers for the JWT authentication.
"""
token = create_jwt_for_user(user)
headers = {'HTTP_AUTHORIZATION': f'JWT {token}'}
return headers
def call_api(self, user, data):
""" Helper function to call API with data """
data = json.dumps(data)
headers = self.build_jwt_headers(user)
return self.client.post(self.url, data, content_type='application/json', **headers)
def test_auth(self):
""" Verify the endpoint only works with the service worker """
data = {
"username_mappings": [
{"test_username_1": "test_new_username_1"},
{"test_username_2": "test_new_username_2"}
]
}
# Test unauthenticated
response = self.client.post(self.url)
assert response.status_code == 401
# Test non-service worker
random_user = UserFactory()
response = self.call_api(random_user, data)
assert response.status_code == 403
# Test service worker
response = self.call_api(self.service_user, data)
assert response.status_code == 200
@ddt.data(
[{}, {}],
{},
[{"test_key": "test_value", "test_key_2": "test_value_2"}]
)
def test_bad_schema(self, mapping_data):
""" Verify the endpoint rejects bad data schema """
data = {
"username_mappings": mapping_data
}
response = self.call_api(self.service_user, data)
assert response.status_code == 400
def test_existing_and_non_existing_users(self):
""" Tests a mix of existing and non existing users """
random_users = [UserFactory() for _ in range(5)]
fake_usernames = ["myname_" + str(x) for x in range(5)]
existing_users = [{user.username: user.username + '_new'} for user in random_users]
non_existing_users = [{username: username + '_new'} for username in fake_usernames]
data = {
"username_mappings": existing_users + non_existing_users
}
expected_response = {
'failed_replacements': [],
'successful_replacements': existing_users + non_existing_users
}
response = self.call_api(self.service_user, data)
assert response.status_code == 200
assert response.data == expected_response
| edx/edx-platform | openedx/core/djangoapps/user_api/accounts/tests/test_views.py | Python | agpl-3.0 | 53,614 |
def _checkInput(index):
    # Check the type first: a non-int index would make the "< 0" comparison fail anyway.
    if type(index) != int:
        raise TypeError("An integer is required [input type {}]".format(type(index).__name__))
    elif index < 0:
        raise ValueError("Negative indexes are not supported [{}]".format(index))
def fib_from_string(index):
    _checkInput(index)
    # Only valid while every Fibonacci number is a single digit (indexes 0..6).
    serie = "0 1 1 2 3 5 8".replace(" ", "")
    return int(serie[index])
def fib_from_list(index):
_checkInput(index)
serie = [0,1,1,2,3,5,8]
return serie[index]
def fib_from_algo(index):
_checkInput(index)
current_number = current_index = 0
base = 1
while current_index < index:
old_base = current_number
current_number = current_number + base
base = old_base
current_index += 1
return current_number
def recursion(index):
if index <= 1:
return index
return recursion(index - 1) + recursion(index - 2)
def fib_from_recursion_func(index):
_checkInput(index)
return recursion(index)
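# Quick self-check (illustrative addition, not part of the original exercise):
# all four implementations should agree on the single-digit part of the series.
if __name__ == "__main__":
    for i in range(7):
        assert fib_from_string(i) == fib_from_list(i) == fib_from_algo(i) == fib_from_recursion_func(i), i
    print("all implementations agree on indexes 0..6")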
calculate = fib_from_recursion_func | feroda/lessons-python4beginners | students/2016-09-04/simone-cosma/fibonacci.py | Python | agpl-3.0 | 1,035 |
"""
Braitenberg Vehicle2b
The more light sensed on the left side the faster the right motor moves.
The more light sensed on the right side the faster the left motor moves.
This causes the robot to turn towards a light source.
"""
from pyrobot.brain import Brain, avg
class Vehicle(Brain):
def setup(self):
self.robot.light[0].units = "SCALED"
def step(self):
leftSpeed = max([s.value for s in self.robot.light[0]["right"]])
rightSpeed = max([s.value for s in self.robot.light[0]["left"]])
print "leftSpeed, rightSpeed:", leftSpeed, rightSpeed
self.motors(leftSpeed, rightSpeed)
def INIT(engine):
if engine.robot.type not in ['K-Team', 'Pyrobot']:
raise "Robot should have light sensors!"
return Vehicle('Braitenberg2a', engine)
| emilydolson/forestcat | pyrobot/plugins/brains/BraitenbergVehicle2b.py | Python | agpl-3.0 | 789 |
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import pynotify
import sys
if __name__ == '__main__':
if not pynotify.init("XY"):
sys.exit(1)
n = pynotify.Notification("X, Y Test",
"This notification should point to 150, 10")
n.set_hint("x", 150)
n.set_hint("y", 10)
if not n.show():
print "Failed to send notification"
sys.exit(1)
| Distrotech/notify-python | tests/test-xy.py | Python | lgpl-2.1 | 418 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for loading and saving netcdf files."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from os.path import join as path_join, dirname, sep as os_sep
import shutil
from subprocess import check_call
import tempfile
import iris
from iris.tests import stock
class TestClimatology(iris.tests.IrisTest):
reference_cdl_path = os_sep.join(
[
dirname(tests.__file__),
(
"results/integration/climatology/TestClimatology/"
"reference_simpledata.cdl"
),
]
)
@classmethod
def _simple_cdl_string(cls):
with open(cls.reference_cdl_path, "r") as f:
cdl_content = f.read()
# Add the expected CDL first line since this is removed from the
# stored results file.
cdl_content = "netcdf {\n" + cdl_content
return cdl_content
@staticmethod
def _load_sanitised_cube(filepath):
cube = iris.load_cube(filepath)
        # Remove the Conventions attribute, if any.
cube.attributes.pop("Conventions", None)
# Remove any var-names.
for coord in cube.coords():
coord.var_name = None
cube.var_name = None
return cube
@classmethod
def setUpClass(cls):
# Create a temp directory for temp files.
cls.temp_dir = tempfile.mkdtemp()
cls.path_ref_cdl = path_join(cls.temp_dir, "standard.cdl")
cls.path_ref_nc = path_join(cls.temp_dir, "standard.nc")
# Create reference CDL file.
with open(cls.path_ref_cdl, "w") as f_out:
f_out.write(cls._simple_cdl_string())
# Create reference netCDF file from reference CDL.
command = "ncgen -o {} {}".format(cls.path_ref_nc, cls.path_ref_cdl)
check_call(command, shell=True)
cls.path_temp_nc = path_join(cls.temp_dir, "tmp.nc")
# Create reference cube.
cls.cube_ref = stock.climatology_3d()
@classmethod
def tearDownClass(cls):
# Destroy a temp directory for temp files.
shutil.rmtree(cls.temp_dir)
###############################################################################
# Round-trip tests
def test_cube_to_cube(self):
# Save reference cube to file, load cube from same file, test against
# reference cube.
iris.save(self.cube_ref, self.path_temp_nc)
cube = self._load_sanitised_cube(self.path_temp_nc)
self.assertEqual(cube, self.cube_ref)
def test_file_to_file(self):
# Load cube from reference file, save same cube to file, test against
# reference CDL.
cube = iris.load_cube(self.path_ref_nc)
iris.save(cube, self.path_temp_nc)
self.assertCDL(
self.path_temp_nc,
reference_filename=self.reference_cdl_path,
flags="",
)
# NOTE:
# The saving half of the round-trip tests is tested in the
# appropriate dedicated test class:
# unit.fileformats.netcdf.test_Saver.Test_write.test_with_climatology .
# The loading half has no equivalent dedicated location, so is tested
# here as test_load_from_file.
def test_load_from_file(self):
# Create cube from file, test against reference cube.
cube = self._load_sanitised_cube(self.path_ref_nc)
self.assertEqual(cube, self.cube_ref)
if __name__ == "__main__":
tests.main()
| pp-mo/iris | lib/iris/tests/integration/test_climatology.py | Python | lgpl-3.0 | 3,683 |
#__all__ = [ 'search', 'ham_distance', 'lev_distance', 'distance', 'distance_matrix' ]
| vfulco/scalpel | lib/gravity/tae/match/__init__.py | Python | lgpl-3.0 | 94 |
# Special rules for nouns, to avoid suggesting wrong lemmas. Nothing is done for other POS.
import pymorphy2
blacklist1 = ['ъб', 'ъв', 'ъг', 'ъд', 'ъж', 'ъз', 'ък', 'ъл', 'ъм', 'ън', 'ъп', 'ър', 'ъс', 'ът', 'ъф', 'ъх', 'ъц', 'ъч', 'ъш', 'ъщ', 'йй', 'ьь', 'ъъ', 'ыы', 'чя', 'чю', 'чй', 'щя', 'щю', 'щй', 'шя', 'шю', 'шй', 'жы', 'шы', 'аь', 'еь', 'ёь', 'иь', 'йь', 'оь', 'уь', 'ыь', 'эь', 'юь', 'яь', 'аъ', 'еъ', 'ёъ', 'иъ', 'йъ', 'оъ', 'уъ', 'ыъ', 'эъ', 'юъ', 'яъ']
blacklist2 = ['чьк', 'чьн', 'щьн'] # forbidden
blacklist3 = ['руметь']
base1 = ['ло','уа', 'ая', 'ши', 'ти', 'ни', 'ки', 'ко', 'ли', 'уи', 'до', 'аи', 'то'] # unchanged
base2 = ['алз','бва', 'йты','ике','нту','лди','лит', 'вра','афе', 'бле', 'яху','уке', 'дзе', 'ури', 'ава', 'чче','нте', 'нне', 'гие', 'уро', 'сут', 'оне', 'ино', 'йду', 'нью', 'ньо', 'ньи', 'ери', 'ску', 'дье']
base3 = ['иани','льди', 'льде', 'ейру', 'зема', 'хими', 'ками', 'кала', 'мари', 'осси', 'лари', 'тано', 'ризе', 'енте', 'енеи']
base4 = ['швили', 'льяри']
change1 = ['лл','рр', 'пп', 'тт', 'ер', 'ук', 'ун', 'юк', 'ан', 'ян', 'ия', 'ин'] # declines
change2 = ['вец','дюн', 'еув', 'инз', 'ейн', 'лис','лек','бен','нек','рок', 'ргл', 'бих','бус','айс','гас','таш', 'хэм', 'аал', 'дад', 'анд', 'лес', 'мар','ньш', 'рос','суф', 'вик', 'якс', 'веш','анц', 'янц', 'сон', 'сен', 'нен', 'ман', 'цак', 'инд', 'кин', 'чин', 'рем', 'рём', 'дин']
change3 = ['ерит', 'гард', 'иньш', 'скис', 'ллит', 'еней', 'рроз', 'манн', 'берг', 'вист', 'хайм',]
female1 = ['ская', 'ской', 'скую']
female2 = ['овой']
female3 = ['евой']
female4 = ['иной']
middlemale = ['а', 'у']
middlestr1 = ['ии', 'ию'] # for Данелия
middlestr2 = ['ией']
male = ['ов', 'ев', 'ин']
male1 = ['ский', 'ским', 'ском']
male2 = ['ского', 'скому']
male3 = ['е', 'ы']
male4 = ['ым', 'ом', 'ем', 'ой']
side1 = ['авы', 'аве', 'аву', 'фик', 'иол', 'риц', 'икк', 'ест', 'рех', 'тин']
side2 = ['авой']
sname = ['вич', 'вна']
sname1 = ['вн']
def lemmas_done(found, lemmatized):
"""
Check predicted lemmas according to the rules.
"""
morph = pymorphy2.MorphAnalyzer()
fix = []
fixednames = []
    doublefemale = []
for i in range(len(lemmatized)):
p = morph.parse(found[i])[0]
if p.tag.POS == 'NOUN':
if (found[i].istitle()) and ((found[i][-2:] in base1) or (found[i][-2:] in male) or (found[i][-3:] in base2) or (found[i][-4:] in base3) or (found[i][-5:] in base4)):
fixednames.append(found[i])
elif (found[i].istitle()) and ((found[i][-2:] in change1) or (found[i][-3:] in change2) or (found[i][-4:] in change3)):
fixednames.append(found[i])
elif (found[i].istitle()) and (found[i][-4:] in female1):
fixednames.append(found[i][:-2] + 'ая')
elif (found[i].istitle()) and (found[i][-4:] in female2):
fixednames.append(found[i][:-4] + 'ова')
elif (found[i].istitle()) and (found[i][-4:] in female3):
fixednames.append(found[i][:-4] + 'ева')
elif (found[i].istitle()) and (found[i][-4:] in female4):
fixednames.append(found[i][:-4] + 'ина')
elif (found[i].istitle()) and (found[i][-4:] in male1):
fixednames.append(found[i][:-2] + 'ий')
elif (found[i].istitle()) and (found[i][-5:] in male2):
fixednames.append(found[i][:-3] + 'ий')
elif (found[i].istitle()) and (found[i][-1:] in male3) and (found[i][-3:-1] in male):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-4:-2] in male):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-1:] in middlemale) and (found[i][-3:-1] in male):
fixednames.append(found[i][:-1])
doublefemale.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-3:-1] in change1):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-4:-1] in change2):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and ((found[i][-1:] in male3) or (found[i][-1:] in middlemale)) and (found[i][-5:-1] in change3):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-4:-2] in change1):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-5:-2] in change2):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in male4) and (found[i][-6:-2] in change3):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-2:] in middlestr1):
fixednames.append(found[i][:-1] + 'я')
elif (found[i].istitle()) and (found[i][-3:] in middlestr2):
fixednames.append(found[i][:-2] + 'я')
elif (found[i].istitle()) and (found[i][-3:] in side1):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-4:] in side2):
fixednames.append(found[i][:-2] + 'а')
elif (found[i].istitle()) and (found[i][-4:-1] in side1):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-5:-2] in side1):
fixednames.append(found[i][:-2] + 'а')
elif (found[i].istitle()) and (found[i][-3:] in sname):
fixednames.append(found[i])
elif (found[i].istitle()) and (found[i][-4:-1] in sname) and ((found[i][-1:] in middlemale) or (found[i][-1:] in male3)):
fixednames.append(found[i][:-1])
elif (found[i].istitle()) and (found[i][-5:-2] in sname) and (found[i][-2:] in male4):
fixednames.append(found[i][:-2])
elif (found[i].istitle()) and (found[i][-3:-1] in sname1) and ((found[i][-1:] in middlemale) or (found[i][-1:] in male3)):
fixednames.append(found[i][:-1] + 'а')
elif (found[i].istitle()) and (found[i][-4:-2] in sname1) and (found[i][-2:] in male4):
fixednames.append(found[i][:-2] + 'а')
else:
fixednames.append(lemmatized[i])
else:
fixednames.append(lemmatized[i])
for i in range(len(fixednames)):
if (fixednames[i][-2:] in blacklist1) or (fixednames[i][-3:] in blacklist2) or (fixednames[i][-6:] in blacklist3):
fix.append(found[i])
else:
fix.append(fixednames[i])
fix = fix + doublefemale
newfreq = len(doublefemale)
return fix, newfreq
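# Minimal usage sketch (illustrative addition; the tokens below are arbitrary
# examples, and the exact output depends on pymorphy2's POS tagging):
if __name__ == '__main__':
    found = ['Ивановой', 'стол']
    lemmatized = ['иванов', 'стол']
    fixed, extra_count = lemmas_done(found, lemmatized)
    print(fixed, extra_count)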
| azilya/Zaliznyak-s-grammatical-dictionary | gdictionary/postprocessing.py | Python | lgpl-3.0 | 7,574 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: Tong Li <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging.handlers
import os
import tempfile
from ceilometer.dispatcher import file
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
from ceilometer.publisher import utils
class TestDispatcherFile(test.BaseTestCase):
def setUp(self):
super(TestDispatcherFile, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
def test_file_dispatcher_with_all_config(self):
        # Create a temporary file just to obtain a usable file name.
tf = tempfile.NamedTemporaryFile('r')
filename = tf.name
tf.close()
self.CONF.dispatcher_file.file_path = filename
self.CONF.dispatcher_file.max_bytes = 50
self.CONF.dispatcher_file.backup_count = 5
dispatcher = file.FileDispatcher(self.CONF)
        # The number of handlers should be 1
self.assertEqual(1, len(dispatcher.log.handlers))
# The handler should be RotatingFileHandler
handler = dispatcher.log.handlers[0]
self.assertIsInstance(handler,
logging.handlers.RotatingFileHandler)
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = utils.compute_signature(
msg,
self.CONF.publisher.metering_secret,
)
# The record_metering_data method should exist and not produce errors.
dispatcher.record_metering_data(msg)
# After the method call above, the file should have been created.
self.assertTrue(os.path.exists(handler.baseFilename))
def test_file_dispatcher_with_path_only(self):
        # Create a temporary file just to obtain a usable file name.
tf = tempfile.NamedTemporaryFile('r')
filename = tf.name
tf.close()
self.CONF.dispatcher_file.file_path = filename
self.CONF.dispatcher_file.max_bytes = None
self.CONF.dispatcher_file.backup_count = None
dispatcher = file.FileDispatcher(self.CONF)
        # The number of handlers should be 1
        self.assertEqual(1, len(dispatcher.log.handlers))
        # The handler should be a plain FileHandler (no rotation configured)
handler = dispatcher.log.handlers[0]
self.assertIsInstance(handler,
logging.FileHandler)
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = utils.compute_signature(
msg,
self.CONF.publisher.metering_secret,
)
# The record_metering_data method should exist and not produce errors.
dispatcher.record_metering_data(msg)
# After the method call above, the file should have been created.
self.assertTrue(os.path.exists(handler.baseFilename))
def test_file_dispatcher_with_no_path(self):
self.CONF.dispatcher_file.file_path = None
dispatcher = file.FileDispatcher(self.CONF)
# The log should be None
self.assertIsNone(dispatcher.log)
| tanglei528/ceilometer | ceilometer/tests/dispatcher/test_file.py | Python | apache-2.0 | 3,762 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import rest
from pecan import expose
from pecan import request
from mistral.openstack.common import log as logging
from mistral.db import api as db_api
from mistral.services import scheduler
LOG = logging.getLogger(__name__)
class WorkbookDefinitionController(rest.RestController):
@expose()
def get(self, workbook_name):
LOG.debug("Fetch workbook definition [workbook_name=%s]" %
workbook_name)
return db_api.workbook_definition_get(workbook_name)
@expose(content_type="text/plain")
def put(self, workbook_name):
text = request.text
LOG.debug("Update workbook definition [workbook_name=%s, text=%s]" %
(workbook_name, text))
wb = db_api.workbook_definition_put(workbook_name, text)
scheduler.create_associated_triggers(wb)
return wb['definition']
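# Illustrative client interaction (the URL prefix is an assumption based on this
# module living under api/controllers/v1; the exact route depends on how the
# controller is mounted):
#   GET  /v1/workbooks/<workbook_name>/definition  -> returns the definition text
#   PUT  /v1/workbooks/<workbook_name>/definition  -> replaces it with the
#        text/plain request body and (re)creates the associated triggers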
| TimurNurlygayanov/mistral | mistral/api/controllers/v1/workbook_definition.py | Python | apache-2.0 | 1,511 |
from trex_astf_lib.api import *
# TCP buffer-size tunable example
#
# tcp.rxbufsize
# tcp.txbufsize
#
class Prof1():
def __init__(self):
pass
def get_profile(self, **kwargs):
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
c_glob_info = ASTFGlobalInfo()
c_glob_info.tcp.rxbufsize = 8*1024
c_glob_info.tcp.txbufsize = 8*1024
s_glob_info = ASTFGlobalInfo()
s_glob_info.tcp.rxbufsize = 8*1024
s_glob_info.tcp.txbufsize = 8*1024
return ASTFProfile(default_ip_gen=ip_gen,
# Defaults affects all files
default_c_glob_info=c_glob_info,
default_s_glob_info=s_glob_info,
cap_list=[
ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
]
)
def register():
return Prof1()
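# Illustrative: this profile is normally loaded from the TRex ASTF console,
# e.g. (exact flags may differ between TRex versions):
#   trex>start -f astf/param_tcp_rxbufsize_8k.py -m 1000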
| kisel/trex-core | scripts/astf/param_tcp_rxbufsize_8k.py | Python | apache-2.0 | 1,298 |
from pony.orm.core import *
| buddyli/private2w | libs/pony/orm/__init__.py | Python | apache-2.0 | 28 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.data_label import DataLabel
from chaco.plot_label import PlotLabel
# ============= standard library imports ========================
from numpy import max
from traits.api import Bool, Str
# ============= local library imports ==========================
from pychron.pipeline.plot.overlays.mean_indicator_overlay import MovableMixin
try:
class FlowPlotLabel(PlotLabel, MovableMixin):
def overlay(self, component, gc, *args, **kw):
if self.ox:
self.x = self.ox - self.offset_x
self.y = self.oy - self.offset_y
super(FlowPlotLabel, self).overlay(component, gc, *args, **kw)
def hittest(self, pt):
x, y = pt
w, h = self.get_preferred_size()
return abs(x - self.x) < w and abs(y - self.y) < h
except TypeError:
# documentation auto doc hack
class FlowPlotLabel:
pass
class FlowDataLabel(DataLabel):
"""
    this label repositions itself if it doesn't fit within its
    component's bounds.
"""
constrain_x = Bool(True)
constrain_y = Bool(True)
# position_event=Event
id = Str
# _ox=None
# def _draw(self, gc, **kw):
# self.font='modern 18'
# gc.set_font(self.font)
# print 'draw', self.font
# super(FlowDataLabel, self)._draw(gc,**kw)
# def _set_x(self, val):
# super(FlowDataLabel, self)._set_x(val)
# if self._ox is None:
# self._ox = val
# elif self._ox != val:
# self.position_event=(self.x, self.y)
#
# def _set_y(self, val):
# super(FlowDataLabel, self)._set_y(val)
# if val>0:
# self.position_event = (self.x, self.y)
def overlay(self, component, gc, *args, **kw):
# face name was getting set to "Helvetica" by reportlab during pdf generation
# set face_name back to "" to prevent font display issue. see issue #72
self.font.face_name = ""
super(FlowDataLabel, self).overlay(component, gc, *args, **kw)
def do_layout(self, **kw):
DataLabel.do_layout(self, **kw)
ws, hs = self._cached_line_sizes.T
if self.constrain_x:
w = max(ws)
d = self.component.x2 - (self.x + w + 3 * self.border_padding)
if d < 0:
self.x += d
self.x = max((self.x, 0))
if self.constrain_y:
h = max(hs)
self.y = max((self.y, 0))
yd = self.component.y2 - h - 2 * self.border_padding - self.line_spacing
self.y = min((self.y, yd))
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/pipeline/plot/flow_label.py | Python | apache-2.0 | 3,470 |
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import logging
_logger = logging.getLogger('websocket')
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
_logger.addHandler(NullHandler())
_traceEnabled = False
__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
"isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
def enableTrace(traceable, handler = logging.StreamHandler()):
"""
    Turn trace logging on or off.
    traceable: boolean value. If True, trace output is enabled and the given
    handler is attached to the 'websocket' logger at DEBUG level.
"""
global _traceEnabled
_traceEnabled = traceable
if traceable:
_logger.addHandler(handler)
_logger.setLevel(logging.DEBUG)
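# Example (illustrative): applications typically enable tracing once at startup:
#   import websocket
#   websocket.enableTrace(True)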
def dump(title, message):
if _traceEnabled:
_logger.debug("--- " + title + " ---")
_logger.debug(message)
_logger.debug("-----------------------")
def error(msg):
_logger.error(msg)
def warning(msg):
_logger.warning(msg)
def debug(msg):
_logger.debug(msg)
def trace(msg):
if _traceEnabled:
_logger.debug(msg)
def isEnabledForError():
return _logger.isEnabledFor(logging.ERROR)
def isEnabledForDebug():
return _logger.isEnabledFor(logging.DEBUG)
def isEnabledForTrace():
return _traceEnabled
| vert-x3/vertx-web | vertx-web/src/test/sockjs-protocol/websocket/_logging.py | Python | apache-2.0 | 2,165 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import Mock
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
from calvin.runtime.south.endpoint import LocalOutEndpoint, LocalInEndpoint
from calvin.actor.actor import Actor
pytestmark = pytest.mark.unittest
def create_actor(node):
actor_manager = ActorManager(node)
actor_id = actor_manager.new('std.Identity', {})
actor = actor_manager.actors[actor_id]
actor._calvinsys = Mock()
return actor
@pytest.fixture
def actor():
return create_actor(DummyNode())
@pytest.mark.parametrize("port_type,port_name,port_property,value,expected", [
("invalid", "", "", "", False),
("in", "missing", "", "", False),
("out", "missing", "", "", False),
("out", "token", "missing", "", False),
("in", "token", "missing", "", False),
("out", "token", "name", "new_name", True),
("out", "token", "name", "new_name", True),
])
def test_set_port_property(port_type, port_name, port_property, value, expected):
assert actor().set_port_property(port_type, port_name, port_property, value) is expected
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
(False, False, False),
(False, True, False),
(True, False, False),
(True, True, True),
])
def test_did_connect(actor, inport_ret_val, outport_ret_val, expected):
for port in actor.inports.values():
port.is_connected = Mock(return_value=inport_ret_val)
for port in actor.outports.values():
port.is_connected = Mock(return_value=outport_ret_val)
actor.fsm = Mock()
actor.did_connect(None)
if expected:
actor.fsm.transition_to.assert_called_with(Actor.STATUS.ENABLED)
assert actor._calvinsys.scheduler_wakeup.called
else:
assert not actor.fsm.transition_to.called
assert not actor._calvinsys.scheduler_wakeup.called
@pytest.mark.parametrize("inport_ret_val,outport_ret_val,expected", [
(True, True, False),
(True, False, False),
(False, True, False),
(False, False, True),
])
def test_did_disconnect(actor, inport_ret_val, outport_ret_val, expected):
for port in actor.inports.values():
port.is_connected = Mock(return_value=inport_ret_val)
for port in actor.outports.values():
port.is_connected = Mock(return_value=outport_ret_val)
actor.fsm = Mock()
actor.did_disconnect(None)
if expected:
actor.fsm.transition_to.assert_called_with(Actor.STATUS.READY)
else:
assert not actor.fsm.transition_to.called
def test_enabled(actor):
actor.enable()
assert actor.enabled()
actor.disable()
assert not actor.enabled()
def test_connections():
node = DummyNode()
node.id = "node_id"
actor = create_actor(node)
inport = actor.inports['token']
outport = actor.outports['token']
port = Mock()
port.id = "x"
peer_port = Mock()
peer_port.id = "y"
inport.attach_endpoint(LocalInEndpoint(port, peer_port))
outport.attach_endpoint(LocalOutEndpoint(port, peer_port))
assert actor.connections(node) == {
'actor_id': actor.id,
'actor_name': actor.name,
'inports': {inport.id: (node, "y")},
'outports': {outport.id: [(node, "y")]}
}
def test_state(actor):
inport = actor.inports['token']
outport = actor.outports['token']
correct_state = {
'_component_members': set([actor.id]),
'_deployment_requirements': [],
'_managed': set(['dump', '_signature', 'id', '_deployment_requirements', 'name', 'credentials']),
'_signature': None,
'dump': False,
'id': actor.id,
'inports': {'token': {'fifo': {'N': 5,
'fifo': [{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'}],
'read_pos': {inport.id: 0},
'readers': [inport.id],
'tentative_read_pos': {inport.id: 0},
'write_pos': 0},
'id': inport.id,
'name': 'token'}},
'name': '',
'outports': {'token': {'fanout': 1,
'fifo': {'N': 5,
'fifo': [{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'},
{'data': 0, 'type': 'Token'}],
'read_pos': {},
'readers': [],
'tentative_read_pos': {},
'write_pos': 0},
'id': outport.id,
'name': 'token'}}}
test_state = actor.state()
for k, v in correct_state.iteritems():
# Read state use list to support JSON serialization
if isinstance(v, set):
assert set(test_state[k]) == v
else:
assert test_state[k] == v
@pytest.mark.parametrize("prev_signature,new_signature,expected", [
(None, "new_val", "new_val"),
("old_val", "new_val", "old_val")
])
def test_set_signature(actor, prev_signature, new_signature, expected):
actor.signature_set(prev_signature)
actor.signature_set(new_signature)
assert actor._signature == expected
def test_component(actor):
actor.component_add(1)
assert 1 in actor.component_members()
actor.component_add([2, 3])
assert 2 in actor.component_members()
assert 3 in actor.component_members()
actor.component_remove(1)
assert 1 not in actor.component_members()
actor.component_remove([2, 3])
assert 2 not in actor.component_members()
assert 3 not in actor.component_members()
def test_requirements(actor):
assert actor.requirements_get() == []
actor.requirements_add([1, 2, 3])
assert actor.requirements_get() == [1, 2, 3]
actor.requirements_add([4, 5])
assert actor.requirements_get() == [4, 5]
actor.requirements_add([6, 7], extend=True)
assert actor.requirements_get() == [4, 5, 6, 7]
| les69/calvin-base | calvin/tests/test_actor.py | Python | apache-2.0 | 7,248 |
#!/var/www/horizon/.venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| neumerance/deploy | .venv/bin/rst2xml.py | Python | apache-2.0 | 619 |
import logging
import threading
import time
from ballast.discovery import ServerList
from ballast.rule import Rule, RoundRobinRule
from ballast.ping import (
Ping,
SocketPing,
PingStrategy,
SerialPingStrategy
)
class LoadBalancer(object):
DEFAULT_PING_INTERVAL = 30
MAX_PING_TIME = 3
def __init__(self, server_list, rule=None, ping_strategy=None, ping=None, ping_on_start=True):
assert isinstance(server_list, ServerList)
assert rule is None or isinstance(rule, Rule)
assert ping_strategy is None or isinstance(ping_strategy, PingStrategy)
assert ping is None or isinstance(ping, Ping)
# some locks for thread-safety
self._lock = threading.Lock()
self._server_lock = threading.Lock()
self._rule = rule \
if rule is not None \
else RoundRobinRule()
self._ping_strategy = ping_strategy \
if ping_strategy is not None \
else SerialPingStrategy()
self._ping = ping \
if ping is not None \
else SocketPing()
self.max_ping_time = self.MAX_PING_TIME
self._ping_interval = self.DEFAULT_PING_INTERVAL
self._server_list = server_list
self._servers = set()
self._stats = LoadBalancerStats()
self._rule.load_balancer = self
self._logger = logging.getLogger(self.__module__)
# start our background worker
# to periodically ping our servers
self._ping_timer_running = False
self._ping_timer = None
if ping_on_start:
self._start_ping_timer()
@property
def ping_interval(self):
return self._ping_interval
@ping_interval.setter
def ping_interval(self, value):
self._ping_interval = value
if self._ping_timer_running:
self._stop_ping_timer()
self._start_ping_timer()
@property
def max_ping_time(self):
if self._ping is None:
return 0
return self._ping.max_ping_time
@max_ping_time.setter
def max_ping_time(self, value):
if self._ping is not None:
self._ping.max_ping_time = value
@property
def stats(self):
return self._stats
@property
def servers(self):
with self._server_lock:
return set(self._servers)
@property
def reachable_servers(self):
with self._server_lock:
servers = set()
for s in self._servers:
if s.is_alive:
servers.add(s)
return servers
def choose_server(self):
        # choose a server; raises
        # if there are none available
server = self._rule.choose()
return server
def mark_server_down(self, server):
self._logger.debug("Marking server down: %s", server)
server._is_alive = False
def ping(self, server=None):
if server is None:
self._ping_all_servers()
else:
is_alive = self._ping.is_alive(server)
server._is_alive = is_alive
def ping_async(self, server=None):
if server is None:
# self._ping_all_servers()
t = threading.Thread(name='ballast-worker', target=self._ping_all_servers)
t.daemon = True
t.start()
else:
is_alive = self._ping.is_alive(server)
server._is_alive = is_alive
def _ping_all_servers(self):
with self._server_lock:
results = self._ping_strategy.ping(
self._ping,
self._server_list
)
self._servers = set(results)
def _start_ping_timer(self):
with self._lock:
if self._ping_timer_running:
self._logger.debug("Background pinger already running")
return
self._ping_timer_running = True
self._ping_timer = threading.Thread(name='ballast-worker', target=self._ping_loop)
self._ping_timer.daemon = True
self._ping_timer.start()
def _stop_ping_timer(self):
with self._lock:
self._ping_timer_running = False
self._ping_timer = None
def _ping_loop(self):
while self._ping_timer_running:
try:
self._ping_all_servers()
except BaseException as e:
self._logger.error("There was an error pinging servers: %s", e)
time.sleep(self._ping_interval)
class LoadBalancerStats(object):
def get_server_stats(self, server):
pass
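# Illustrative wiring (the concrete ServerList implementation below is an
# assumption; see ballast.discovery for the real classes):
#   servers = SomeServerList(...) # any ballast.discovery.ServerList subclass
#   lb = LoadBalancer(servers, rule=RoundRobinRule(), ping_on_start=True)
#   server = lb.choose_server() # raises if no server is available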
| thomasstreet/ballast | ballast/core.py | Python | apache-2.0 | 4,607 |
import os, re, csv
# regular expressions for capturing the interesting quantities
noise_pattern = 'noise: \[(.+)\]'
res_pattern = '^([0-9.]+$)'
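# Illustrative shape of an output file these patterns expect (values invented):
#   noise: [0.1 0.2 0.3]
#   0.4567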
search_dir = "output"
results_file = '../results.csv'
os.chdir( search_dir )
files = filter( os.path.isfile, os.listdir( '.' ))
#files = [ os.path.join( search_dir, f ) for f in files ] # add path to each file
files.sort( key=lambda x: os.path.getmtime( x ))
results = []
for file in files:
f = open( file )
contents = f.read()
# noise
matches = re.search( noise_pattern, contents, re.DOTALL )
try:
noise = matches.group( 1 )
noise = noise.strip()
noise = noise.split()
except AttributeError:
print "noise error 1: %s" % ( contents )
continue
# rmse
matches = re.search( res_pattern, contents, re.M )
try:
res = matches.group( 1 )
except AttributeError:
print "matches error 2: %s" % ( contents )
continue
results.append( [ res ] + noise )
writer = csv.writer( open( results_file, 'wb' ))
for result in results:
writer.writerow( result ) | jiminliang/msda-denoising | spearmint_variable_noise/output2csv.py | Python | apache-2.0 | 1,028 |
import os
import logging
from mongodb_consistent_backup.Common import LocalCommand
from mongodb_consistent_backup.Pipeline import PoolThread
class TarThread(PoolThread):
def __init__(self, backup_dir, output_file, compression='none', verbose=False, binary="tar"):
super(TarThread, self).__init__(self.__class__.__name__, compression)
self.compression_method = compression
self.backup_dir = backup_dir
self.output_file = output_file
self.verbose = verbose
self.binary = binary
self._command = None
def close(self, exit_code=None, frame=None):
if self._command and not self.stopped:
logging.debug("Stopping running tar command: %s" % self._command.command)
del exit_code
del frame
self._command.close()
self.stopped = True
def run(self):
if os.path.isdir(self.backup_dir):
if not os.path.isfile(self.output_file):
try:
backup_base_dir = os.path.dirname(self.backup_dir)
backup_base_name = os.path.basename(self.backup_dir)
log_msg = "Archiving directory: %s" % self.backup_dir
cmd_flags = ["-C", backup_base_dir, "-c", "-f", self.output_file, "--remove-files"]
if self.do_gzip():
log_msg = "Archiving and compressing directory: %s" % self.backup_dir
cmd_flags.append("-z")
cmd_flags.append(backup_base_name)
logging.info(log_msg)
self.running = True
self._command = LocalCommand(self.binary, cmd_flags, self.verbose)
self.exit_code = self._command.run()
except Exception, e:
return self.result(False, "Failed archiving file: %s!" % self.output_file, e)
finally:
self.running = False
self.stopped = True
self.completed = True
else:
return self.result(False, "Output file: %s already exists!" % self.output_file, None)
return self.result(True, "Archiving successful.", None)
def result(self, success, message, error):
return {
"success": success,
"message": message,
"error": error,
"directory": self.backup_dir,
"exit_code": self.exit_code
}
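# Illustrative direct use (normally the backup pipeline's thread pool drives
# run(); the paths are examples and 'gzip' assumes the base PoolThread treats
# it as a gzip-enabled compression method):
#   thread = TarThread("/backups/rs0", "/backups/rs0.tar", compression="gzip")
#   result = thread.run()
#   result["success"] # -> True when archiving succeeded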
| Percona-Lab/mongodb_consistent_backup | mongodb_consistent_backup/Archive/Tar/TarThread.py | Python | apache-2.0 | 2,555 |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/garch.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def garch(X: Matrix,
kmax: int,
momentum: float,
start_stepsize: float,
end_stepsize: float,
start_vicinity: float,
end_vicinity: float,
sim_seed: int,
verbose: bool):
"""
:param X: The input Matrix to apply Arima on.
:param kmax: Number of iterations
:param momentum: Momentum for momentum-gradient descent (set to 0 to deactivate)
:param start_stepsize: Initial gradient-descent stepsize
:param end_stepsize: gradient-descent stepsize at end (linear descent)
:param start_vicinity: proportion of randomness of restart-location for gradient descent at beginning
:param end_vicinity: same at end (linear decay)
:param sim_seed: seed for simulation of process on fitted coefficients
:param verbose: verbosity, comments during fitting
    :return: 'OperationNode' containing, in order: the simulated garch(1,1) process on the fitted coefficients;
        the variances of the simulated fitted process; the constant term of the fitted process;
        the 1st ARCH-coefficient of the fitted process; and the 1st GARCH-coefficient of the fitted process.
        (Drawback: slow convergence of the optimization, a sort of simulated annealing/gradient descent.)
"""
params_dict = {'X': X, 'kmax': kmax, 'momentum': momentum, 'start_stepsize': start_stepsize, 'end_stepsize': end_stepsize, 'start_vicinity': start_vicinity, 'end_vicinity': end_vicinity, 'sim_seed': sim_seed, 'verbose': verbose}
vX_0 = Matrix(X.sds_context, '')
vX_1 = Matrix(X.sds_context, '')
vX_2 = Scalar(X.sds_context, '')
vX_3 = Scalar(X.sds_context, '')
vX_4 = Scalar(X.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4, ]
op = MultiReturn(X.sds_context, 'garch', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
vX_3._unnamed_input_nodes = [op]
vX_4._unnamed_input_nodes = [op]
return op
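# Illustrative usage sketch (hyperparameter values are arbitrary examples;
# 'returns' is assumed to be a 1-column numpy array holding a return series):
#   from systemds.context import SystemDSContext
#   with SystemDSContext() as sds:
#       X = sds.from_numpy(returns)
#       res = garch(X, kmax=500, momentum=0.5, start_stepsize=0.1,
#                   end_stepsize=0.001, start_vicinity=0.5, end_vicinity=0.1,
#                   sim_seed=42, verbose=False).compute()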
| apache/incubator-systemml | src/main/python/systemds/operator/algorithm/builtin/garch.py | Python | apache-2.0 | 3,230 |
"""
Tests for login and logout.
"""
import datetime
from unittest.mock import patch
import responses
import quilt3
from .utils import QuiltTestCase
class TestSession(QuiltTestCase):
@patch('quilt3.session.open_url')
@patch('quilt3.session.input', return_value='123456')
@patch('quilt3.session.login_with_token')
def test_login(self, mock_login_with_token, mock_input, mock_open_url):
quilt3.login()
url = quilt3.session.get_registry_url()
mock_open_url.assert_called_with(f'{url}/login')
mock_login_with_token.assert_called_with('123456')
@patch('quilt3.session._save_auth')
@patch('quilt3.session._save_credentials')
def test_login_with_token(self, mock_save_credentials, mock_save_auth):
url = quilt3.session.get_registry_url()
mock_auth = dict(
refresh_token='refresh-token',
access_token='access-token',
expires_at=123456789
)
self.requests_mock.add(
responses.POST,
f'{url}/api/token',
json=mock_auth,
status=200
)
self.requests_mock.add(
responses.GET,
f'{url}/api/auth/get_credentials',
json=dict(
AccessKeyId='access-key',
SecretAccessKey='secret-key',
SessionToken='session-token',
Expiration="2019-05-28T23:58:07+00:00"
),
status=200
)
quilt3.session.login_with_token('123456')
mock_save_auth.assert_called_with({url: mock_auth})
mock_save_credentials.assert_called_with(dict(
access_key='access-key',
secret_key='secret-key',
token='session-token',
expiry_time="2019-05-28T23:58:07+00:00"
))
@patch('quilt3.session._save_credentials')
@patch('quilt3.session._load_credentials')
def test_create_botocore_session(self, mock_load_credentials, mock_save_credentials):
def format_date(date):
return date.replace(tzinfo=datetime.timezone.utc, microsecond=0).isoformat()
# Test good credentials.
future_date = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
mock_load_credentials.return_value = dict(
access_key='access-key',
secret_key='secret-key',
token='session-token',
expiry_time=format_date(future_date)
)
session = quilt3.session.create_botocore_session()
credentials = session.get_credentials()
assert credentials.access_key == 'access-key'
assert credentials.secret_key == 'secret-key'
assert credentials.token == 'session-token'
mock_save_credentials.assert_not_called()
# Test expired credentials.
past_date = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
mock_load_credentials.return_value = dict(
access_key='access-key',
secret_key='secret-key',
token='session-token',
expiry_time=format_date(past_date)
)
url = quilt3.session.get_registry_url()
self.requests_mock.add(
responses.GET,
f'{url}/api/auth/get_credentials',
json=dict(
AccessKeyId='access-key2',
SecretAccessKey='secret-key2',
SessionToken='session-token2',
Expiration=format_date(future_date)
),
status=200
)
session = quilt3.session.create_botocore_session()
credentials = session.get_credentials()
assert credentials.access_key == 'access-key2'
assert credentials.secret_key == 'secret-key2'
assert credentials.token == 'session-token2'
mock_save_credentials.assert_called()
def test_logged_in(self):
registry_url = quilt3.session.get_registry_url()
other_registry_url = registry_url + 'other'
mock_auth = dict(
refresh_token='refresh-token',
access_token='access-token',
expires_at=123456789,
)
with patch('quilt3.session._load_auth', return_value={registry_url: mock_auth}) as mocked_load_auth:
assert quilt3.logged_in() == 'https://example.com'
mocked_load_auth.assert_called_once()
with patch('quilt3.session._load_auth', return_value={other_registry_url: mock_auth}) as mocked_load_auth:
assert quilt3.logged_in() is None
mocked_load_auth.assert_called_once()
| quiltdata/quilt-compiler | api/python/tests/test_session.py | Python | apache-2.0 | 4,576 |
import logging
import re
import socket
from mopidy.config import validators
from mopidy.internal import log, path
def decode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char.encode(encoding="unicode-escape").decode(), char
)
return value
def encode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char, char.encode(encoding="unicode-escape").decode()
)
return value
class DeprecatedValue:
pass
class ConfigValue:
"""Represents a config key's value and how to handle it.
Normally you will only be interacting with sub-classes for config values
that encode either deserialization behavior and/or validation.
Each config value should be used for the following actions:
1. Deserializing from a raw string and validating, raising ValueError on
failure.
2. Serializing a value back to a string that can be stored in a config.
3. Formatting a value to a printable form (useful for masking secrets).
:class:`None` values should not be deserialized, serialized or formatted,
the code interacting with the config should simply skip None config values.
"""
def deserialize(self, value):
"""Cast raw string to appropriate type."""
return decode(value)
def serialize(self, value, display=False):
"""Convert value back to string for saving."""
if value is None:
return ""
return str(value)
class Deprecated(ConfigValue):
"""Deprecated value.
Used for ignoring old config values that are no longer in use, but should
not cause the config parser to crash.
"""
def deserialize(self, value):
return DeprecatedValue()
def serialize(self, value, display=False):
return DeprecatedValue()
class String(ConfigValue):
"""String value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = choices
def deserialize(self, value):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
validators.validate_choice(value, self._choices)
return value
def serialize(self, value, display=False):
if value is None:
return ""
return encode(value)
class Secret(String):
"""Secret string value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
Should be used for passwords, auth tokens etc. Will mask value when being
displayed.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = None # Choices doesn't make sense for secrets
def serialize(self, value, display=False):
if value is not None and display:
return "********"
return super().serialize(value, display)
class Integer(ConfigValue):
"""Integer value."""
def __init__(
self, minimum=None, maximum=None, choices=None, optional=False
):
self._required = not optional
self._minimum = minimum
self._maximum = maximum
self._choices = choices
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
value = int(value)
validators.validate_choice(value, self._choices)
validators.validate_minimum(value, self._minimum)
validators.validate_maximum(value, self._maximum)
return value
class Boolean(ConfigValue):
"""Boolean value.
Accepts ``1``, ``yes``, ``true``, and ``on`` with any casing as
:class:`True`.
Accepts ``0``, ``no``, ``false``, and ``off`` with any casing as
:class:`False`.
"""
true_values = ("1", "yes", "true", "on")
false_values = ("0", "no", "false", "off")
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
if value.lower() in self.true_values:
return True
elif value.lower() in self.false_values:
return False
raise ValueError(f"invalid value for boolean: {value!r}")
def serialize(self, value, display=False):
if value is True:
return "true"
elif value in (False, None):
return "false"
else:
raise ValueError(f"{value!r} is not a boolean")
class List(ConfigValue):
"""List value.
Supports elements split by commas or newlines. Newlines take presedence and
empty list items will be filtered out.
"""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
if "\n" in value:
values = re.split(r"\s*\n\s*", value)
else:
values = re.split(r"\s*,\s*", value)
values = tuple(v.strip() for v in values if v.strip())
validators.validate_required(values, self._required)
return tuple(values)
def serialize(self, value, display=False):
if not value:
return ""
return "\n " + "\n ".join(encode(v) for v in value if v)
class LogColor(ConfigValue):
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), log.COLORS)
return value.lower()
def serialize(self, value, display=False):
if value.lower() in log.COLORS:
return encode(value.lower())
return ""
class LogLevel(ConfigValue):
"""Log level value.
Expects one of ``critical``, ``error``, ``warning``, ``info``, ``debug``,
``trace``, or ``all``, with any casing.
"""
levels = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
"trace": log.TRACE_LOG_LEVEL,
"all": logging.NOTSET,
}
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), self.levels.keys())
return self.levels.get(value.lower())
def serialize(self, value, display=False):
lookup = {v: k for k, v in self.levels.items()}
if value in lookup:
return encode(lookup[value])
return ""
class Hostname(ConfigValue):
"""Network hostname value."""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value, display=False):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
socket_path = path.get_unix_socket_path(value)
if socket_path is not None:
path_str = Path(not self._required).deserialize(socket_path)
return f"unix:{path_str}"
try:
socket.getaddrinfo(value, None)
except OSError:
raise ValueError("must be a resolveable hostname or valid IP")
return value
class Port(Integer):
"""Network port value.
Expects integer in the range 0-65535, zero tells the kernel to simply
allocate a port for us.
"""
def __init__(self, choices=None, optional=False):
super().__init__(
minimum=0, maximum=2 ** 16 - 1, choices=choices, optional=optional
)
class _ExpandedPath(str):
def __new__(cls, original, expanded):
return super().__new__(cls, expanded)
def __init__(self, original, expanded):
self.original = original
class Path(ConfigValue):
"""File system path.
The following expansions of the path will be done:
- ``~`` to the current user's home directory
- ``$XDG_CACHE_DIR`` according to the XDG spec
- ``$XDG_CONFIG_DIR`` according to the XDG spec
- ``$XDG_DATA_DIR`` according to the XDG spec
- ``$XDG_MUSIC_DIR`` according to the XDG spec
"""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value).strip()
expanded = path.expand_path(value)
validators.validate_required(value, self._required)
validators.validate_required(expanded, self._required)
if not value or expanded is None:
return None
return _ExpandedPath(value, expanded)
def serialize(self, value, display=False):
if isinstance(value, _ExpandedPath):
value = value.original
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
return value
| kingosticks/mopidy | mopidy/config/types.py | Python | apache-2.0 | 9,146 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.operators.qubole`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.qubole.operators.qubole import QuboleOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.qubole.operators.qubole`.",
DeprecationWarning,
stacklevel=2,
)
| nathanielvarona/airflow | airflow/contrib/operators/qubole_operator.py | Python | apache-2.0 | 1,158 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Lasso model fit with Lars using BIC or AIC for model selection.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
from numpy import finfo
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LassoLarsIC(ScikitLearnBase):
"""
Lasso model fit with Lars using BIC or AIC for model selection
"""
info = {'problemtype':'regression', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.LassoLarsIC
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(LassoLarsIC, cls).getInputSpecification()
specs.description = r"""The \xmlNode{LassoLarsIC} (\textit{Lasso model fit with Lars using BIC or AIC for model selection})
is a Lasso model fit with Lars using BIC or AIC for model selection.
The optimization objective for Lasso is:
$(1 / (2 * n\_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1$
                         AIC is the Akaike information criterion and BIC is the Bayesian information criterion. Such criteria
are useful to select the value of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should explain well the data
while being simple.
\zNormalizationNotPerformed{LassoLarsIC}
"""
specs.addSub(InputData.parameterInputFactory("criterion", contentType=InputTypes.makeEnumType("criterion", "criterionType",['bic', 'aic']),
descr=r"""The type of criterion to use.""", default='aic'))
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting the mean and
dividing by the l2-norm.""", default=True))
specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of iterations.""", default=500))
specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
For sparse input this option is always True to preserve sparsity.""", default='auto'))
specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
descr=r"""The machine-precision regularization in the computation of the Cholesky
diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
parameter in some iterative optimization-based algorithms, this parameter does not
control the tolerance of the optimization.""", default=finfo(float).eps))
specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
descr=r"""Amount of verbosity.""", default=False))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute',
'eps','positive','criterion', 'verbose'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
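#
# Illustrative input sketch (node and parameter names follow the spec above;
# the ROM name and the Features/Target lists are hypothetical):
#
#   <ROM name="lassoLarsIC" subType="LassoLarsIC">
#     <Features>x1,x2</Features>
#     <Target>y</Target>
#     <criterion>bic</criterion>
#     <max_iter>500</max_iter>
#   </ROM>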
| idaholab/raven | framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLarsIC.py | Python | apache-2.0 | 6,386 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import time
from enum import Enum
from pyflink.datastream import TimerService
from pyflink.datastream.timerservice import InternalTimer, K, N, InternalTimerService
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
class InternalTimerImpl(InternalTimer[K, N]):
def __init__(self, timestamp: int, key: K, namespace: N):
self._timestamp = timestamp
self._key = key
self._namespace = namespace
def get_timestamp(self) -> int:
return self._timestamp
def get_key(self) -> K:
return self._key
def get_namespace(self) -> N:
return self._namespace
def __hash__(self):
result = int(self._timestamp ^ (self._timestamp >> 32))
result = 31 * result + hash(tuple(self._key))
result = 31 * result + hash(self._namespace)
return result
def __eq__(self, other):
return self.__class__ == other.__class__ and self._timestamp == other._timestamp \
and self._key == other._key and self._namespace == other._namespace
class TimerOperandType(Enum):
REGISTER_EVENT_TIMER = 0
REGISTER_PROC_TIMER = 1
DELETE_EVENT_TIMER = 2
DELETE_PROC_TIMER = 3
class InternalTimerServiceImpl(InternalTimerService[N]):
"""
Internal implementation of InternalTimerService.
"""
def __init__(self, keyed_state_backend: RemoteKeyedStateBackend):
self._keyed_state_backend = keyed_state_backend
self._current_watermark = None
self.timers = collections.OrderedDict()
def current_processing_time(self):
return int(time.time() * 1000)
def current_watermark(self):
return self._current_watermark
def advance_watermark(self, watermark: int):
self._current_watermark = watermark
def register_processing_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.REGISTER_PROC_TIMER, InternalTimerImpl(t, current_key, namespace))
self.timers[timer] = None
def register_event_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.REGISTER_EVENT_TIMER,
InternalTimerImpl(t, current_key, namespace))
self.timers[timer] = None
def delete_processing_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.DELETE_PROC_TIMER, InternalTimerImpl(t, current_key, namespace))
self.timers[timer] = None
def delete_event_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.DELETE_EVENT_TIMER, InternalTimerImpl(t, current_key, namespace))
self.timers[timer] = None
class TimerServiceImpl(TimerService):
"""
Internal implementation of TimerService.
"""
def __init__(self, internal_timer_service: InternalTimerServiceImpl):
self._internal = internal_timer_service
self.timers = self._internal.timers
def current_processing_time(self) -> int:
return self._internal.current_processing_time()
def current_watermark(self) -> int:
return self._internal.current_watermark()
def advance_watermark(self, wm):
self._internal.advance_watermark(wm)
def register_processing_time_timer(self, t: int):
self._internal.register_processing_time_timer(None, t)
def register_event_time_timer(self, t: int):
self._internal.register_event_time_timer(None, t)
def delete_processing_time_timer(self, t: int):
self._internal.delete_processing_time_timer(None, t)
def delete_event_time_timer(self, t: int):
self._internal.delete_event_time_timer(None, t)
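# Illustrative sketch (not part of Flink): InternalTimerImpl instances with the
# same (timestamp, key, namespace) compare and hash equal, which is what lets
# the OrderedDict in InternalTimerServiceImpl deduplicate repeated
# registrations of the same timer. The key must be iterable for __hash__.
def _demo_timer_identity():
    t1 = InternalTimerImpl(1000, (b'user-1',), 'window-ns')
    t2 = InternalTimerImpl(1000, (b'user-1',), 'window-ns')
    assert t1 == t2 and hash(t1) == hash(t2)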
| clarkyzl/flink | flink-python/pyflink/fn_execution/timerservice_impl.py | Python | apache-2.0 | 4,824 |
from mainapp import create_app
app = create_app()
if __name__ == '__main__':
app.run(host='0.0.0.0')
| jonaubf/flask-mongo-testapp | testapp/run.py | Python | apache-2.0 | 107 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 EC2 Credentials action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class CreateEC2Creds(show.ShowOne):
"""Create EC2 credentials"""
log = logging.getLogger(__name__ + ".CreateEC2Creds")
def get_parser(self, prog_name):
parser = super(CreateEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_('Specify a project [admin only]'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.project:
project = utils.find_resource(
identity_client.tenants,
parsed_args.project,
).id
else:
# Get the project from the current auth
project = identity_client.auth_tenant_id
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.create(user, project)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
class DeleteEC2Creds(command.Command):
"""Delete EC2 credentials"""
log = logging.getLogger(__name__ + '.DeleteEC2Creds')
def get_parser(self, prog_name):
parser = super(DeleteEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
identity_client.ec2.delete(user, parsed_args.access_key)
class ListEC2Creds(lister.Lister):
"""List EC2 credentials"""
log = logging.getLogger(__name__ + '.ListEC2Creds')
def get_parser(self, prog_name):
parser = super(ListEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
columns = ('access', 'secret', 'tenant_id', 'user_id')
column_headers = ('Access', 'Secret', 'Project ID', 'User ID')
data = identity_client.ec2.list(user)
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class ShowEC2Creds(show.ShowOne):
"""Show EC2 credentials"""
log = logging.getLogger(__name__ + '.ShowEC2Creds')
def get_parser(self, prog_name):
parser = super(ShowEC2Creds, self).get_parser(prog_name)
parser.add_argument(
'access_key',
metavar='<access-key>',
help=_('Credentials access key'),
)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Specify a user [admin only]'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
).id
else:
# Get the user from the current auth
user = identity_client.auth_user_id
creds = identity_client.ec2.get(user, parsed_args.access_key)
info = {}
info.update(creds._info)
return zip(*sorted(six.iteritems(info)))
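# Illustrative CLI usage once these classes are wired into the `openstack`
# entry points (command and option names here mirror the parsers above):
#
#   openstack ec2 credentials create --project demo --user alice
#   openstack ec2 credentials list --user alice
#   openstack ec2 credentials show <access-key>
#   openstack ec2 credentials delete <access-key>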
| varunarya10/python-openstackclient | openstackclient/identity/v2_0/ec2creds.py | Python | apache-2.0 | 5,662 |
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from climate.openstack.common import gettextutils
from climate.openstack.common import importutils
gettextutils.install('climate')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'climate'
elif value.strip() != value:
return '"%s"' % value
return value
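# Illustrative behaviour of _sanitize_default (values are examples; the 'host'
# case assumes the value matches the local hostname):
#   _sanitize_default('state_path', BASEDIR + '/state')
#       -> '/usr/lib/python/site-packages/state'
#   _sanitize_default('host', socket.gethostname())  -> 'climate'
#   _sanitize_default('anything', ' padded ')        -> '" padded "'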
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
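# Illustrative invocation (module and file paths are hypothetical):
#
#   python generator.py -m climate.config -l oslo.messaging \
#       climate/api/app.py climate/manager/service.py > climate.conf.sample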
| frossigneux/blazar | climate/openstack/common/config/generator.py | Python | apache-2.0 | 10,412 |
"""
IP Types
"""
import logging
from ipaddress import ip_address
from socket import AF_INET, AF_INET6
from vpp_papi import VppEnum
from vpp_object import VppObject
try:
text_type = unicode
except NameError:
text_type = str
_log = logging.getLogger(__name__)
class DpoProto:
DPO_PROTO_IP4 = 0
DPO_PROTO_IP6 = 1
DPO_PROTO_MPLS = 2
DPO_PROTO_ETHERNET = 3
DPO_PROTO_BIER = 4
DPO_PROTO_NSH = 5
INVALID_INDEX = 0xffffffff
def get_dpo_proto(addr):
if ip_address(addr).version == 6:
return DpoProto.DPO_PROTO_IP6
else:
return DpoProto.DPO_PROTO_IP4
class VppIpAddressUnion():
def __init__(self, addr):
self.addr = addr
self.ip_addr = ip_address(text_type(self.addr))
def encode(self):
if self.version == 6:
return {'ip6': self.ip_addr}
else:
return {'ip4': self.ip_addr}
@property
def version(self):
return self.ip_addr.version
@property
def address(self):
return self.addr
@property
def length(self):
return self.ip_addr.max_prefixlen
@property
def bytes(self):
return self.ip_addr.packed
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.ip_addr == other.ip_addr
elif hasattr(other, "ip4") and hasattr(other, "ip6"):
# vl_api_address_union_t
if 4 == self.version:
return self.ip_addr == other.ip4
else:
return self.ip_addr == other.ip6
else:
raise Exception("Comparing VppIpAddressUnions:%s"
" with incomparable type: %s",
self, other)
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.ip_addr)
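# Illustrative sketch: VppIpAddressUnion hides the v4/v6 split behind one type.
def _demo_address_union():
    a4 = VppIpAddressUnion("10.0.0.1")
    a6 = VppIpAddressUnion("2001:db8::1")
    assert (a4.version, a4.length) == (4, 32)
    assert (a6.version, a6.length) == (6, 128)
    assert a4.encode() == {'ip4': a4.ip_addr}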
class VppIpMPrefix():
def __init__(self, saddr, gaddr, glen):
self.saddr = saddr
self.gaddr = gaddr
self.glen = glen
if ip_address(self.saddr).version != \
ip_address(self.gaddr).version:
raise ValueError('Source and group addresses must be of the '
'same address family.')
def encode(self):
return {
'af': ip_address(self.gaddr).vapi_af,
'grp_address': {
ip_address(self.gaddr).vapi_af_name: self.gaddr
},
'src_address': {
ip_address(self.saddr).vapi_af_name: self.saddr
},
'grp_address_length': self.glen,
}
@property
def length(self):
return self.glen
@property
def version(self):
return ip_address(self.gaddr).version
def __str__(self):
return "(%s,%s)/%d" % (self.saddr, self.gaddr, self.glen)
def __eq__(self, other):
if isinstance(other, self.__class__):
            return (self.glen == other.glen and
                    self.gaddr == other.gaddr and
                    self.saddr == other.saddr)
elif (hasattr(other, "grp_address_length") and
hasattr(other, "grp_address") and
hasattr(other, "src_address")):
# vl_api_mprefix_t
if 4 == self.version:
return (self.glen == other.grp_address_length and
self.gaddr == str(other.grp_address.ip4) and
self.saddr == str(other.src_address.ip4))
else:
return (self.glen == other.grp_address_length and
self.gaddr == str(other.grp_address.ip6) and
self.saddr == str(other.src_address.ip6))
return NotImplemented
class VppIpPuntPolicer(VppObject):
def __init__(self, test, policer_index, is_ip6=False):
self._test = test
self._policer_index = policer_index
self._is_ip6 = is_ip6
def add_vpp_config(self):
self._test.vapi.ip_punt_police(policer_index=self._policer_index,
is_ip6=self._is_ip6, is_add=True)
def remove_vpp_config(self):
self._test.vapi.ip_punt_police(policer_index=self._policer_index,
is_ip6=self._is_ip6, is_add=False)
    def query_vpp_config(self):
        # No dump API for punt policers; treat the config as unverifiable.
        return False
class VppIpPuntRedirect(VppObject):
def __init__(self, test, rx_index, tx_index, nh_addr):
self._test = test
self._rx_index = rx_index
self._tx_index = tx_index
self._nh_addr = ip_address(nh_addr)
def encode(self):
return {"rx_sw_if_index": self._rx_index,
"tx_sw_if_index": self._tx_index, "nh": self._nh_addr}
def add_vpp_config(self):
self._test.vapi.ip_punt_redirect(punt=self.encode(), is_add=True)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.ip_punt_redirect(punt=self.encode(), is_add=False)
def get_vpp_config(self):
is_ipv6 = True if self._nh_addr.version == 6 else False
return self._test.vapi.ip_punt_redirect_dump(
sw_if_index=self._rx_index, is_ipv6=is_ipv6)
def query_vpp_config(self):
if self.get_vpp_config():
return True
return False
class VppIpPathMtu(VppObject):
def __init__(self, test, nh, pmtu, table_id=0):
self._test = test
self.nh = nh
self.pmtu = pmtu
self.table_id = table_id
def add_vpp_config(self):
self._test.vapi.ip_path_mtu_update(pmtu={'nh': self.nh,
'table_id': self.table_id,
'path_mtu': self.pmtu})
self._test.registry.register(self, self._test.logger)
return self
def modify(self, pmtu):
self.pmtu = pmtu
self._test.vapi.ip_path_mtu_update(pmtu={'nh': self.nh,
'table_id': self.table_id,
'path_mtu': self.pmtu})
return self
def remove_vpp_config(self):
self._test.vapi.ip_path_mtu_update(pmtu={'nh': self.nh,
'table_id': self.table_id,
'path_mtu': 0})
def query_vpp_config(self):
ds = list(self._test.vapi.vpp.details_iter(
self._test.vapi.ip_path_mtu_get))
for d in ds:
if self.nh == str(d.pmtu.nh) \
and self.table_id == d.pmtu.table_id \
and self.pmtu == d.pmtu.path_mtu:
return True
return False
def object_id(self):
return ("ip-path-mtu-%d-%s-%d" % (self.table_id,
self.nh,
self.pmtu))
def __str__(self):
return self.object_id()
| FDio/vpp | test/vpp_ip.py | Python | apache-2.0 | 6,921 |
import numpy as np
from math import sin, pi, cos
from banti.glyph import Glyph
halfsize = 40
size = 2*halfsize + 1
# Trace a 270-degree arc of radius `halfsize` into a square bitmap.
picture = np.zeros((size, size))
for t in range(-135, 135):
x = round(halfsize + halfsize * cos(pi * t / 180))
y = round(halfsize + halfsize * sin(pi * t / 180))
picture[x][y] = 1
zoomsz = 1 * halfsize
b = Glyph(['O', 0, 0, size, size, 0, 0, 0, 0, None])
b.set_pix(picture)
# Stamp the arc at 15-degree steps around a circle and accumulate the union.
c = Glyph()
for t in range(0, 360, 15):
x = round(zoomsz + zoomsz * cos(pi * t / 180))
y = round(zoomsz + zoomsz * sin(pi * t / 180))
b.set_xy_wh((x, y, size, size))
c = c + b
print(b)
print(c) | rakeshvar/telugu_ocr_banti | tests/glyph_test.py | Python | apache-2.0 | 613 |
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path, hide_url
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import (
RemoteNotFoundError,
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$')
def looks_like_hash(sha):
return bool(HASH_REGEX.match(sha))
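# e.g. looks_like_hash('1f' * 20) -> True (40 hex chars);
#      looks_like_hash('v1.0') -> False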
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
# Prevent the user's environment variables from interfering with pip:
# https://github.com/pypa/pip/issues/1130
unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
default_arg_rev = 'HEAD'
@staticmethod
def get_base_rev_args(rev):
return [rev]
def is_immutable_rev_checkout(self, url, dest):
# type: (str, str) -> bool
_, rev_options = self.get_url_rev_options(hide_url(url))
if not rev_options.rev:
return False
if not self.is_commit_id_equal(dest, rev_options.rev):
# the current commit is different from rev,
# which means rev was something else than a commit hash
return False
# return False in the rare case rev is both a commit hash
# and a tag or a branch; we don't want to cache in that case
# because that branch/tag could point to something else in the future
is_tag_or_branch = bool(
self.get_revision_sha(dest, rev_options.rev)[0]
)
return not is_tag_or_branch
def get_git_version(self):
VERSION_PFX = 'git version '
version = self.run_command(
['version'], show_stdout=False, stdout_only=True
)
if version.startswith(VERSION_PFX):
version = version[len(VERSION_PFX):].split()[0]
else:
version = ''
# get first 3 positions of the git version because
# on windows it is x.y.z.windows.t, and this parses as
        # LegacyVersion which is always smaller than a Version.
version = '.'.join(version.split('.')[:3])
return parse_version(version)
@classmethod
def get_current_branch(cls, location):
"""
Return the current branch, or None if HEAD isn't at a branch
(e.g. detached HEAD).
"""
# git-symbolic-ref exits with empty stdout if "HEAD" is a detached
# HEAD rather than a symbolic ref. In addition, the -q causes the
# command to exit with status code 1 instead of 128 in this case
# and to suppress the message to stderr.
args = ['symbolic-ref', '-q', 'HEAD']
output = cls.run_command(
args,
extra_ok_returncodes=(1, ),
show_stdout=False,
stdout_only=True,
cwd=location,
)
ref = output.strip()
if ref.startswith('refs/heads/'):
return ref[len('refs/heads/'):]
return None
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Git repository at the url to the destination location"""
if not location.endswith('/'):
location = location + '/'
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir.path
)
@classmethod
def get_revision_sha(cls, dest, rev):
"""
Return (sha_or_none, is_branch), where sha_or_none is a commit hash
if the revision names a remote branch or tag, otherwise None.
Args:
dest: the repository directory.
rev: the revision name.
"""
# Pass rev to pre-filter the list.
output = cls.run_command(
['show-ref', rev],
cwd=dest,
show_stdout=False,
stdout_only=True,
on_returncode='ignore',
)
refs = {}
for line in output.strip().splitlines():
try:
sha, ref = line.split()
except ValueError:
# Include the offending line to simplify troubleshooting if
# this error ever occurs.
raise ValueError('unexpected show-ref line: {!r}'.format(line))
refs[ref] = sha
branch_ref = 'refs/remotes/origin/{}'.format(rev)
tag_ref = 'refs/tags/{}'.format(rev)
sha = refs.get(branch_ref)
if sha is not None:
return (sha, True)
sha = refs.get(tag_ref)
return (sha, False)
@classmethod
def _should_fetch(cls, dest, rev):
"""
Return true if rev is a ref or is a commit that we don't have locally.
Branches and tags are not considered in this method because they are
assumed to be always available locally (which is a normal outcome of
``git clone`` and ``git fetch --tags``).
"""
if rev.startswith("refs/"):
# Always fetch remote refs.
return True
if not looks_like_hash(rev):
# Git fetch would fail with abbreviated commits.
return False
if cls.has_commit(dest, rev):
# Don't fetch if we have the commit locally.
return False
return True
@classmethod
def resolve_revision(cls, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> RevOptions
"""
Resolve a revision to a new RevOptions object with the SHA1 of the
branch, tag, or ref if found.
Args:
rev_options: a RevOptions object.
"""
rev = rev_options.arg_rev
# The arg_rev property's implementation for Git ensures that the
# rev return value is always non-None.
assert rev is not None
sha, is_branch = cls.get_revision_sha(dest, rev)
if sha is not None:
rev_options = rev_options.make_new(sha)
rev_options.branch_name = rev if is_branch else None
return rev_options
# Do not show a warning for the common case of something that has
# the form of a Git commit hash.
if not looks_like_hash(rev):
logger.warning(
"Did not find branch or tag '%s', assuming revision or ref.",
rev,
)
if not cls._should_fetch(dest, rev):
return rev_options
# fetch the requested revision
cls.run_command(
make_command('fetch', '-q', url, rev_options.to_args()),
cwd=dest,
)
# Change the revision to the SHA of the ref we fetched
sha = cls.get_revision(dest, rev='FETCH_HEAD')
rev_options = rev_options.make_new(sha)
return rev_options
@classmethod
def is_commit_id_equal(cls, dest, name):
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return cls.get_revision(dest) == name
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest))
self.run_command(make_command('clone', '-q', url, dest))
if rev_options.rev:
# Then a specific revision was requested.
rev_options = self.resolve_revision(dest, url, rev_options)
branch_name = getattr(rev_options, 'branch_name', None)
if branch_name is None:
# Only do a checkout if the current commit id doesn't match
# the requested revision.
if not self.is_commit_id_equal(dest, rev_options.rev):
cmd_args = make_command(
'checkout', '-q', rev_options.to_args(),
)
self.run_command(cmd_args, cwd=dest)
elif self.get_current_branch(dest) != branch_name:
# Then a specific branch was requested, and that branch
# is not yet checked out.
track_branch = 'origin/{}'.format(branch_name)
cmd_args = [
'checkout', '-b', branch_name, '--track', track_branch,
]
self.run_command(cmd_args, cwd=dest)
#: repo may contain submodules
self.update_submodules(dest)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(
make_command('config', 'remote.origin.url', url),
cwd=dest,
)
cmd_args = make_command('checkout', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
self.update_submodules(dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
# First fetch changes from the default remote
if self.get_git_version() >= parse_version('1.9.0'):
# fetch tags in addition to everything else
self.run_command(['fetch', '-q', '--tags'], cwd=dest)
else:
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
rev_options = self.resolve_revision(dest, url, rev_options)
cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
#: update submodules
self.update_submodules(dest)
@classmethod
def get_remote_url(cls, location):
"""
Return URL of the first remote encountered.
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
# We need to pass 1 for extra_ok_returncodes since the command
# exits with return code 1 if there are no matching lines.
stdout = cls.run_command(
['config', '--get-regexp', r'remote\..*\.url'],
extra_ok_returncodes=(1, ),
show_stdout=False,
stdout_only=True,
cwd=location,
)
remotes = stdout.splitlines()
try:
found_remote = remotes[0]
except IndexError:
raise RemoteNotFoundError
for remote in remotes:
if remote.startswith('remote.origin.url '):
found_remote = remote
break
url = found_remote.split(' ')[1]
return url.strip()
@classmethod
def has_commit(cls, location, rev):
"""
Check if rev is a commit that is available in the local repository.
"""
try:
cls.run_command(
['rev-parse', '-q', '--verify', "sha^" + rev],
cwd=location,
log_failed_cmd=False,
)
except InstallationError:
return False
else:
return True
@classmethod
def get_revision(cls, location, rev=None):
if rev is None:
rev = 'HEAD'
current_rev = cls.run_command(
['rev-parse', rev],
show_stdout=False,
stdout_only=True,
cwd=location,
)
return current_rev.strip()
@classmethod
def get_subdirectory(cls, location):
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
git_dir = cls.run_command(
['rev-parse', '--git-dir'],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
repo_root = os.path.abspath(os.path.join(git_dir, '..'))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes don't
work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
# Works around an apparent Git bug
# (see https://article.gmane.org/gmane.comp.version-control.git/146500)
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
if '://' not in url:
assert 'file:' not in url
url = url.replace('git+', 'git+ssh://')
url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
url = url.replace('ssh://', '')
else:
url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
return url, rev, user_pass
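    # Illustrative example (hypothetical URL): 'git+user@host:repo.git' has no
    # '://', so it is temporarily rewritten to 'git+ssh://user@host:repo.git'
    # for parsing, and the 'ssh://' stub is stripped from the returned URL.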
@classmethod
def update_submodules(cls, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
cls.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def get_repository_root(cls, location):
loc = super(Git, cls).get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
['rev-parse', '--show-toplevel'],
cwd=location,
show_stdout=False,
stdout_only=True,
on_returncode='raise',
log_failed_cmd=False,
)
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return None
except InstallationError:
return None
return os.path.normpath(r.rstrip('\r\n'))
vcs.register(Git)
| pantsbuild/pex | pex/vendor/_vendored/pip/pip/_internal/vcs/git.py | Python | apache-2.0 | 15,599 |
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from essential.db import exception
from essential.gettextutils import _
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
    The corresponding original method of sqlalchemy-migrate just
    raises a NotImplementedError.
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
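# Illustrative usage (engine URL and repository path are hypothetical): apply
# the monkey-patch once at import time, before any SQLite migration runs.
#
#   patch_migrate()
#   engine = sqlalchemy.create_engine('sqlite:////tmp/app.db')
#   db_sync(engine, '/path/to/migrate_repo')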
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
:param sanity_check: Require schema sanity checking for all tables
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if sanity_check:
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version)
def _db_schema_sanity_check(engine):
"""Ensure all database tables were created with required parameters.
:param engine: SQLAlchemy engine instance for a given database
"""
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
'where TABLE_SCHEMA=%s and '
'TABLE_COLLATION NOT LIKE "%%utf8%%"')
table_names = [res[0] for res in engine.execute(onlyutf8_sql,
engine.url.database)]
if len(table_names) > 0:
raise ValueError(_('Tables "%s" have non utf8 collation, '
'please make sure all tables are CHARSET=utf8'
) % ','.join(table_names))
def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
db_version_control(engine, abs_path, version=init_version)
return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
"The database is not under version control, but has "
"tables. Please stamp the current version of the schema "
"manually."))
def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(engine, repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
return Repository(abs_path)
| gaolichuang/py-essential | essential/db/sqlalchemy/migration.py | Python | apache-2.0 | 10,186 |
# Copyright 2008-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebElement implementation."""
import hashlib
import os
import zipfile
try:
from StringIO import StringIO as IOStream
except ImportError: # 3+
from io import BytesIO as IOStream
import base64
from .command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
try:
str = basestring
except NameError:
pass
class WebElement(object):
"""Represents an HTML element.
Generally, all interesting operations to do with interacting with a page
will be performed through this interface."""
def __init__(self, parent, id_):
self._parent = parent
self._id = id_
@property
def tag_name(self):
"""Gets this element's tagName property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""Gets the text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_attribute(self, name):
"""Gets the attribute value.
:Args:
        - name - name of the attribute property to retrieve.
Example::
# Check if the 'active' css class is applied to an element.
is_active = "active" in target_element.get_attribute("class")
"""
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = ''
if resp['value'] is None:
attributeValue = None
else:
attributeValue = resp['value']
if name != 'value' and attributeValue.lower() in ('true', 'false'):
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Whether the element is selected.
Can be used to check if a checkbox or radio button is selected.
"""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element within the child elements of this element.
:Args:
- id_ - ID of child element to locate.
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""Finds a list of elements within the children of this element
with the matching ID.
:Args:
- id_ - Id of child element to find.
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Find element with in this element's children by name.
:Args:
- name - name property of the element to find.
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""Finds a list of elements with in this element's children by name.
:Args:
- name - name property to search for.
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element with in this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
"""Finds a list of elements with in this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds element with in this element's children by parial visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds a list of elements with in this element's children by link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
"""Finds element with in this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""Finds a list of elements with in this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath.
:Args:
xpath - xpath of element to locate. "//input[@class='myelement']"
Note: The base path will be relative to this element's location.
This will select the first link under this element.::
myelement.find_elements_by_xpath(".//a")
However, this will select the first link on the page.
myelement.find_elements_by_xpath("//a")
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the elements by xpath.
:Args:
- xpath - xpath locator string.
Note: The base path will be relative to this element's location.
This will select all links under this element.::
myelement.find_elements_by_xpath(".//a")
However, this will select all links in the page itself.
myelement.find_elements_by_xpath("//a")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds an element within this element's children by their class name.
:Args:
- name - class name to search on.
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds a list of elements within children of this element by their class name.
:Args:
- name - class name to search on.
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Find and return an element that's a child of this element by CSS selector.
:Args:
        - css_selector - CSS selector string, ex: 'a.nav#home'
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Find and return list of multiple elements within the children of this
element by CSS selector.
:Args:
        - css_selector - CSS selector string, ex: 'a.nav#home'
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element.
:Args:
- value - A string for typing, or setting form fields. For setting
file inputs, this could be a local file path.
Use this to send simple key events or to fill out form fields::
form_textfield = driver.find_element_by_name('username')
form_textfield.send_keys("admin")
This can also be used to set file inputs.::
file_input = driver.find_element_by_name('profilePic')
file_input.send_keys("path/to/profilepic.gif")
# Generally it's better to wrap the file path in one of the methods
# in os.path to return the actual path to support cross OS testing.
# file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
"""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = LocalFileDetector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = val.__str__()
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element would be visible to a user
"""
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""CONSIDERED LIABLE TO CHANGE WITHOUT WARNING. Use this to discover where on the screen an
element is so that we can click it. This method should cause the element to be scrolled
into view.
Returns the top lefthand corner location on the screen, or None if the element is not visible"""
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
""" Returns the size of the element """
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {}
new_size["height"] = size["height"]
new_size["width"] = size["width"]
return new_size
def value_of_css_property(self, property_name):
""" Returns the value of a CSS property """
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
{'propertyName': property_name})['value']
@property
def location(self):
""" Returns the location of the element in the renderable canvas"""
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": old_loc['x'],
"y": old_loc['y']}
return new_loc
@property
def rect(self):
""" Returns a dictionary with the size and location of the element"""
return self._execute(Command.GET_ELEMENT_RECT)['value']
@property
def parent(self):
""" Returns parent element is available. """
return self._parent
@property
def id(self):
""" Returns internal id used by selenium.
This is mainly for internal use. Simple use cases such as checking if 2 webelements
refer to the same element, can be done using '=='::
if element1 == element2:
print("These 2 are equal")
"""
return self._id
def __eq__(self, element):
if self._id == element.id:
return True
else:
return self._execute(Command.ELEMENT_EQUALS, {'other': element.id})['value']
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def __hash__(self):
return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16)
def _upload(self, filename):
fp = IOStream()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
        content = base64.encodebytes(fp.getvalue())
if not isinstance(content, str):
content = content.decode('utf-8')
try:
return self._execute(Command.UPLOAD_FILE,
{'file': content})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
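# Note: the fallback branches in _upload above return the bare filename when
# the remote end rejects the upload command, so send_keys degrades to typing
# the local path instead of raising.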
class LocalFileDetector(object):
@classmethod
def is_local_file(cls, *keys):
file_path = ''
typing = []
for val in keys:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
                val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
file_path = ''.join(typing)
        if file_path == '':
return None
try:
if os.path.isfile(file_path):
return file_path
        except Exception:
pass
return None
| RamaraoDonta/ramarao-clone | py/selenium/webdriver/remote/webelement.py | Python | apache-2.0 | 14,925 |
"""This component provides support to the Ring Door Bell camera."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import ATTR_ATTRIBUTION, CONF_SCAN_INTERVAL
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.util import dt as dt_util
from . import ATTRIBUTION, DATA_RING, NOTIFICATION_ID
CONF_FFMPEG_ARGUMENTS = 'ffmpeg_arguments'
FORCE_REFRESH_INTERVAL = timedelta(minutes=45)
_LOGGER = logging.getLogger(__name__)
NOTIFICATION_TITLE = 'Ring Camera Setup'
SCAN_INTERVAL = timedelta(seconds=90)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FFMPEG_ARGUMENTS): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
})
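# Illustrative configuration.yaml entry for this platform (values are
# placeholders, not taken from any real setup):
#
#   camera:
#     - platform: ring
#       ffmpeg_arguments: '-pred 1'
#       scan_interval: 90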
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Ring Door Bell and StickUp Camera."""
ring = hass.data[DATA_RING]
cams = []
cams_no_plan = []
for camera in ring.doorbells:
if camera.has_subscription:
cams.append(RingCam(hass, camera, config))
else:
cams_no_plan.append(camera)
for camera in ring.stickup_cams:
if camera.has_subscription:
cams.append(RingCam(hass, camera, config))
else:
cams_no_plan.append(camera)
# show notification for all cameras without an active subscription
    if cams_no_plan:
        cameras = ', '.join(camera.name for camera in cams_no_plan)
        err_msg = 'A Ring Protect Plan is required for the ' \
                  'following cameras: {}.'.format(cameras)
_LOGGER.error(err_msg)
hass.components.persistent_notification.create(
'Error: {}<br />'
'You will need to restart hass after fixing.'
''.format(err_msg),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
add_entities(cams, True)
return True
class RingCam(Camera):
"""An implementation of a Ring Door Bell camera."""
def __init__(self, hass, camera, device_info):
"""Initialize a Ring Door Bell camera."""
super(RingCam, self).__init__()
self._camera = camera
self._hass = hass
self._name = self._camera.name
self._ffmpeg = hass.data[DATA_FFMPEG]
self._ffmpeg_arguments = device_info.get(CONF_FFMPEG_ARGUMENTS)
self._last_video_id = self._camera.last_recording_id
self._video_url = self._camera.recording_url(self._last_video_id)
self._utcnow = dt_util.utcnow()
self._expires_at = FORCE_REFRESH_INTERVAL + self._utcnow
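    # The recording URL cached above is swapped out by update() once a newer
    # recording appears or FORCE_REFRESH_INTERVAL has elapsed.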
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._camera.id
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
'device_id': self._camera.id,
'firmware': self._camera.firmware,
'kind': self._camera.kind,
'timezone': self._camera.timezone,
'type': self._camera.family,
'video_url': self._video_url,
}
async def async_camera_image(self):
"""Return a still image response from the camera."""
from haffmpeg.tools import ImageFrame, IMAGE_JPEG
ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)
if self._video_url is None:
return
image = await asyncio.shield(ffmpeg.get_image(
self._video_url, output_format=IMAGE_JPEG,
extra_cmd=self._ffmpeg_arguments))
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
from haffmpeg.camera import CameraMjpeg
if self._video_url is None:
return
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
self._video_url, extra_cmd=self._ffmpeg_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass, request, stream_reader,
self._ffmpeg.ffmpeg_stream_content_type)
finally:
await stream.close()
@property
def should_poll(self):
"""Update the image periodically."""
return True
def update(self):
"""Update camera entity and refresh attributes."""
_LOGGER.debug("Checking if Ring DoorBell needs to refresh video_url")
self._camera.update()
self._utcnow = dt_util.utcnow()
try:
last_event = self._camera.history(limit=1)[0]
except (IndexError, TypeError):
return
last_recording_id = last_event['id']
video_status = last_event['recording']['status']
if video_status == 'ready' and \
(self._last_video_id != last_recording_id or
self._utcnow >= self._expires_at):
video_url = self._camera.recording_url(last_recording_id)
if video_url:
_LOGGER.info("Ring DoorBell properties refreshed")
# update attributes if new video or if URL has expired
self._last_video_id = last_recording_id
self._video_url = video_url
self._expires_at = FORCE_REFRESH_INTERVAL + self._utcnow
| jabesq/home-assistant | homeassistant/components/ring/camera.py | Python | apache-2.0 | 5,739 |
# Copyright 2015 Rafe Kaplan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import parser
class Rule(parser.Parser):
def __init__(self, expected_attr_type=None):
self.__expected_attr_type = expected_attr_type
@property
def attr_type(self):
if self.__expected_attr_type:
return self.__expected_attr_type
else:
try:
inner_parser = self.__parser
except AttributeError:
raise NotImplementedError
else:
return inner_parser.attr_type
@property
def parser(self):
return self.__parser
@parser.setter
def parser(self, value):
if self.__expected_attr_type and self.__expected_attr_type != value.attr_type:
raise ValueError('Unexpected attribute type')
self.__parser = parser.as_parser(value)
def _parse(self, state, *args, **kwargs):
with state.open_scope(*args, **kwargs):
self.__parser._parse(state)
def __imod__(self, other):
self.parser = other
return self
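    # __imod__ lets a grammar fill in a rule's body in place -- a sketch,
    # assuming `expression` is an already-built parser:
    #
    #   my_rule = Rule()
    #   my_rule %= expression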
def __call__(self, *args, **kwargs):
return RuleCall(self, *args, **kwargs)
class RuleCall(parser.Unary):
def __init__(self, rule, *args, **kwargs):
if not isinstance(rule, Rule):
raise TypeError('Expected rule to be type Rule, was {}'.format(type(rule).__name__))
super(RuleCall, self).__init__(rule)
self.__args = args
self.__kwargs = kwargs
@property
def args(self):
return self.__args
@property
def kwargs(self):
return dict(self.__kwargs)
def _parse(self, state):
args = [state.invoke(a) for a in self.__args]
kwargs = {k: state.invoke(v) for k, v in self.__kwargs.items()}
self.parser._parse(state, *args, **kwargs)
| slobberchops/booze | src/booze/gin/rule.py | Python | apache-2.0 | 2,341 |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
| NervanaSystems/neon | neon/backends/kernels/cuda/__init__.py | Python | apache-2.0 | 748 |
import os
import sys
import argparse
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermisc.selfcheck import harvesterPackageInfo
def main():
oparser = argparse.ArgumentParser(prog='prescript', add_help=True)
oparser.add_argument('-f', '--local_info_file', action='store', dest='local_info_file', help='path of harvester local info file')
if len(sys.argv) == 1:
print('No argument or flag specified. Did nothing')
sys.exit(0)
args = oparser.parse_args(sys.argv[1:])
local_info_file = os.path.normpath(args.local_info_file)
hpi = harvesterPackageInfo(local_info_file=local_info_file)
if hpi.package_changed:
print('Harvester package changed')
        # TODO
hpi.renew_local_info()
else:
print('Harvester package unchanged. Skipped')
if __name__ == '__main__':
main()
| dougbenjamin/panda-harvester | pandaharvester/harvesterscripts/prescript.py | Python | apache-2.0 | 901 |
import tempfile
from git import InvalidGitRepositoryError
try:
from unittest2 import TestCase
from mock import patch, Mock
except ImportError:
from unittest import TestCase
    from unittest.mock import patch, Mock
import textwrap
from datetime import datetime
from botocore.exceptions import ClientError
from dateutil.tz import tzutc
from cfn_sphere import util, CloudFormationStack
from cfn_sphere.exceptions import CfnSphereException, CfnSphereBotoError
from cfn_sphere.template import CloudFormationTemplate
class UtilTests(TestCase):
def test_convert_yaml_to_json_string_returns_valid_json_string(self):
data = textwrap.dedent("""
foo:
foo: baa
""")
self.assertEqual('{\n "foo": {\n "foo": "baa"\n }\n}', util.convert_yaml_to_json_string(data))
def test_convert_yaml_to_json_string_returns_valid_json_string_on_empty_string_input(self):
data = ""
self.assertEqual('{}', util.convert_yaml_to_json_string(data))
def test_convert_json_to_yaml_string_returns_valid_yaml_string(self):
data = textwrap.dedent("""
{
"foo": {
"foo": "baa"
}
}
""")
self.assertEqual('foo:\n foo: baa\n', util.convert_json_to_yaml_string(data))
def test_convert_json_to_yaml_string_returns_empty_string_on_empty_json_input(self):
data = {}
self.assertEqual('', util.convert_json_to_yaml_string(data))
@patch("cfn_sphere.util.urllib2.urlopen")
def test_get_cfn_api_server_time_returns_gmt_datetime(self, urlopen_mock):
urlopen_mock.return_value.info.return_value.get.return_value = "Mon, 21 Sep 2015 17:17:26 GMT"
expected_timestamp = datetime(year=2015, month=9, day=21, hour=17, minute=17, second=26, tzinfo=tzutc())
self.assertEqual(expected_timestamp, util.get_cfn_api_server_time())
@patch("cfn_sphere.util.urllib2.urlopen")
def test_get_cfn_api_server_time_raises_exception_on_empty_date_header(self, urlopen_mock):
urlopen_mock.return_value.info.return_value.get.return_value = ""
with self.assertRaises(CfnSphereException):
util.get_cfn_api_server_time()
def test_with_boto_retry_retries_method_call_for_throttling_exception(self):
count_func = Mock()
@util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
def my_retried_method(count_func):
count_func()
exception = CfnSphereBotoError(
ClientError(error_response={"Error": {"Code": "Throttling", "Message": "Rate exceeded"}},
operation_name="DescribeStacks"))
raise exception
with self.assertRaises(CfnSphereBotoError):
my_retried_method(count_func)
self.assertEqual(2, count_func.call_count)
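    # Only ClientErrors carrying the 'Throttling' code are retried; the two
    # tests below confirm that plain exceptions and other client errors
    # propagate after a single call.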
def test_with_boto_retry_does_not_retry_for_simple_exception(self):
count_func = Mock()
@util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
def my_retried_method(count_func):
count_func()
raise Exception
with self.assertRaises(Exception):
my_retried_method(count_func)
self.assertEqual(1, count_func.call_count)
def test_with_boto_retry_does_not_retry_for_another_boto_client_error(self):
count_func = Mock()
@util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
def my_retried_method(count_func):
count_func()
exception = ClientError(error_response={"Error": {"Code": "Another Error", "Message": "Foo"}},
operation_name="DescribeStacks")
raise exception
with self.assertRaises(ClientError):
my_retried_method(count_func)
self.assertEqual(1, count_func.call_count)
def test_with_boto_retry_does_not_retry_without_exception(self):
count_func = Mock()
@util.with_boto_retry(max_retries=1, pause_time_multiplier=1)
def my_retried_method(count_func):
count_func()
return "foo"
self.assertEqual("foo", my_retried_method(count_func))
self.assertEqual(1, count_func.call_count)
def test_get_pretty_parameters_string(self):
template_body = {
'Parameters': {
'myParameter1': {
'Type': 'String',
'NoEcho': True
},
'myParameter2': {
'Type': 'String'
},
'myParameter3': {
'Type': 'Number',
'NoEcho': 'true'
},
'myParameter4': {
'Type': 'Number',
'NoEcho': 'false'
},
'myParameter5': {
'Type': 'Number',
'NoEcho': False
}
}
}
parameters = {
'myParameter1': 'super-secret',
'myParameter2': 'not-that-secret',
'myParameter3': 'also-super-secret',
'myParameter4': 'could-be-public',
'myParameter5': 'also-ok'
}
template = CloudFormationTemplate(template_body, 'just-another-template')
stack = CloudFormationStack(template, parameters, 'just-another-stack', 'eu-west-1')
expected_string = """+--------------+-----------------+
| Parameter | Value |
+--------------+-----------------+
| myParameter1 | *** |
| myParameter2 | not-that-secret |
| myParameter3 | *** |
| myParameter4 | could-be-public |
| myParameter5 | also-ok |
+--------------+-----------------+"""
self.assertEqual(expected_string, util.get_pretty_parameters_string(stack))
def test_get_pretty_stack_outputs_returns_proper_table(self):
outputs = [
{
'OutputKey': 'key1',
'OutputValue': 'value1',
'Description': 'desc1'
}, {
'OutputKey': 'key2',
'OutputValue': 'value2',
'Description': 'desc2'
}, {
'OutputKey': 'key3',
'OutputValue': 'value3',
'Description': 'desc3'
}
]
expected = """+--------+--------+
| Output | Value |
+--------+--------+
| key1 | value1 |
| key2 | value2 |
| key3 | value3 |
+--------+--------+"""
result = util.get_pretty_stack_outputs(outputs)
self.assertEqual(expected, result)
def test_strip_string_strips_string(self):
s = "sfsdklgashgslkadghkafhgaknkbndkjfbnwurtqwhgsdnkshGLSAKGKLDJFHGSKDLGFLDFGKSDFLGKHAsdjdghskjdhsdcxbvwerA323"
result = util.strip_string(s)
self.assertEqual(
"sfsdklgashgslkadghkafhgaknkbndkjfbnwurtqwhgsdnkshGLSAKGKLDJFHGSKDLGFLDFGKSDFLGKHAsdjdghskjdhsdcxbvwe...",
result)
def test_strip_string_doesnt_strip_short_strings(self):
s = "my-short-string"
result = util.strip_string(s)
self.assertEqual("my-short-string...", result)
@patch("cfn_sphere.util.Repo")
def test_get_git_repository_remote_url_returns_none_if_no_repository_present(self, repo_mock):
repo_mock.side_effect = InvalidGitRepositoryError
self.assertEqual(None, util.get_git_repository_remote_url(tempfile.mkdtemp()))
@patch("cfn_sphere.util.Repo")
def test_get_git_repository_remote_url_returns_repo_url(self, repo_mock):
url = "http://config.repo.git"
repo_mock.return_value.remotes.origin.url = url
self.assertEqual(url, util.get_git_repository_remote_url(tempfile.mkdtemp()))
@patch("cfn_sphere.util.Repo")
def test_get_git_repository_remote_url_returns_repo_url_from_parent_dir(self, repo_mock):
url = "http://config.repo.git"
repo_object_mock = Mock()
repo_object_mock.remotes.origin.url = url
repo_mock.side_effect = [InvalidGitRepositoryError, repo_object_mock]
self.assertEqual(url, util.get_git_repository_remote_url(tempfile.mkdtemp()))
def test_get_git_repository_remote_url_returns_none_for_none_working_dir(self):
self.assertEqual(None, util.get_git_repository_remote_url(None))
def test_get_git_repository_remote_url_returns_none_for_empty_string_working_dir(self):
self.assertEqual(None, util.get_git_repository_remote_url(""))
def test_kv_list_to_dict_returns_empty_dict_for_empty_list(self):
result = util.kv_list_to_dict([])
self.assertEqual({}, result)
def test_kv_list_to_dict(self):
result = util.kv_list_to_dict(["k1=v1", "k2=v2"])
self.assertEqual({"k1": "v1", "k2": "v2"}, result)
def test_kv_list_to_dict_raises_exception_on_syntax_error(self):
with self.assertRaises(CfnSphereException):
util.kv_list_to_dict(["k1=v1", "k2:v2"])
| marco-hoyer/cfn-sphere | src/unittest/python/util_tests.py | Python | apache-2.0 | 8,933 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReshapeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
outputs = []
# Here we test two types of reshapes, one changes the batch dimension and
# the other does not. Note that we're not able to test reshaping to
# scalar, since TRT requires input tensor to be of rank at least 2, so a
# reshape with scalar input will be filtered out of the segment before
# conversion.
#
# These reshapes happen at batch dimension, thus conversion should fail.
for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2], [2, 50, -1, 24, 2]]:
incompatible_reshape = array_ops.reshape(inp, shape)
reshape_back = array_ops.reshape(incompatible_reshape, [-1, 24, 24, 2])
outputs.append(self.trt_incompatible_op(reshape_back))
# Add another block with many reshapes that don't change the batch
# dimension.
compatible_reshape = array_ops.reshape(
inp, [-1, 24 * 24, 2], name="reshape-0")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24, -1], name="reshape-1")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24 * 2, 24], name="reshape-2")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24 * 2], name="reshape-3")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24, 2], name="reshape-6")
outputs.append(self.trt_incompatible_op(compatible_reshape))
return math_ops.add_n(outputs, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 24, 24, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
["reshape-%d/shape" % i for i in range(7)]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
class TransposeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
# Add a block with compatible transposes.
compatible_transpose = array_ops.transpose(
inp, [0, 3, 1, 2], name="transpose-1")
compatible_transpose = array_ops.transpose(
compatible_transpose, [0, 2, 3, 1], name="transposeback")
# Add an incompatible op so the first block will not be in the same
# subgraph where the following block belongs.
bridge = self.trt_incompatible_op(compatible_transpose)
# Add a block with incompatible transposes.
#
# Note: by default Grappler will run the TRT optimizer twice. At the
# first time it will group the two transpose ops below to same segment
# then fail the conversion due to the expected batch dimension problem.
# At the second time, since the input of bridge op is TRTEngineOp_0, it
# will fail to do shape inference which then cause conversion to fail.
# TODO(laigd): support shape inference, make TRT optimizer run only
# once, and fix this.
incompatible_transpose = array_ops.transpose(
bridge, [2, 1, 0, 3], name="transpose-2")
excluded_transpose = array_ops.transpose(
incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
return array_ops.identity(excluded_transpose, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[24, 100, 2, 24]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"transpose-1", "transpose-1/perm", "transposeback",
"transposeback/perm"
]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
if __name__ == "__main__":
test.main()
| ghchinoy/tensorflow | tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py | Python | apache-2.0 | 5,407 |
from O365 import attachment
import unittest
import json
import base64
from random import randint
att_rep = open('attachment.json','r').read()
att_j = json.loads(att_rep)
class TestAttachment (unittest.TestCase):
def setUp(self):
self.att = attachment.Attachment(att_j['value'][0])
def test_isType(self):
self.assertTrue(self.att.isType('txt'))
def test_getType(self):
self.assertEqual(self.att.getType(),'.txt')
def test_save(self):
name = self.att.json['Name']
name1 = self.newFileName(name)
self.att.json['Name'] = name1
self.assertTrue(self.att.save('/tmp'))
with open('/tmp/'+name1,'r') as ins:
f = ins.read()
self.assertEqual('testing w00t!',f)
name2 = self.newFileName(name)
self.att.json['Name'] = name2
self.assertTrue(self.att.save('/tmp/'))
with open('/tmp/'+name2,'r') as ins:
f = ins.read()
self.assertEqual('testing w00t!',f)
def newFileName(self,val):
for i in range(4):
val = str(randint(0,9)) + val
return val
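    # e.g. newFileName('a.txt') might return '4821a.txt'; the four random
    # digits keep repeated runs from colliding in /tmp.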
def test_getByteString(self):
self.assertEqual(self.att.getByteString(),b'testing w00t!')
def test_getBase64(self):
self.assertEqual(self.att.getBase64(),'dGVzdGluZyB3MDB0IQ==\n')
def test_setByteString(self):
test_string = b'testing testie test'
self.att.setByteString(test_string)
enc = base64.encodebytes(test_string)
self.assertEqual(self.att.json['ContentBytes'],enc)
    def test_setBase64(self):
        # assumes Attachment.setBase64 rejects input that is not valid base64
        wrong_test_string = 'I am sooooo not base64 encoded.'
        right_test_string = b'Base64 <3 all around!'
        enc = base64.encodebytes(right_test_string)
        self.assertRaises(Exception, self.att.setBase64, wrong_test_string)
        self.assertEqual(self.att.json['ContentBytes'],'dGVzdGluZyB3MDB0IQ==\n')
        self.att.setBase64(enc)
        self.assertEqual(self.att.json['ContentBytes'],enc)
if __name__ == '__main__':
unittest.main()
| roycem90/python-o365 | tests/test_attachment.py | Python | apache-2.0 | 1,812 |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class."""
__author__ = 'Sean Lip'
import feconf
import utils
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
from google.appengine.ext import ndb
class BaseModel(ndb.Model):
"""Base model for all persistent object storage classes."""
# When this entity was first created.
created_on = ndb.DateTimeProperty(auto_now_add=True)
# When this entity was last updated.
last_updated = ndb.DateTimeProperty(auto_now=True)
# Whether the current version of the file is deleted.
deleted = ndb.BooleanProperty(indexed=True, default=False)
@property
def id(self):
"""A unique id for this model instance."""
return self.key.id()
def _pre_put_hook(self):
"""This is run before model instances are saved to the datastore.
Subclasses of BaseModel should override this method.
"""
pass
class EntityNotFoundError(Exception):
"""Raised when no entity for a given id exists in the datastore."""
pass
@classmethod
def get(cls, entity_id, strict=True):
"""Gets an entity by id. Fails noisily if strict == True.
Args:
entity_id: str. The id of the entity.
strict: bool. Whether to fail noisily if no entity with the given id
exists in the datastore.
Returns:
None, if strict == False and no undeleted entity with the given id
exists in the datastore. Otherwise, the entity instance that
corresponds to the given id.
Raises:
- base_models.BaseModel.EntityNotFoundError: if strict == True and
no undeleted entity with the given id exists in the datastore.
"""
entity = cls.get_by_id(entity_id)
if entity and entity.deleted:
entity = None
if strict and entity is None:
raise cls.EntityNotFoundError(
'Entity for class %s with id %s not found' %
(cls.__name__, entity_id))
return entity
def put(self):
super(BaseModel, self).put()
@classmethod
def get_multi(cls, entity_ids):
entity_keys = [ndb.Key(cls, entity_id) for entity_id in entity_ids]
return ndb.get_multi(entity_keys)
@classmethod
def put_multi(cls, entities):
return ndb.put_multi(entities)
def delete(self):
super(BaseModel, self).key.delete()
@classmethod
def get_all(cls, include_deleted_entities=False):
"""Returns a filterable iterable of all entities of this class.
If include_deleted_entities is True then entities that have been marked
deleted are returned as well.
"""
query = cls.query()
if not include_deleted_entities:
query = query.filter(cls.deleted == False)
return query
@classmethod
def get_new_id(cls, entity_name):
"""Gets a new id for an entity, based on its name.
The returned id is guaranteed to be unique among all instances of this
entity.
Args:
entity_name: the name of the entity. Coerced to a utf-8 encoded
string. Defaults to ''.
Returns:
str: a new unique id for this entity class.
Raises:
- Exception: if an id cannot be generated within a reasonable number
of attempts.
"""
try:
entity_name = unicode(entity_name).encode('utf-8')
except Exception:
entity_name = ''
MAX_RETRIES = 10
RAND_RANGE = 127 * 127
ID_LENGTH = 12
for i in range(MAX_RETRIES):
new_id = utils.convert_to_hash(
'%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
ID_LENGTH)
if not cls.get_by_id(new_id):
return new_id
raise Exception('New id generator is producing too many collisions.')
class VersionedModel(BaseModel):
"""Model that handles storage of the version history of model instances.
To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
'committer_id', 'commit_type' and 'commit_message', and a JSON field for
the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
field 'content'. The item that is being versioned must be serializable to a
JSON blob.
Note that commit() should be used for VersionedModels, as opposed to put()
for direct subclasses of BaseModel.
"""
# The class designated as the snapshot model. This should be a subclass of
# BaseSnapshotMetadataModel.
SNAPSHOT_METADATA_CLASS = None
# The class designated as the snapshot content model. This should be a
# subclass of BaseSnapshotContentModel.
SNAPSHOT_CONTENT_CLASS = None
# Whether reverting is allowed. Default is False.
ALLOW_REVERT = False
### IMPORTANT: Subclasses should only overwrite things above this line. ###
# The possible commit types.
_COMMIT_TYPE_CREATE = 'create'
_COMMIT_TYPE_REVERT = 'revert'
_COMMIT_TYPE_EDIT = 'edit'
_COMMIT_TYPE_DELETE = 'delete'
# A list containing the possible commit types.
COMMIT_TYPE_CHOICES = [
_COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
_COMMIT_TYPE_DELETE
]
# The delimiter used to separate the version number from the model instance
# id. To get the instance id from a snapshot id, use Python's rfind()
# method to find the location of this delimiter.
_VERSION_DELIMITER = '-'
# The reserved prefix for keys that are automatically inserted into a
# commit_cmd dict by this model.
_AUTOGENERATED_PREFIX = 'AUTO'
# The current version number of this instance. In each PUT operation,
# this number is incremented and a snapshot of the modified instance is
# stored in the snapshot metadata and content models. The snapshot
# version number starts at 1 when the model instance is first created.
# All data in this instance represents the version at HEAD; data about the
# previous versions is stored in the snapshot models.
version = ndb.IntegerProperty(default=0)
def _require_not_marked_deleted(self):
if self.deleted:
raise Exception('This model instance has been deleted.')
def _compute_snapshot(self):
"""Generates a snapshot (a Python dict) from the model fields."""
return self.to_dict(exclude=['created_on', 'last_updated'])
def _reconstitute(self, snapshot_dict):
"""Makes this instance into a reconstitution of the given snapshot."""
self.populate(**snapshot_dict)
return self
def _reconstitute_from_snapshot_id(self, snapshot_id):
"""Makes this instance into a reconstitution of the given snapshot."""
snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
snapshot_dict = snapshot_model.content
return self._reconstitute(snapshot_dict)
@classmethod
def _get_snapshot_id(cls, instance_id, version_number):
return '%s%s%s' % (
instance_id, cls._VERSION_DELIMITER, version_number)
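    # e.g. _get_snapshot_id('exp12', 3) -> 'exp12-3'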
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
if self.SNAPSHOT_METADATA_CLASS is None:
raise Exception('No snapshot metadata class defined.')
if self.SNAPSHOT_CONTENT_CLASS is None:
raise Exception('No snapshot content class defined.')
if not isinstance(commit_cmds, list):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
for item in commit_cmds:
if not isinstance(item, dict):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
self.version += 1
snapshot = self._compute_snapshot()
snapshot_id = self._get_snapshot_id(self.id, self.version)
snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS(
id=snapshot_id, committer_id=committer_id, commit_type=commit_type,
commit_message=commit_message, commit_cmds=commit_cmds)
snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS(
id=snapshot_id, content=snapshot)
transaction_services.run_in_transaction(
ndb.put_multi,
[snapshot_metadata_instance, snapshot_content_instance, self])
def delete(self, committer_id, commit_message, force_deletion=False):
if force_deletion:
current_version = self.version
version_numbers = [str(num + 1) for num in range(current_version)]
snapshot_ids = [
self._get_snapshot_id(self.id, version_number)
for version_number in version_numbers]
metadata_keys = [
ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(metadata_keys)
content_keys = [
ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(content_keys)
super(VersionedModel, self).delete()
else:
self._require_not_marked_deleted()
self.deleted = True
CMD_DELETE = '%s_mark_deleted' % self._AUTOGENERATED_PREFIX
commit_cmds = [{
'cmd': CMD_DELETE
}]
self._trusted_commit(
committer_id, self._COMMIT_TYPE_DELETE, commit_message,
commit_cmds)
def put(self, *args, **kwargs):
"""For VersionedModels, this method is replaced with commit()."""
raise NotImplementedError
def commit(self, committer_id, commit_message, commit_cmds):
"""Saves a version snapshot and updates the model.
commit_cmds should give sufficient information to reconstruct the
commit.
"""
self._require_not_marked_deleted()
for commit_cmd in commit_cmds:
if 'cmd' not in commit_cmd:
raise Exception(
'Invalid commit_cmd: %s. Expected a \'cmd\' key.'
% commit_cmd)
if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX):
raise Exception(
                    'Invalid change list command: %s' % commit_cmd['cmd'])
commit_type = (
self._COMMIT_TYPE_CREATE if self.version == 0 else
self._COMMIT_TYPE_EDIT)
self._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
def revert(self, committer_id, commit_message, version_number):
self._require_not_marked_deleted()
if not self.ALLOW_REVERT:
raise Exception(
'Reverting of objects of type %s is not allowed.'
% self.__class__.__name__)
CMD_REVERT = '%s_revert_version_number' % self._AUTOGENERATED_PREFIX
commit_cmds = [{
'cmd': CMD_REVERT,
'version_number': version_number
}]
# Do not overwrite the version number.
current_version = self.version
snapshot_id = self._get_snapshot_id(self.id, version_number)
self._reconstitute_from_snapshot_id(snapshot_id)
self.version = current_version
self._trusted_commit(
committer_id, self._COMMIT_TYPE_REVERT, commit_message,
commit_cmds)
@classmethod
def get_version(cls, model_instance_id, version_number):
"""Returns a model instance representing the given version.
The snapshot content is used to populate this model instance. The
snapshot metadata is not used.
"""
cls.get(model_instance_id)._require_not_marked_deleted()
snapshot_id = cls._get_snapshot_id(model_instance_id, version_number)
return cls(id=model_instance_id)._reconstitute_from_snapshot_id(
snapshot_id)
@classmethod
def get(cls, entity_id, strict=True, version=None):
"""Gets an entity by id. Fails noisily if strict == True."""
if version is None:
return super(VersionedModel, cls).get(entity_id, strict=strict)
else:
return cls.get_version(entity_id, version)
@classmethod
def get_snapshots_metadata(cls, model_instance_id, version_numbers):
"""Returns a list of dicts, each representing a model snapshot.
One dict is returned for each version number in the list of version
numbers requested. If any of the version numbers does not exist, an
error is raised.
"""
cls.get(model_instance_id)._require_not_marked_deleted()
snapshot_ids = [
cls._get_snapshot_id(model_instance_id, version_number)
for version_number in version_numbers]
metadata_keys = [
ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
returned_models = ndb.get_multi(metadata_keys)
for ind, model in enumerate(returned_models):
if model is None:
raise Exception(
'Invalid version number %s for model %s with id %s'
% (version_numbers[ind], cls.__name__, model_instance_id))
return [{
'committer_id': model.committer_id,
'commit_message': model.commit_message,
'commit_cmds': model.commit_cmds,
'commit_type': model.commit_type,
'version_number': version_numbers[ind],
'created_on': model.created_on.strftime(
feconf.HUMAN_READABLE_DATETIME_FORMAT),
} for (ind, model) in enumerate(returned_models)]
class BaseSnapshotMetadataModel(BaseModel):
"""Base class for snapshot metadata classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The id of the user who committed this revision.
committer_id = ndb.StringProperty(required=True)
# The type of the commit associated with this snapshot.
commit_type = ndb.StringProperty(
required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES)
# The commit message associated with this snapshot.
commit_message = ndb.TextProperty(indexed=False)
# A sequence of commands that can be used to describe this commit.
# Represented as a list of dicts.
commit_cmds = ndb.JsonProperty(indexed=False)
class BaseSnapshotContentModel(BaseModel):
"""Base class for snapshot content classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The snapshot content, as a JSON blob.
content = ndb.JsonProperty(indexed=False)
| openhatch/oh-missions-oppia-beta | core/storage/base_model/gae_models.py | Python | apache-2.0 | 15,533 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
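# e.g. _get_model((1, 4, 4, 4), "uint8", 65, 178) returns a Relay expression
# equivalent to clip(a, 65, 178) over a uint8 input named "a".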
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
trials = [
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 16), 12, 76, "uint8"),
((1, 4, 4, 4), 65, 125, "int8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
]
np.random.seed(0)
for shape, a_min, a_max, trial_dtype in trials:
if trial_dtype == dtype:
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
trials = [
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
]
for shape, dtype, a_min, a_max, err_msg in trials:
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| dmlc/tvm | tests/python/contrib/test_ethosn/test_relu.py | Python | apache-2.0 | 2,921 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
REQUEST_TYPE = MAP_REMOVEENTRYLISTENER
RESPONSE_TYPE = 101
RETRYABLE = True
def calculate_size(name, registration_id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_str(registration_id)
return data_size
def encode_request(name, registration_id):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, registration_id))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_str(registration_id)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
| cangencer/hazelcast-python-client | hazelcast/protocol/codec/map_remove_entry_listener_codec.py | Python | apache-2.0 | 1,199 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import hints
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import config_file as rdf_config_file
from grr.lib.rdfvalues import protodict as rdf_protodict
class HintsTests(test_lib.GRRBaseTest):
"""Test hint operations."""
def testCheckOverlay(self):
"""Overlay(hint1, hint2) should populate hint2 with the values of hint1."""
# Fully populated hint.
full = {
"problem": "Terminator needs trousers.\n",
"fix": "Give me your clothes.\n",
"format": "{mission}, {target}\n",
"summary": "I'll be back."
}
# Partial hint
partial = {
"problem": "Terminator needs to go shopping.",
"fix": "Phased plasma rifle in the 40-watt range.",
"format": "",
"summary": ""
}
# Partial overlaid with full.
overlay = {
"problem": "Terminator needs to go shopping.",
"fix": "Phased plasma rifle in the 40-watt range.",
"format": "{mission}, {target}",
"summary": "I'll be back."
}
# Empty hint.
empty = {"problem": "", "fix": "", "format": "", "summary": ""}
# Empty hint should not clobber populated hint.
starts_full = full.copy()
starts_empty = empty.copy()
hints.Overlay(starts_full, starts_empty)
self.assertDictEqual(full, starts_full)
self.assertDictEqual(empty, starts_empty)
# Populate empty hint from partially populated hint.
starts_partial = partial.copy()
starts_empty = empty.copy()
hints.Overlay(starts_empty, starts_partial)
self.assertDictEqual(partial, starts_partial)
self.assertDictEqual(partial, starts_empty)
# Overlay the full and partial hints to get the hybrid.
starts_full = full.copy()
starts_partial = partial.copy()
hints.Overlay(starts_partial, starts_full)
self.assertDictEqual(full, starts_full)
self.assertDictEqual(overlay, starts_partial)
def testRdfFormatter(self):
"""Hints format RDF values with arbitrary values and attributes."""
# Create a complex RDF value
rdf = rdf_client.ClientSummary()
rdf.system_info.system = "Linux"
rdf.system_info.node = "coreai.skynet.com"
# Users (repeated)
rdf.users = [rdf_client.User(username=u) for u in ("root", "jconnor")]
# Interface (nested, repeated)
addresses = [
rdf_client.NetworkAddress(human_readable=a)
for a in ("1.1.1.1", "2.2.2.2", "3.3.3.3")
]
eth0 = rdf_client.Interface(ifname="eth0", addresses=addresses[:2])
ppp0 = rdf_client.Interface(ifname="ppp0", addresses=addresses[2])
rdf.interfaces = [eth0, ppp0]
template = ("{system_info.system} {users.username} {interfaces.ifname} "
"{interfaces.addresses.human_readable}\n")
hinter = hints.Hinter(template=template)
expected = "Linux root,jconnor eth0,ppp0 1.1.1.1,2.2.2.2,3.3.3.3"
result = hinter.Render(rdf)
self.assertEqual(expected, result)
def testRdfFormatterHandlesKeyValuePair(self):
"""rdfvalue.KeyValue items need special handling to expand k and v."""
key = rdf_protodict.DataBlob().SetValue("skynet")
value = rdf_protodict.DataBlob().SetValue([1997])
rdf = rdf_protodict.KeyValue(k=key, v=value)
template = "{k}: {v}"
hinter = hints.Hinter(template=template)
expected = "skynet: 1997"
result = hinter.Render(rdf)
self.assertEqual(expected, result)
def testRdfFormatterAttributedDict(self):
sshd = rdf_config_file.SshdConfig()
sshd.config = rdf_protodict.AttributedDict(skynet="operational")
template = "{config.skynet}"
hinter = hints.Hinter(template=template)
expected = "operational"
result = hinter.Render(sshd)
self.assertEqual(expected, result)
def testRdfFormatterFanOut(self):
rdf = rdf_protodict.Dict()
user1 = rdf_client.User(username="drexler")
user2 = rdf_client.User(username="joy")
rdf["cataclysm"] = "GreyGoo"
rdf["thinkers"] = [user1, user2]
rdf["reference"] = {
"ecophage": ["bots", ["nanobots", ["picobots"]]],
"doomsday": {
"books": ["cats cradle", "prey"]
}
}
template = ("{cataclysm}; {thinkers.username}; {reference.ecophage}; "
"{reference.doomsday}\n")
hinter = hints.Hinter(template=template)
expected = ("GreyGoo; drexler,joy; bots,nanobots,picobots; "
"books:cats cradle,prey")
result = hinter.Render(rdf)
self.assertEqual(expected, result)
def testStatModeFormat(self):
rdf = rdf_client.StatEntry(st_mode=33204)
expected = "-rw-rw-r--"
template = "{st_mode}"
hinter = hints.Hinter(template=template)
result = hinter.Render(rdf)
self.assertEqual(expected, result)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| destijl/grr | grr/lib/checks/hints_test.py | Python | apache-2.0 | 4,932 |
#!/usr/bin/env python
# encoding=utf-8
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pexpect
import pytest
import shlex
import shutil
import socket
import signal
from impala_shell_results import get_shell_cmd_result, cancellation_helper
from subprocess import Popen, PIPE
from tests.common.impala_service import ImpaladService
from tests.verifiers.metric_verifier import MetricVerifier
from time import sleep
SHELL_CMD = "%s/bin/impala-shell.sh" % os.environ['IMPALA_HOME']
SHELL_HISTORY_FILE = os.path.expanduser("~/.impalahistory")
TMP_HISTORY_FILE = os.path.expanduser("~/.impalahistorytmp")
class TestImpalaShellInteractive(object):
"""Test the impala shell interactively"""
def _send_cmd_to_shell(self, p, cmd):
"""Given an open shell process, write a cmd to stdin
This method takes care of adding the delimiter and EOL, callers should send the raw
command.
"""
p.stdin.write("%s;\n" % cmd)
p.stdin.flush()
def _start_new_shell_process(self, args=None):
"""Starts a shell process and returns the process handle"""
cmd = "%s %s" % (SHELL_CMD, args) if args else SHELL_CMD
return Popen(shlex.split(SHELL_CMD), shell=True, stdout=PIPE,
stdin=PIPE, stderr=PIPE)
@classmethod
def setup_class(cls):
if os.path.exists(SHELL_HISTORY_FILE):
shutil.move(SHELL_HISTORY_FILE, TMP_HISTORY_FILE)
@classmethod
def teardown_class(cls):
if os.path.exists(TMP_HISTORY_FILE): shutil.move(TMP_HISTORY_FILE, SHELL_HISTORY_FILE)
@pytest.mark.execute_serially
def test_escaped_quotes(self):
"""Test escaping quotes"""
# test escaped quotes outside of quotes
result = run_impala_shell_interactive("select \\'bc';")
assert "could not match input" in result.stderr
result = run_impala_shell_interactive("select \\\"bc\";")
assert "could not match input" in result.stderr
# test escaped quotes within quotes
result = run_impala_shell_interactive("select 'ab\\'c';")
assert "Fetched 1 row(s)" in result.stderr
result = run_impala_shell_interactive("select \"ab\\\"c\";")
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self):
impalad = ImpaladService(socket.getfqdn())
impalad.wait_for_num_in_flight_queries(0)
command = "select sleep(10000);"
p = self._start_new_shell_process()
self._send_cmd_to_shell(p, command)
sleep(1)
# iterate through all processes with psutil
shell_pid = cancellation_helper()
sleep(2)
os.kill(shell_pid, signal.SIGINT)
result = get_shell_cmd_result(p)
assert impalad.wait_for_num_in_flight_queries(0)
@pytest.mark.execute_serially
def test_unicode_input(self):
"Test queries containing non-ascii input"
# test a unicode query spanning multiple lines
unicode_text = u'\ufffd'
args = "select '%s'\n;" % unicode_text.encode('utf-8')
result = run_impala_shell_interactive(args)
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_welcome_string(self):
"""Test that the shell's welcome message is only printed once
when the shell is started. Ensure it is not reprinted on errors.
Regression test for IMPALA-1153
"""
result = run_impala_shell_interactive('asdf;')
assert result.stdout.count("Welcome to the Impala shell") == 1
result = run_impala_shell_interactive('select * from non_existent_table;')
assert result.stdout.count("Welcome to the Impala shell") == 1
@pytest.mark.execute_serially
def test_bash_cmd_timing(self):
"""Test existence of time output in bash commands run from shell"""
args = "! ls;"
result = run_impala_shell_interactive(args)
assert "Executed in" in result.stderr
@pytest.mark.execute_serially
def test_reconnect(self):
"""Regression Test for IMPALA-1235
Verifies that a connect command by the user is honoured.
"""
def get_num_open_sessions(impala_service):
"""Helper method to retrieve the number of open sessions"""
return impala_service.get_metric_value('impala-server.num-open-beeswax-sessions')
hostname = socket.getfqdn()
initial_impala_service = ImpaladService(hostname)
target_impala_service = ImpaladService(hostname, webserver_port=25001,
beeswax_port=21001, be_port=22001)
# Get the initial state for the number of sessions.
num_sessions_initial = get_num_open_sessions(initial_impala_service)
num_sessions_target = get_num_open_sessions(target_impala_service)
# Connect to localhost:21000 (default)
p = self._start_new_shell_process()
sleep(2)
# Make sure we're connected <hostname>:21000
assert get_num_open_sessions(initial_impala_service) == num_sessions_initial + 1, \
"Not connected to %s:21000" % hostname
self._send_cmd_to_shell(p, "connect %s:21001" % hostname)
# Wait for a little while
sleep(2)
# The number of sessions on the target impalad should have been incremented.
assert get_num_open_sessions(target_impala_service) == num_sessions_target + 1, \
"Not connected to %s:21001" % hostname
# The number of sessions on the initial impalad should have been decremented.
assert get_num_open_sessions(initial_impala_service) == num_sessions_initial, \
"Connection to %s:21000 should have been closed" % hostname
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self):
"""Regression test for IMPALA-1317
The shell does not call close() for alter, use and drop queries, leaving them in
flight. This test issues those queries in interactive mode, and checks the debug
webpage to confirm that they've been closed.
TODO: Add every statement type.
"""
TMP_DB = 'inflight_test_db'
TMP_TBL = 'tmp_tbl'
MSG = '%s query should be closed'
NUM_QUERIES = 'impala-server.num-queries'
impalad = ImpaladService(socket.getfqdn())
p = self._start_new_shell_process()
try:
start_num_queries = impalad.get_metric_value(NUM_QUERIES)
self._send_cmd_to_shell(p, 'create database if not exists %s' % TMP_DB)
self._send_cmd_to_shell(p, 'use %s' % TMP_DB)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
self._send_cmd_to_shell(p, 'create table %s(i int)' % TMP_TBL)
self._send_cmd_to_shell(p, 'alter table %s add columns (j int)' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
self._send_cmd_to_shell(p, 'drop table %s' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
finally:
run_impala_shell_interactive("drop table if exists %s.%s;" % (TMP_DB, TMP_TBL))
run_impala_shell_interactive("drop database if exists foo;")
@pytest.mark.execute_serially
def test_multiline_queries_in_history(self):
"""Test to ensure that multiline queries with comments are preserved in history
Ensure that multiline queries are preserved when they're read back from history.
Additionally, also test that comments are preserved.
"""
# regex for pexpect, a shell prompt is expected after each command..
prompt_regex = '.*%s:2100.*' % socket.getfqdn()
# readline gets its input from tty, so using stdin does not work.
child_proc = pexpect.spawn(SHELL_CMD)
queries = ["select\n1--comment;",
"select /*comment*/\n1;",
"select\n/*comm\nent*/\n1;"]
for query in queries:
child_proc.expect(prompt_regex)
child_proc.sendline(query)
child_proc.expect(prompt_regex)
child_proc.sendline('quit;')
p = self._start_new_shell_process()
self._send_cmd_to_shell(p, 'history')
result = get_shell_cmd_result(p)
for query in queries:
assert query in result.stderr, "'%s' not in '%s'" % (query, result.stderr)
def run_impala_shell_interactive(command, shell_args=''):
"""Runs a command in the Impala shell interactively."""
cmd = "%s %s" % (SHELL_CMD, shell_args)
# workaround to make Popen environment 'utf-8' compatible
# since piping defaults to ascii
  my_env = os.environ.copy()
  my_env['PYTHONIOENCODING'] = 'utf-8'
  p = Popen(shlex.split(cmd), stdout=PIPE,
            stdin=PIPE, stderr=PIPE, env=my_env)
p.stdin.write(command + "\n")
p.stdin.flush()
return get_shell_cmd_result(p)
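# Usage sketch: run_impala_shell_interactive("select 1;") returns the same
# result object as get_shell_cmd_result, so callers assert on result.stdout
# and result.stderr exactly as the tests above do.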
| andybab/Impala | tests/shell/test_shell_interactive.py | Python | apache-2.0 | 9,064 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MAXDB_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.maxdb.enumeration import Enumeration
from plugins.dbms.maxdb.filesystem import Filesystem
from plugins.dbms.maxdb.fingerprint import Fingerprint
from plugins.dbms.maxdb.syntax import Syntax
from plugins.dbms.maxdb.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MaxDBMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
"""
This class defines SAP MaxDB methods
"""
def __init__(self):
self.excludeDbsList = MAXDB_SYSTEM_DBS
Syntax.__init__(self)
Fingerprint.__init__(self)
Enumeration.__init__(self)
Filesystem.__init__(self)
Miscellaneous.__init__(self)
Takeover.__init__(self)
unescaper[DBMS.MAXDB] = Syntax.escape
| glaudsonml/kurgan-ai | tools/sqlmap/plugins/dbms/maxdb/__init__.py | Python | apache-2.0 | 1,033 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest
class ApiFMaxTest(unittest.TestCase):
"""ApiFMaxTest"""
def setUp(self):
"""setUp"""
if core.is_compiled_with_cuda():
self.place = core.CUDAPlace(0)
else:
self.place = core.CPUPlace()
self.input_x = np.random.rand(10, 15).astype("float32")
self.input_y = np.random.rand(10, 15).astype("float32")
self.input_z = np.random.rand(15).astype("float32")
self.input_a = np.array([0, np.nan, np.nan]).astype('int64')
self.input_b = np.array([2, np.inf, -np.inf]).astype('int64')
self.input_c = np.array([4, 1, 3]).astype('int64')
self.np_expected1 = np.fmax(self.input_x, self.input_y)
self.np_expected2 = np.fmax(self.input_x, self.input_z)
self.np_expected3 = np.fmax(self.input_a, self.input_c)
self.np_expected4 = np.fmax(self.input_b, self.input_c)
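        # np.fmax differs from np.maximum in NaN handling: when exactly one
        # operand is NaN it returns the other operand rather than NaN.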
def test_static_api(self):
"""test_static_api"""
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
data_y = paddle.static.data("y", shape=[10, 15], dtype="float32")
result_fmax = paddle.fmax(data_x, data_y)
exe = paddle.static.Executor(self.place)
res, = exe.run(feed={"x": self.input_x,
"y": self.input_y},
fetch_list=[result_fmax])
self.assertTrue(np.allclose(res, self.np_expected1))
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
data_z = paddle.static.data("z", shape=[15], dtype="float32")
result_fmax = paddle.fmax(data_x, data_z)
exe = paddle.static.Executor(self.place)
res, = exe.run(feed={"x": self.input_x,
"z": self.input_z},
fetch_list=[result_fmax])
self.assertTrue(np.allclose(res, self.np_expected2))
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
data_a = paddle.static.data("a", shape=[3], dtype="int64")
data_c = paddle.static.data("c", shape=[3], dtype="int64")
result_fmax = paddle.fmax(data_a, data_c)
exe = paddle.static.Executor(self.place)
res, = exe.run(feed={"a": self.input_a,
"c": self.input_c},
fetch_list=[result_fmax])
self.assertTrue(np.allclose(res, self.np_expected3))
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
data_b = paddle.static.data("b", shape=[3], dtype="int64")
data_c = paddle.static.data("c", shape=[3], dtype="int64")
result_fmax = paddle.fmax(data_b, data_c)
exe = paddle.static.Executor(self.place)
res, = exe.run(feed={"b": self.input_b,
"c": self.input_c},
fetch_list=[result_fmax])
self.assertTrue(np.allclose(res, self.np_expected4))
def test_dynamic_api(self):
"""test_dynamic_api"""
paddle.disable_static()
x = paddle.to_tensor(self.input_x)
y = paddle.to_tensor(self.input_y)
z = paddle.to_tensor(self.input_z)
a = paddle.to_tensor(self.input_a)
b = paddle.to_tensor(self.input_b)
c = paddle.to_tensor(self.input_c)
res = paddle.fmax(x, y)
res = res.numpy()
self.assertTrue(np.allclose(res, self.np_expected1))
# test broadcast
res = paddle.fmax(x, z)
res = res.numpy()
self.assertTrue(np.allclose(res, self.np_expected2))
res = paddle.fmax(a, c)
res = res.numpy()
self.assertTrue(np.allclose(res, self.np_expected3))
res = paddle.fmax(b, c)
res = res.numpy()
self.assertTrue(np.allclose(res, self.np_expected4))
class TestElementwiseFmaxOp(OpTest):
"""TestElementwiseFmaxOp"""
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmax"
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.fmax(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
"""test_check_output"""
self.check_output()
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
    def test_check_grad_ignore_x(self):
        """test_check_grad_ignore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
        """test_check_grad_ignore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
class TestElementwiseFmax2Op(OpTest):
"""TestElementwiseFmax2Op"""
def setUp(self):
"""setUp"""
self.op_type = "elementwise_fmax"
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
y[2, 10:] = np.nan
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.fmax(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
"""test_check_output"""
self.check_output()
def test_check_grad_normal(self):
"""test_check_grad_normal"""
self.check_grad(['X', 'Y'], 'Out')
    def test_check_grad_ignore_x(self):
        """test_check_grad_ignore_x"""
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
        """test_check_grad_ignore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_fmax_op.py | Python | apache-2.0 | 7,540 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import struct
import socket
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import hub
from ryu.lib import mac
LOG = logging.getLogger('ryu.lib.ofctl_v1_2')
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
if action_type == 'OUTPUT':
out_port = int(dic.get('port', ofp.OFPP_ANY))
max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
result = parser.OFPActionOutput(out_port, max_len)
elif action_type == 'COPY_TTL_OUT':
result = parser.OFPActionCopyTtlOut()
elif action_type == 'COPY_TTL_IN':
result = parser.OFPActionCopyTtlIn()
elif action_type == 'SET_MPLS_TTL':
mpls_ttl = int(dic.get('mpls_ttl'))
result = parser.OFPActionSetMplsTtl(mpls_ttl)
elif action_type == 'DEC_MPLS_TTL':
result = parser.OFPActionDecMplsTtl()
elif action_type == 'PUSH_VLAN':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushVlan(ethertype)
elif action_type == 'POP_VLAN':
result = parser.OFPActionPopVlan()
elif action_type == 'PUSH_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushMpls(ethertype)
elif action_type == 'POP_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPopMpls(ethertype)
elif action_type == 'SET_QUEUE':
queue_id = int(dic.get('queue_id'))
result = parser.OFPActionSetQueue(queue_id)
elif action_type == 'GROUP':
group_id = int(dic.get('group_id'))
result = parser.OFPActionGroup(group_id)
elif action_type == 'SET_NW_TTL':
nw_ttl = int(dic.get('nw_ttl'))
result = parser.OFPActionSetNwTtl(nw_ttl)
elif action_type == 'DEC_NW_TTL':
result = parser.OFPActionDecNwTtl()
elif action_type == 'SET_FIELD':
field = dic.get('field')
value = dic.get('value')
result = parser.OFPActionSetField(**{field: value})
else:
result = None
return result
def to_actions(dp, acts):
inst = []
actions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for a in acts:
action = to_action(dp, a)
if action is not None:
actions.append(action)
else:
action_type = a.get('type')
if action_type == 'GOTO_TABLE':
table_id = int(a.get('table_id'))
inst.append(parser.OFPInstructionGotoTable(table_id))
elif action_type == 'WRITE_METADATA':
metadata = str_to_int(a.get('metadata'))
metadata_mask = (str_to_int(a['metadata_mask'])
if 'metadata_mask' in a
else parser.UINT64_MAX)
inst.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
else:
                LOG.debug('Unknown action type: %s', action_type)
inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
return inst
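# Illustrative input/output for to_actions() (comments only; the dicts are
# made-up examples of the REST-style payload this module consumes):
#   to_actions(dp, [{'type': 'GOTO_TABLE', 'table_id': 1},
#                   {'type': 'OUTPUT', 'port': 2}])
# yields [OFPInstructionGotoTable(1),
#         OFPInstructionActions(OFPIT_APPLY_ACTIONS,
#                               [OFPActionOutput(2, ...)])]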
def action_to_str(act):
action_type = act.cls_action_type
if action_type == ofproto_v1_2.OFPAT_OUTPUT:
buf = 'OUTPUT:' + str(act.port)
elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_OUT:
buf = 'COPY_TTL_OUT'
elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_IN:
buf = 'COPY_TTL_IN'
elif action_type == ofproto_v1_2.OFPAT_SET_MPLS_TTL:
buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
elif action_type == ofproto_v1_2.OFPAT_DEC_MPLS_TTL:
buf = 'DEC_MPLS_TTL'
elif action_type == ofproto_v1_2.OFPAT_PUSH_VLAN:
buf = 'PUSH_VLAN:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_POP_VLAN:
buf = 'POP_VLAN'
elif action_type == ofproto_v1_2.OFPAT_PUSH_MPLS:
buf = 'PUSH_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_POP_MPLS:
buf = 'POP_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_2.OFPAT_SET_QUEUE:
buf = 'SET_QUEUE:' + str(act.queue_id)
elif action_type == ofproto_v1_2.OFPAT_GROUP:
buf = 'GROUP:' + str(act.group_id)
elif action_type == ofproto_v1_2.OFPAT_SET_NW_TTL:
buf = 'SET_NW_TTL:' + str(act.nw_ttl)
elif action_type == ofproto_v1_2.OFPAT_DEC_NW_TTL:
buf = 'DEC_NW_TTL'
elif action_type == ofproto_v1_2.OFPAT_SET_FIELD:
buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
else:
buf = 'UNKNOWN'
return buf
def actions_to_str(instructions):
actions = []
for instruction in instructions:
if isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionActions):
for a in instruction.actions:
actions.append(action_to_str(a))
elif isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionGotoTable):
buf = 'GOTO_TABLE:' + str(instruction.table_id)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_2_parser.OFPInstructionWriteMetadata):
buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
instruction.metadata_mask)
if instruction.metadata_mask
else 'WRITE_METADATA:0x%x' % instruction.metadata)
actions.append(buf)
else:
continue
return actions
def to_match(dp, attrs):
convert = {'in_port': int,
'in_phy_port': int,
'metadata': to_match_metadata,
'dl_dst': to_match_eth,
'dl_src': to_match_eth,
'eth_dst': to_match_eth,
'eth_src': to_match_eth,
'dl_type': int,
'eth_type': int,
'dl_vlan': to_match_vid,
'vlan_vid': to_match_vid,
'vlan_pcp': int,
'ip_dscp': int,
'ip_ecn': int,
'nw_proto': int,
'ip_proto': int,
'nw_src': to_match_ip,
'nw_dst': to_match_ip,
'ipv4_src': to_match_ip,
'ipv4_dst': to_match_ip,
'tp_src': int,
'tp_dst': int,
'tcp_src': int,
'tcp_dst': int,
'udp_src': int,
'udp_dst': int,
'sctp_src': int,
'sctp_dst': int,
'icmpv4_type': int,
'icmpv4_code': int,
'arp_op': int,
'arp_spa': to_match_ip,
'arp_tpa': to_match_ip,
'arp_sha': to_match_eth,
'arp_tha': to_match_eth,
'ipv6_src': to_match_ip,
'ipv6_dst': to_match_ip,
'ipv6_flabel': int,
'icmpv6_type': int,
'icmpv6_code': int,
'ipv6_nd_target': to_match_ip,
'ipv6_nd_sll': to_match_eth,
'ipv6_nd_tll': to_match_eth,
'mpls_label': int,
'mpls_tc': int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'nw_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['nw_src']
del attrs['nw_src']
if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['nw_dst']
del attrs['nw_dst']
kwargs = {}
for key, value in attrs.items():
if key in convert:
value = convert[key](value)
if key in keys:
# For old field name
key = keys[key]
if key == 'tp_src' or key == 'tp_dst':
# TCP/UDP port
conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
'tp_dst': 'tcp_dst'},
inet.IPPROTO_UDP: {'tp_src': 'udp_src',
'tp_dst': 'udp_dst'}}
ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
key = conv[ip_proto][key]
kwargs[key] = value
else:
# others
kwargs[key] = value
return dp.ofproto_parser.OFPMatch(**kwargs)
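# A hedged example of the normalisation performed above (comments only;
# values are illustrative):
#   to_match(dp, {'dl_type': 0x0800, 'nw_proto': 6, 'tp_dst': 80})
# becomes OFPMatch(eth_type=2048, ip_proto=6, tcp_dst=80) -- old-style
# keys are renamed and tp_src/tp_dst are resolved via the IP protocol.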
def to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value
def to_match_ip(value):
if '/' in value:
ip = netaddr.ip.IPNetwork(value)
ip_addr = str(ip.ip)
ip_mask = str(ip.netmask)
return ip_addr, ip_mask
else:
return value
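# Worked examples for the two helpers above (comments only):
#   to_match_eth('aa:bb:cc:dd:ee:ff/ff:ff:ff:00:00:00')
#       -> ('aa:bb:cc:dd:ee:ff', 'ff:ff:ff:00:00:00')
#   to_match_ip('10.0.0.0/8') -> ('10.0.0.0', '255.0.0.0')
#   to_match_ip('10.0.0.1')   -> '10.0.0.1'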
def to_match_vid(value):
# NOTE: If "vlan_id/dl_vlan" field is described as decimal int value
# (and decimal string value), it is treated as values of
# VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically
# applied. OTOH, If it is described as hexadecimal string,
# treated as values of oxm_value (including OFPVID_PRESENT
# bit), and OFPVID_PRESENT bit is NOT automatically applied.
if isinstance(value, int):
# described as decimal int value
return value | ofproto_v1_2.OFPVID_PRESENT
else:
if '/' in value:
val = value.split('/')
return int(val[0], 0), int(val[1], 0)
else:
if value.isdigit():
# described as decimal string value
return int(value, 10) | ofproto_v1_2.OFPVID_PRESENT
else:
return int(value, 0)
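# Worked examples of the VID rules described above (comments only):
#   to_match_vid(100)             -> 100 | 0x1000 == 0x1064
#   to_match_vid('100')           -> 0x1064 (decimal string, PRESENT added)
#   to_match_vid('0x1064')        -> 0x1064 (hex string used verbatim)
#   to_match_vid('0x1000/0x1000') -> (0x1000, 0x1000)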
def to_match_metadata(value):
if '/' in value:
value = value.split('/')
return str_to_int(value[0]), str_to_int(value[1])
else:
return str_to_int(value)
def match_to_str(ofmatch):
keys = {'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
'vlan_vid': 'dl_vlan',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'ip_proto': 'nw_proto',
'tcp_src': 'tp_src',
'tcp_dst': 'tp_dst',
'udp_src': 'tp_src',
'udp_dst': 'tp_dst'
}
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
if key in keys:
key = keys[key]
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
elif key == 'metadata':
value = match_metadata_to_str(value, mask)
else:
if mask is not None:
value = value + '/' + mask
else:
value = value
match.setdefault(key, value)
return match
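# Sketch of the inverse mapping (comments only; values illustrative): an
# OFPMatch carrying eth_type=0x0800 and tcp_dst=80 is rendered as
#   {'dl_type': 2048, 'tp_dst': 80}
# i.e. new-style OXM field names are folded back to the old-style keys.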
def match_metadata_to_str(value, mask):
return ('%d/%d' % (value, mask) if mask else '%d' % value)
def match_vid_to_str(value, mask):
if mask is not None:
value = '0x%04x/0x%04x' % (value, mask)
else:
if value & ofproto_v1_2.OFPVID_PRESENT:
value = str(value & ~ofproto_v1_2.OFPVID_PRESENT)
else:
value = '0x%04x' % value
return value
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
if not lock.is_set():
del waiters_per_dp[stats.xid]
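# Note (not in the original file): the reply path is completed elsewhere --
# an OFP stats-reply handler registered by the caller is expected to append
# reply messages to `msgs` and set the hub.Event stored in `waiters`, which
# releases the lock.wait() above before DEFAULT_TIMEOUT expires.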
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = {}
for msg in msgs:
stats = msg.body
s = {'mfr_desc': stats.mfr_desc,
'hw_desc': stats.hw_desc,
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
def get_queue_stats(dp, waiters):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
def get_flow_stats(dp, waiters, flow={}):
table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, table_id, out_port, out_group, cookie, cookie_mask, match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': stats.table_id,
'length': stats.length}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_port_stats(dp, waiters):
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, dp.ofproto.OFPP_ANY, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': stats.port_no,
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions}
ports.append(s)
ports = {str(dp.id): ports}
return ports
def get_group_stats(dp, waiters):
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, dp.ofproto.OFPG_ALL, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
groups = []
for msg in msgs:
for stats in msg.body:
bucket_counters = []
for bucket_counter in stats.bucket_counters:
c = {'packet_count': bucket_counter.packet_count,
'byte_count': bucket_counter.byte_count}
bucket_counters.append(c)
g = {'length': stats.length,
'group_id': stats.group_id,
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'bucket_stats': bucket_counters}
groups.append(g)
groups = {str(dp.id): groups}
return groups
def get_group_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
features = {str(dp.id): features}
return features
def get_group_desc(dp, waiters):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
dp.ofproto.OFPGT_FF: 'FF'}
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
for stats in msg.body:
buckets = []
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': stats.group_id,
'buckets': buckets}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
stats = msg.ports
for stat in stats.values():
d = {'port_no': stat.port_no,
'hw_addr': stat.hw_addr,
'name': stat.name,
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def mod_flow_entry(dp, flow, cmd):
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
table_id = int(flow.get('table_id', 0))
idle_timeout = int(flow.get('idle_timeout', 0))
hard_timeout = int(flow.get('hard_timeout', 0))
priority = int(flow.get('priority', 0))
buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
flags = int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_actions(dp, flow.get('actions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
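# A minimal flow dict accepted by mod_flow_entry() (comments only; the
# values are illustrative, not from the original file):
#   {'priority': 100,
#    'match': {'in_port': 1, 'dl_type': 0x0800},
#    'actions': [{'type': 'OUTPUT', 'port': 2}]}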
def mod_group_entry(dp, group, cmd):
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
type_ = type_convert.get(group.get('type', 'ALL'))
if type_ is None:
LOG.debug('Unknown type: %s', group.get('type'))
group_id = int(group.get('group_id', 0))
buckets = []
for bucket in group.get('buckets', []):
weight = int(bucket.get('weight', 0))
watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
actions.append(action)
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
port_no = int(port_config.get('port_no', 0))
hw_addr = port_config.get('hw_addr')
config = int(port_config.get('config', 0))
mask = int(port_config.get('mask', 0))
advertise = int(port_config.get('advertise'))
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
    if data_type not in ('ascii', 'base64'):
LOG.debug('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
| Neil741/ryu-master | ryu/lib/ofctl_v1_2.py | Python | apache-2.0 | 24,202 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
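# Illustrative update payload (comment only; values are made up): a compute
# node may call instance_update(ctxt, uuid, {'task_state': None,
# 'launched_at': '2013-08-01T00:00:00.000000'}); string values for the
# datetime fields above are parsed back into datetimes before hitting the
# database.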
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
RPC_API_VERSION = '1.58'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.quotas = quota.QUOTAS
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def create_rpc_dispatcher(self, *args, **kwargs):
kwargs['additional_apis'] = [self.compute_task_mgr]
return super(ConductorManager, self).create_rpc_dispatcher(*args,
**kwargs)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@rpc_common.client_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service=None):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, basestring):
updates[key] = timeutils.parse_strtime(value)
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get(self, context, instance_id):
return jsonutils.to_primitive(
self.db.instance_get(context, instance_id))
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_get(self, context, migration_id):
migration_ref = self.db.migration_get(context.elevated(),
migration_id)
return jsonutils.to_primitive(migration_ref)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
# NOTE(comstud): This method can be removed in v2.0 of the RPC API.
def migration_create(self, context, instance, values):
values.update({'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'source_node': instance['node']})
migration_ref = self.db.migration_create(context.elevated(), values)
return jsonutils.to_primitive(migration_ref)
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
migration['id'],
{'status': status})
return jsonutils.to_primitive(migration_ref)
@rpc_common.client_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@rpc_common.client_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
@rpc_common.client_exceptions(exception.AggregateNotFound)
def aggregate_get(self, context, aggregate_id):
aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
return jsonutils.to_primitive(aggregate)
def aggregate_get_by_host(self, context, host, key=None):
aggregates = self.db.aggregate_get_by_host(context.elevated(),
host, key)
return jsonutils.to_primitive(aggregates)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
new_metadata = self.db.aggregate_metadata_add(context.elevated(),
aggregate['id'],
metadata, set_delete)
return jsonutils.to_primitive(new_metadata)
@rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
def aggregate_metadata_delete(self, context, aggregate, key):
self.db.aggregate_metadata_delete(context.elevated(),
aggregate['id'], key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None,
update_cells=True):
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
return self.backdoor_port
def security_group_get_by_instance(self, context, instance):
group = self.db.security_group_get_by_instance(context,
instance['uuid'])
return jsonutils.to_primitive(group)
def security_group_rule_get_by_security_group(self, context, secgroup):
rules = self.db.security_group_rule_get_by_security_group(
context, secgroup['id'])
return jsonutils.to_primitive(rules, max_depth=4)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values,
create=None):
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
        # NOTE(comstud): 'bdm' is always in the new format, so we
# account for this in cells/messaging.py
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
if bdms is not None:
for bdm in bdms:
self.db.block_device_mapping_destroy(context, bdm['id'])
# NOTE(comstud): bdm['id'] will be different in API cell,
# so we must try to destroy by device_name or volume_id.
# We need an instance_uuid in order to do this properly,
# too.
# I hope to clean a lot of this up in the object
# implementation.
instance_uuid = (bdm['instance_uuid'] or
(instance and instance['uuid']))
if not instance_uuid:
continue
# Better to be safe than sorry. device_name is not
# NULLable, however it could be an empty string.
if bdm['device_name']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
device_name=bdm['device_name'])
elif bdm['volume_id']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
volume_id=bdm['volume_id'])
elif instance is not None and volume_id is not None:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance['uuid'], volume_id)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], volume_id=volume_id)
elif instance is not None and device_name is not None:
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device_name)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], device_name=device_name)
else:
# NOTE(danms): This shouldn't happen
raise exception.Invalid(_("Invalid block_device_mapping_destroy"
" invocation"))
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all_hung_in_rebooting(self, context, timeout):
result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end=None,
project_id=None, host=None):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
self.db.instance_destroy(context, instance['uuid'])
def instance_info_cache_delete(self, context, instance):
self.db.instance_info_cache_delete(context, instance['uuid'])
def instance_info_cache_update(self, context, instance, values):
self.db.instance_info_cache_update(context, instance['uuid'],
values)
def instance_type_get(self, context, instance_type_id):
result = self.db.flavor_get(context, instance_type_id)
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
def vol_get_usage_by_time(self, context, start_time):
result = self.db.vol_get_usage_by_time(context, start_time)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v2.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@rpc_common.client_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v2.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@rpc_common.client_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values, prune_stats=False):
result = self.db.compute_node_update(context, node['id'], values,
prune_stats)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@rpc_common.client_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state=None):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
if '%s_id' % image_type in instance:
image_id = instance['%s_id' % image_type]
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
# NOTE(danms): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_stop(self, context, instance, do_cast=True):
# NOTE(mriedem): Clients using an interface before 1.43 will be sending
# dicts so we need to handle that here since compute/api::stop()
# requires an object.
if isinstance(instance, dict):
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance)
self.compute_api.stop(context, instance, do_cast)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_confirm_resize(self, context, instance, migration_ref):
if isinstance(instance, dict):
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if isinstance(migration_ref, dict):
migration_ref = migration_obj.Migration._from_db_object(
context.elevated(), migration_ref)
self.compute_api.confirm_resize(context, instance,
migration=migration_ref)
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in a ClientException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise rpc_common.ClientException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
return self._object_dispatch(objclass, objmethod, context,
args, kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = copy.copy(objinst)
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for field in objinst.fields:
if not objinst.obj_attr_is_set(field):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(field) or
oldobj[field] != objinst[field]):
updates[field] = objinst._attr_to_primitive(field)
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
# NOTE(danms): This method is now deprecated and can be removed in
# v2.0 of the RPC API
def compute_reboot(self, context, instance, reboot_type):
self.compute_api.reboot(context, instance, reboot_type)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
RPC_API_NAMESPACE = 'compute_task'
RPC_API_VERSION = '1.6'
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.image_service = glance.get_default_image_service()
self.quotas = quota.QUOTAS
@rpc_common.client_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, instance_obj.Instance):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, ConductorManager(),
'cold_migrate', instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
try:
hosts = self.scheduler_rpcapi.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
LOG.warning(_("No valid host found for cold migrate"))
return
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep tests backward compatibility.
request_spec['instance_type'].pop('extra_specs')
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
#TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
with excutils.save_and_reraise_exception():
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ERROR},
ex, request_spec, self.db)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
def _get_image(self, context, image_id):
if not image_id:
return None
return self.image_service.show(context, image_id)
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,
image_id)
return image_service.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# dict(host='', nodename='', limits='')
hosts = self.scheduler_rpcapi.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
sys_meta = instance.system_metadata
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
snapshot_id = sys_meta.get('shelved_image_id')
if snapshot_id:
self._delete_image(context, snapshot_id)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
try:
with compute_utils.EventReporter(context, self.db,
'get_image_info', instance.uuid):
image = self._get_image(context,
sys_meta['shelved_image_id'])
except exception.ImageNotFound:
with excutils.save_and_reraise_exception():
                    LOG.error(_('Unshelve attempted but the image %s '
                                'cannot be found'),
                              sys_meta['shelved_image_id'],
                              instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
hosts = self._schedule_instances(context, image, [], instance)
host = hosts.pop(0)['host']
self.compute_rpcapi.unshelve_instance(context, instance, host,
image)
else:
LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in sys_meta:
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
| pombredanne/MOG | nova/conductor/manager.py | Python | apache-2.0 | 39,345 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
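    # Arithmetic sketch for augment() above (comments only; numbers are
    # illustrative): with beta=0.5, mix is drawn per example from
    # Beta(0.5, 0.5) and clamped into [0.5, 1], so each output is dominated
    # by its own input; e.g. mix[0] = 0.7 gives
    #   xmix[0] = 0.7 * x[0] + 0.3 * x[-1]
    #   lmix[0] = 0.7 * l[0] + 0.3 * l[-1]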
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
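# Objective sketch for Mixup.model above (comments only): the model
# minimises supervised cross-entropy on mixed labelled batches plus
# cross-entropy against stop-gradient softmax pseudo-labels on mixed
# unlabelled batches, with per-step weight decay (wd * lr) applied to
# 'kernel' variables and an EMA of the weights used for evaluation.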
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
| google-research/remixmatch | mixup.py | Python | apache-2.0 | 4,608 |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, Mock, call
from nose_parameterized import parameterized
from netaddr import IPAddress, IPNetwork
from subprocess import CalledProcessError
from calico_ctl.bgp import *
from calico_ctl import container
from calico_ctl import utils
from pycalico.datastore_datatypes import Endpoint, IPPool
class TestContainer(unittest.TestCase):
@parameterized.expand([
({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'127.a.0.1'}, True),
({'<CONTAINER>':'node1', 'ip':1, 'add':1, '<IP>':'aa:bb::zz'}, True),
({'add':1, '<CONTAINER>':'node1', '<IP>':'127.a.0.1'}, True),
({'add':1, '<CONTAINER>':'node1', '<IP>':'aa:bb::zz'}, True)
])
def test_validate_arguments(self, case, sys_exit_called):
"""
Test validate_arguments for calicoctl container command
"""
with patch('sys.exit', autospec=True) as m_sys_exit:
# Call method under test
container.validate_arguments(case)
# Assert method exits if bad input
self.assertEqual(m_sys_exit.called, sys_exit_called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_add(self, m_netns, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': "not host"}
}
m_client.get_endpoint.side_effect = KeyError
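        # get_endpoint raising KeyError means no endpoint exists yet, so the add proceeds.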
m_client.get_default_next_hops.return_value = 'next_hops'
# Call method under test
test_return = container.container_add('container1', '1.1.1.1', 'interface')
# Assert
m_enforce_root.assert_called_once_with()
m_get_container_info_or_exit.assert_called_once_with('container1')
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_get_pool_or_exit.assert_called_once_with(IPAddress('1.1.1.1'))
m_client.get_default_next_hops.assert_called_once_with(utils.hostname)
        # Check an Endpoint object was returned
self.assertTrue(isinstance(test_return, Endpoint))
self.assertTrue(m_netns.create_veth.called)
self.assertTrue(m_netns.move_veth_into_ns.called)
self.assertTrue(m_netns.add_ip_to_ns_veth.called)
self.assertTrue(m_netns.add_ns_default_route.called)
self.assertTrue(m_netns.get_ns_veth_mac.called)
self.assertTrue(m_client.set_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_add_container_host_ns(self, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add method of calicoctl container command when the
container shares the host namespace.
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'},
'HostConfig': {'NetworkMode': 'host'}
}
m_client.get_endpoint.side_effect = KeyError
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
m_enforce_root.assert_called_once_with()
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_existing_container(
self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
m_enforce_root):
"""
Test container_add when a container already exists.
        client.get_endpoint does not raise an exception, so the endpoint already exists.
Assert that the system then exits and all expected calls are made
"""
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_get_pool_or_exit.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_container_not_running(
self, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add when a container is not running
get_container_info_or_exit returns a running state of value 0
Assert that the system then exits and all expected calls are made
"""
# Set up mock object
m_client.get_endpoint.side_effect = KeyError
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_get_pool_or_exit.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
def test_container_add_not_ipv4_configured(
self, m_get_pool_or_exit, m_client, m_get_container_info_or_exit,
m_enforce_root):
"""
Test container_add when the client cannot obtain next hop IPs
client.get_default_next_hops returns an empty dictionary, which produces
a KeyError when trying to determine the IP.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_client.get_endpoint.side_effect = KeyError
m_client.get_default_next_hops.return_value = {}
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_client.get_default_next_hops.called)
self.assertFalse(m_client.assign_address.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_add_ip_previously_assigned(
self, m_netns, m_get_pool_or_exit, m_client,
m_get_container_info_or_exit, m_enforce_root):
"""
Test container_add when an ip address is already assigned in pool
client.assign_address returns an empty list.
Assert that the system then exits and all expected calls are made
"""
# Set up mock object
m_client.get_endpoint.side_effect = KeyError
m_client.assign_address.return_value = []
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_add,
'container1', '1.1.1.1', 'interface')
# Assert only expected calls were made
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_client.get_default_next_hops.called)
self.assertTrue(m_client.assign_address.called)
self.assertFalse(m_netns.create_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_id', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_remove(self, m_netns, m_client, m_get_container_id,
m_enforce_root):
"""
Test for container_remove of calicoctl container command
"""
# Set up mock objects
m_get_container_id.return_value = 666
ipv4_nets = set()
ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
ipv6_nets = set()
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv4_nets = ipv4_nets
m_endpoint.ipv6_nets = ipv6_nets
m_endpoint.endpoint_id = 12
m_endpoint.name = "eth1234"
ippool = IPPool('1.1.1.1/24')
m_client.get_endpoint.return_value = m_endpoint
m_client.get_ip_pools.return_value = [ippool]
# Call method under test
container.container_remove('container1')
# Assert
m_enforce_root.assert_called_once_with()
m_get_container_id.assert_called_once_with('container1')
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
self.assertEqual(m_client.unassign_address.call_count, 1)
m_netns.remove_veth.assert_called_once_with("eth1234")
m_client.remove_workload.assert_called_once_with(
utils.hostname, utils.ORCHESTRATOR_ID, 666)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_container_id', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_remove_no_endpoint(
self, m_client, m_get_container_id, m_enforce_root):
"""
Test for container_remove when the client cannot obtain an endpoint
client.get_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_client.get_endpoint.side_effect = KeyError
# Call function under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_remove, 'container1')
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_id.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.get_ip_pools.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_ipv4(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add with an ipv4 ip argument
Assert that the correct calls associated with an ipv4 address are made
"""
# Set up mock objects
pool_return = 'pool'
m_get_pool_or_exit.return_value = pool_return
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
ip_addr = IPAddress(ip)
interface = 'interface'
# Call method under test
container.container_ip_add(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(ip_addr)
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
m_endpoint.ipv4_nets.add.assert_called_once_with(IPNetwork(ip_addr))
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.add_ip_to_ns_veth.assert_called_once_with(
'Pid_info', ip_addr, interface
)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_ipv6(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add with an ipv6 ip argument
Assert that the correct calls associated with an ipv6 address are made
"""
# Set up mock objects
pool_return = 'pool'
m_get_pool_or_exit.return_value = pool_return
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
ip_addr = IPAddress(ip)
interface = 'interface'
# Call method under test
container.container_ip_add(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(ip_addr)
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.assign_address.assert_called_once_with(pool_return, ip_addr)
m_endpoint.ipv6_nets.add.assert_called_once_with(IPNetwork(ip_addr))
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.add_ip_to_ns_veth.assert_called_once_with(
'Pid_info', ip_addr, interface
)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client.get_endpoint', autospec=True)
def test_container_ip_add_container_not_running(
self, m_client_get_endpoint, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertFalse(m_client_get_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
def test_container_ip_add_container_not_in_calico(
self, m_print_container_not_in_calico_msg, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the container is not networked into calico
client.get_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.get_endpoint.return_value = Mock()
m_client.get_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a System Exit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
m_print_container_not_in_calico_msg.assert_called_once_with(container_name)
self.assertFalse(m_client.assign_address.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_add_fail_assign_address(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the client cannot assign an IP
client.assign_address returns an empty list.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.assign_address.return_value = []
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_netns.add_ip_to_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_error_updating_datastore(
self, m_netns_add_ip_to_ns_veth, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_add when the client fails to update endpoint
client.update_endpoint raises a KeyError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.update_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
m_client.unassign_address.assert_called_once_with('pool', ip)
self.assertFalse(m_netns_add_ip_to_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_netns_error_ipv4(
self, m_netns_add_ip_to_ns_veth, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_add when netns cannot add an ipv4 to interface
netns.add_ip_to_ns_veth throws a CalledProcessError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_get_pool_or_exit.return_value = 'pool'
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(
1, m_netns_add_ip_to_ns_veth, "Error updating container")
m_netns_add_ip_to_ns_veth.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
self.assertTrue(m_netns_add_ip_to_ns_veth.called)
m_endpoint.ipv4_nets.remove.assert_called_once_with(
IPNetwork(IPAddress(ip))
)
m_client.update_endpoint.assert_has_calls([
call(m_endpoint), call(m_endpoint)])
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.print_container_not_in_calico_msg', autospec=True)
@patch('calico_ctl.container.netns.add_ip_to_ns_veth', autospec=True)
def test_container_ip_add_netns_error_ipv6(
self, m_netns, m_print_container_not_in_calico_msg, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_add when netns cannot add an ipv6 to interface
netns.add_ip_to_ns_veth throws a CalledProcessError.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_get_pool_or_exit.return_value = 'pool'
m_endpoint = Mock()
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(1, m_netns, "Error updating container")
m_netns.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test
self.assertRaises(SystemExit, container.container_ip_add,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.assign_address.called)
self.assertTrue(m_netns.called)
m_endpoint.ipv6_nets.remove.assert_called_once_with(
IPNetwork(IPAddress(ip))
)
m_client.update_endpoint.assert_has_calls([
call(m_endpoint), call(m_endpoint)])
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_ipv4(self, m_netns, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove with an ipv4 ip argument
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv4_nets = set()
ipv4_nets.add(IPNetwork(IPAddress('1.1.1.1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv4_nets = ipv4_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1.1.1.1'
interface = 'interface'
# Call method under test
container.container_ip_remove(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.remove_ip_from_ns_veth.assert_called_once_with(
'Pid_info',
IPAddress(ip),
interface
)
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_ipv6(self, m_netns, m_client,
m_get_container_info_or_exit, m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_remove with an ipv6 ip argument
"""
# Set up mock objects
m_get_pool_or_exit.return_value = 'pool'
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test
container.container_ip_remove(container_name, ip, interface)
# Assert
m_enforce_root.assert_called_once_with()
m_get_pool_or_exit.assert_called_once_with(IPAddress(ip))
m_get_container_info_or_exit.assert_called_once_with(container_name)
m_client.get_endpoint.assert_called_once_with(
hostname=utils.hostname,
orchestrator_id=utils.ORCHESTRATOR_ID,
workload_id=666
)
m_client.update_endpoint.assert_called_once_with(m_endpoint)
m_netns.remove_ip_from_ns_veth.assert_called_once_with(
'Pid_info',
IPAddress(ip),
interface
)
m_client.unassign_address.assert_called_once_with('pool', ip)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_not_running(
self, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test for container_ip_remove when the container is not running
get_container_info_or_exit returns a running state of value 0.
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 0, 'Pid': 'Pid_info'}
}
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertFalse(m_client.get_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_ip_not_assigned(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test container_ip_remove when an IP address is not assigned to a container
client.get_endpoint returns an endpoint with no ip nets
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
def test_container_ip_remove_container_not_on_calico(
self, m_client, m_get_container_info_or_exit, m_get_pool_or_exit,
m_enforce_root):
"""
Test for container_ip_remove when container is not networked into Calico
client.get_endpoint raises a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
m_client.get_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertFalse(m_client.update_endpoint.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_fail_updating_datastore(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails to update endpoint in datastore
client.update_endpoint throws a KeyError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
m_client.update_endpoint.side_effect = KeyError
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertFalse(m_netns.remove_ip_from_ns_veth.called)
@patch('calico_ctl.container.enforce_root', autospec=True)
@patch('calico_ctl.container.get_pool_or_exit', autospec=True)
@patch('calico_ctl.container.get_container_info_or_exit', autospec=True)
@patch('calico_ctl.container.client', autospec=True)
@patch('calico_ctl.container.netns', autospec=True)
def test_container_ip_remove_netns_error(
self, m_netns, m_client, m_get_container_info_or_exit,
m_get_pool_or_exit, m_enforce_root):
"""
Test container_ip_remove when client fails on removing ip from interface
netns.remove_ip_from_ns_veth raises a CalledProcessError
Assert that the system then exits and all expected calls are made
"""
# Set up mock objects
m_get_container_info_or_exit.return_value = {
'Id': 666,
'State': {'Running': 1, 'Pid': 'Pid_info'}
}
ipv6_nets = set()
ipv6_nets.add(IPNetwork(IPAddress('1::1')))
m_endpoint = Mock(spec=Endpoint)
m_endpoint.ipv6_nets = ipv6_nets
m_client.get_endpoint.return_value = m_endpoint
err = CalledProcessError(1, m_netns, "Error removing ip")
m_netns.remove_ip_from_ns_veth.side_effect = err
# Set up arguments to pass to method under test
container_name = 'container1'
ip = '1::1'
interface = 'interface'
# Call method under test expecting a SystemExit
self.assertRaises(SystemExit, container.container_ip_remove,
container_name, ip, interface)
# Assert
self.assertTrue(m_enforce_root.called)
self.assertTrue(m_get_pool_or_exit.called)
self.assertTrue(m_get_container_info_or_exit.called)
self.assertTrue(m_client.get_endpoint.called)
self.assertTrue(m_client.update_endpoint.called)
self.assertTrue(m_netns.remove_ip_from_ns_veth.called)
self.assertFalse(m_client.unassign_address.called)
| TeaBough/calico-docker | calico_containers/tests/unit/container_test.py | Python | apache-2.0 | 39,497 |
#!/usr/bin/env python
#
# Copyright 2015-2021 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from firenado.util.sqlalchemy_util import Base, base_to_dict
from sqlalchemy import Column, String
from sqlalchemy.types import Integer, DateTime
from sqlalchemy.sql import text
import unittest
class TestBase(Base):
__tablename__ = "test"
id = Column("id", Integer, primary_key=True)
username = Column("username", String(150), nullable=False)
first_name = Column("first_name", String(150), nullable=False)
last_name = Column("last_name", String(150), nullable=False)
password = Column("password", String(150), nullable=False)
email = Column("email", String(150), nullable=False)
created = Column("created", DateTime, nullable=False,
server_default=text("now()"))
modified = Column("modified", DateTime, nullable=False,
server_default=text("now()"))
class BaseToDictTestCase(unittest.TestCase):
def setUp(self):
self.test_object = TestBase()
self.test_object.id = 1
self.test_object.username = "anusername"
self.test_object.password = "apassword"
self.test_object.first_name = "Test"
self.test_object.last_name = "Object"
self.test_object.email = "[email protected]"
def test_base_to_dict(self):
dict_from_base = base_to_dict(self.test_object)
self.assertEqual(dict_from_base['id'], self.test_object.id)
self.assertEqual(dict_from_base['username'], self.test_object.username)
self.assertEqual(dict_from_base['password'], self.test_object.password)
self.assertEqual(dict_from_base['first_name'],
self.test_object.first_name)
self.assertEqual(dict_from_base['last_name'],
self.test_object.last_name)
self.assertEqual(dict_from_base['email'], self.test_object.email)
self.assertEqual(dict_from_base['created'], self.test_object.created)
self.assertEqual(dict_from_base['modified'], self.test_object.modified)
    def test_base_to_dict_with_fields(self):
dict_from_base = base_to_dict(self.test_object,
["id", "username", "first_name"])
self.assertEqual(dict_from_base['id'], self.test_object.id)
self.assertEqual(dict_from_base['username'], self.test_object.username)
self.assertEqual(dict_from_base['first_name'],
self.test_object.first_name)
self.assertTrue("password" not in dict_from_base)
self.assertTrue("last_name" not in dict_from_base)
self.assertTrue("email" not in dict_from_base)
self.assertTrue("created" not in dict_from_base)
self.assertTrue("modified" not in dict_from_base)
| piraz/firenado | tests/util/sqlalchemy_util_test.py | Python | apache-2.0 | 3,289 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import traceback
import mallet_lda
class MalletTagTopics(mallet_lda.MalletLDA):
"""
Topic modeling with separation based on tags
"""
def _basic_params(self):
self.name = 'mallet_lda_tags'
self.categorical = False
self.template_name = 'mallet_lda'
self.dry_run = False
self.topics = 50
self.dfr = len(self.extra_args) > 0
if self.dfr:
self.dfr_dir = self.extra_args[0]
def post_setup(self):
if self.named_args is not None:
if 'tags' in self.named_args:
self.tags = self.named_args['tags']
for filename in self.metadata.keys():
my_tags = [x for (x, y) in self.tags.iteritems()
if int(self.metadata[filename]['itemID'
]) in y]
if len(my_tags) > 0:
self.metadata[filename]['label'] = my_tags[0]
else:
del self.metadata[filename]
self.files.remove(filename)
if __name__ == '__main__':
try:
processor = MalletTagTopics(track_progress=False)
processor.process()
except:
logging.error(traceback.format_exc())
| ChristianFrisson/papermachines | chrome/content/papermachines/processors/mallet_lda_tags.py | Python | bsd-2-clause | 1,353 |
import sys
import os
import subprocess
import string
printable = set(string.printable)
def sanitize(txt):
txt = ''.join(filter(lambda c: c in printable, txt))
return txt
def traverse(t, outfile):
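    # Depth-first walk: emit "code<TAB>description" for this node, then recurse.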
    print >>outfile, sanitize(t.code+'\t'+t.description)
for c in t.children:
traverse(c, outfile)
def getEdges(t, outfile):
for c in t.children:
print >>outfile, sanitize(t.code+'\t'+c.code)
getEdges(c, outfile)
print 'cloning github repository sirrice/icd9.git'
subprocess.call('git clone https://github.com/sirrice/icd9.git', shell=1)
sys.path.append('icd9')
from icd9 import ICD9
tree = ICD9('icd9/codes.json')
toplevelnodes = tree.children
print 'creating name file'
outfile = file('code.names', 'w')
traverse(tree, outfile)
outfile.close()
print 'creating edges file'
outfile = file('code.edges', 'w')
getEdges(tree, outfile)
outfile.close()
print 'cleaning up'
#os.chdir('..')
#subprocess.call('rm -rf icd9', shell=1)
| yhalpern/anchorExplorer | examples/ICD9/load_ICD9_structure.py | Python | bsd-2-clause | 971 |
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
# noinspection PyCompatibility
from urllib.request import urlopen
except ImportError:
# noinspection PyCompatibility
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "7.0"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
# extracting the archive
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
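    # Remember whether either module was already imported: an in-process upgrade
    # would then be unsafe (see the VersionConflict branch below).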
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
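    # Prefer downloaders that can validate TLS (PowerShell, curl, wget) before
    # falling back to the non-validating urllib-based downloader.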
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an sdist for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main():
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
archive = download_setuptools(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
)
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
| mtholder/peyotl | ez_setup.py | Python | bsd-2-clause | 10,592 |
import sys
import unittest
from streamlink.plugin.api.utils import itertags
def unsupported_versions_1979():
"""Unsupported python versions for itertags
3.7.0 - 3.7.2 and 3.8.0a1
- https://github.com/streamlink/streamlink/issues/1979
- https://bugs.python.org/issue34294
"""
v = sys.version_info
if (v.major == 3) and (
# 3.7.0 - 3.7.2
(v.minor == 7 and v.micro <= 2)
# 3.8.0a1
or (v.minor == 8 and v.micro == 0 and v.releaselevel == 'alpha' and v.serial <= 1)
):
return True
else:
return False
class TestPluginUtil(unittest.TestCase):
test_html = """
<!doctype html>
<html lang="en" class="no-js">
<title>Title</title>
<meta property="og:type" content= "website" />
<meta property="og:url" content="http://test.se/"/>
<meta property="og:site_name" content="Test" />
<script src="https://test.se/test.js"></script>
<link rel="stylesheet" type="text/css" href="https://test.se/test.css">
<script>Tester.ready(function () {
alert("Hello, world!"); });</script>
<p>
<a
href="http://test.se/foo">bar</a>
</p>
</html>
""" # noqa: W291
def test_itertags_single_text(self):
title = list(itertags(self.test_html, "title"))
        self.assertEqual(len(title), 1)
self.assertEqual(title[0].tag, "title")
self.assertEqual(title[0].text, "Title")
self.assertEqual(title[0].attributes, {})
def test_itertags_attrs_text(self):
script = list(itertags(self.test_html, "script"))
        self.assertEqual(len(script), 2)
self.assertEqual(script[0].tag, "script")
self.assertEqual(script[0].text, "")
self.assertEqual(script[0].attributes, {"src": "https://test.se/test.js"})
self.assertEqual(script[1].tag, "script")
self.assertEqual(script[1].text.strip(), """Tester.ready(function () {\nalert("Hello, world!"); });""")
self.assertEqual(script[1].attributes, {})
@unittest.skipIf(unsupported_versions_1979(),
"python3.7 issue, see bpo-34294")
def test_itertags_multi_attrs(self):
metas = list(itertags(self.test_html, "meta"))
        self.assertEqual(len(metas), 3)
self.assertTrue(all(meta.tag == "meta" for meta in metas))
self.assertEqual(metas[0].text, None)
self.assertEqual(metas[1].text, None)
self.assertEqual(metas[2].text, None)
self.assertEqual(metas[0].attributes, {"property": "og:type", "content": "website"})
self.assertEqual(metas[1].attributes, {"property": "og:url", "content": "http://test.se/"})
self.assertEqual(metas[2].attributes, {"property": "og:site_name", "content": "Test"})
def test_multi_line_a(self):
anchor = list(itertags(self.test_html, "a"))
        self.assertEqual(len(anchor), 1)
self.assertEqual(anchor[0].tag, "a")
self.assertEqual(anchor[0].text, "bar")
self.assertEqual(anchor[0].attributes, {"href": "http://test.se/foo"})
@unittest.skipIf(unsupported_versions_1979(),
"python3.7 issue, see bpo-34294")
def test_no_end_tag(self):
links = list(itertags(self.test_html, "link"))
        self.assertEqual(len(links), 1)
self.assertEqual(links[0].tag, "link")
self.assertEqual(links[0].text, None)
self.assertEqual(links[0].attributes, {"rel": "stylesheet",
"type": "text/css",
"href": "https://test.se/test.css"})
def test_tag_inner_tag(self):
links = list(itertags(self.test_html, "p"))
        self.assertEqual(len(links), 1)
self.assertEqual(links[0].tag, "p")
self.assertEqual(links[0].text.strip(), '<a \nhref="http://test.se/foo">bar</a>')
self.assertEqual(links[0].attributes, {})
| beardypig/streamlink | tests/test_plugin_utils.py | Python | bsd-2-clause | 3,845 |
import batoid
import numpy as np
from test_helpers import timer, do_pickle, all_obj_diff, init_gpu, rays_allclose
@timer
def test_properties():
rng = np.random.default_rng(5)
for i in range(100):
R = rng.normal(0.0, 0.3) # negative allowed
sphere = batoid.Sphere(R)
assert sphere.R == R
do_pickle(sphere)
@timer
def test_sag():
rng = np.random.default_rng(57)
for i in range(100):
R = 1./rng.normal(0.0, 0.3)
sphere = batoid.Sphere(R)
for j in range(10):
x = rng.uniform(-0.7*abs(R), 0.7*abs(R))
y = rng.uniform(-0.7*abs(R), 0.7*abs(R))
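            # Spherical sag: z(r) = R*(1 - sqrt(1 - r^2/R^2)).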
result = sphere.sag(x, y)
np.testing.assert_allclose(
result,
R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))
)
# Check that it returned a scalar float and not an array
assert isinstance(result, float)
# Check 0,0
np.testing.assert_allclose(sphere.sag(0, 0), 0.0, rtol=0, atol=1e-17)
# Check vectorization
x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
np.testing.assert_allclose(
sphere.sag(x, y),
R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))
)
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(
sphere.sag(x[::5,::2], y[::5,::2]),
R*(1-np.sqrt(1.0-(x*x + y*y)/R/R))[::5,::2]
)
do_pickle(sphere)
@timer
def test_normal():
rng = np.random.default_rng(577)
for i in range(100):
R = 1./rng.normal(0.0, 0.3)
sphere = batoid.Sphere(R)
for j in range(10):
x = rng.uniform(-0.7*abs(R), 0.7*abs(R))
y = rng.uniform(-0.7*abs(R), 0.7*abs(R))
result = sphere.normal(x, y)
r = np.hypot(x, y)
rat = r/R
dzdr = rat/np.sqrt(1-rat*rat)
nz = 1/np.sqrt(1+dzdr*dzdr)
normal = np.array([-x/r*dzdr*nz, -y/r*dzdr*nz, nz])
np.testing.assert_allclose(result, normal)
# Check 0,0
np.testing.assert_equal(sphere.normal(0, 0), np.array([0, 0, 1]))
# Check vectorization
x = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
y = rng.uniform(-0.7*abs(R), 0.7*abs(R), size=(10, 10))
r = np.hypot(x, y)
rat = r/R
dzdr = rat/np.sqrt(1-rat*rat)
nz = 1/np.sqrt(1+dzdr*dzdr)
normal = np.dstack([-x/r*dzdr*nz, -y/r*dzdr*nz, nz])
np.testing.assert_allclose(
sphere.normal(x, y),
normal
)
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(
sphere.normal(x[::5,::2], y[::5,::2]),
normal[::5, ::2]
)
@timer
def test_intersect():
rng = np.random.default_rng(5772)
size = 10_000
for i in range(100):
R = 1./rng.normal(0.0, 0.3)
sphereCoordSys = batoid.CoordSys(origin=[0, 0, -1])
sphere = batoid.Sphere(R)
x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
z = np.full_like(x, -2*abs(R))
# If we shoot rays straight up, then it's easy to predict the intersection
vx = np.zeros_like(x)
vy = np.zeros_like(x)
vz = np.ones_like(x)
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_allclose(rv.z, -2*abs(R))
rv2 = batoid.intersect(sphere, rv.copy(), sphereCoordSys)
assert rv2.coordSys == sphereCoordSys
rv2 = rv2.toCoordSys(batoid.CoordSys())
np.testing.assert_allclose(rv2.x, x)
np.testing.assert_allclose(rv2.y, y)
np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9)
# Check default intersect coordTransform
rv2 = rv.copy().toCoordSys(sphereCoordSys)
batoid.intersect(sphere, rv2)
assert rv2.coordSys == sphereCoordSys
rv2 = rv2.toCoordSys(batoid.CoordSys())
np.testing.assert_allclose(rv2.x, x)
np.testing.assert_allclose(rv2.y, y)
np.testing.assert_allclose(rv2.z, sphere.sag(x, y)-1, rtol=0, atol=1e-9)
@timer
def test_reflect():
rng = np.random.default_rng(57721)
size = 10_000
for i in range(100):
R = 1./rng.normal(0.0, 0.3)
sphere = batoid.Sphere(R)
x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
z = np.full_like(x, -2*abs(R))
vx = rng.uniform(-1e-5, 1e-5, size=size)
vy = rng.uniform(-1e-5, 1e-5, size=size)
vz = np.full_like(x, 1)
rv = batoid.RayVector(x, y, z, vx, vy, vz)
rvr = batoid.reflect(sphere, rv.copy())
rvr2 = sphere.reflect(rv.copy())
rays_allclose(rvr, rvr2)
# print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
normal = sphere.normal(rvr.x, rvr.y)
# Test law of reflection
a0 = np.einsum("ad,ad->a", normal, rv.v)[~rvr.failed]
a1 = np.einsum("ad,ad->a", normal, -rvr.v)[~rvr.failed]
np.testing.assert_allclose(
a0, a1,
rtol=0, atol=1e-12
)
# Test that rv.v, rvr.v and normal are all in the same plane
np.testing.assert_allclose(
np.einsum(
"ad,ad->a",
np.cross(normal, rv.v),
rv.v
)[~rvr.failed],
0.0,
rtol=0, atol=1e-12
)
@timer
def test_refract():
rng = np.random.default_rng(577215)
size = 10_000
for i in range(100):
R = 1./rng.normal(0.0, 0.3)
sphere = batoid.Sphere(R)
m0 = batoid.ConstMedium(rng.normal(1.2, 0.01))
m1 = batoid.ConstMedium(rng.normal(1.3, 0.01))
x = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
y = rng.uniform(-0.3*abs(R), 0.3*abs(R), size=size)
z = np.full_like(x, -2*abs(R))
vx = rng.uniform(-1e-5, 1e-5, size=size)
vy = rng.uniform(-1e-5, 1e-5, size=size)
vz = np.sqrt(1-vx*vx-vy*vy)/m0.n
rv = batoid.RayVector(x, y, z, vx, vy, vz)
rvr = batoid.refract(sphere, rv.copy(), m0, m1)
rvr2 = sphere.refract(rv.copy(), m0, m1)
rays_allclose(rvr, rvr2)
# print(f"{np.sum(rvr.failed)/len(rvr)*100:.2f}% failed")
normal = sphere.normal(rvr.x, rvr.y)
# Test Snell's law
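        # rv.v*m.n is a unit direction d, and Snell's law in vector form says
        # n0*(N x d0) == n1*(N x d1); comparing component sums checks this.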
s0 = np.sum(np.cross(normal, rv.v*m0.n)[~rvr.failed], axis=-1)
s1 = np.sum(np.cross(normal, rvr.v*m1.n)[~rvr.failed], axis=-1)
np.testing.assert_allclose(
m0.n*s0, m1.n*s1,
rtol=0, atol=1e-9
)
# Test that rv.v, rvr.v and normal are all in the same plane
np.testing.assert_allclose(
np.einsum(
"ad,ad->a",
np.cross(normal, rv.v),
rv.v
)[~rvr.failed],
0.0,
rtol=0, atol=1e-12
)
@timer
def test_ne():
objs = [
batoid.Sphere(1.0),
batoid.Sphere(2.0),
batoid.Plane()
]
all_obj_diff(objs)
@timer
def test_fail():
sphere = batoid.Sphere(1.0)
rv = batoid.RayVector(0, 10, 0, 0, 0, -1) # Too far to side
rv2 = batoid.intersect(sphere, rv.copy())
np.testing.assert_equal(rv2.failed, np.array([True]))
# This one passes
rv = batoid.RayVector(0, 0, 0, 0, 0, -1)
rv2 = batoid.intersect(sphere, rv.copy())
np.testing.assert_equal(rv2.failed, np.array([False]))
if __name__ == '__main__':
test_properties()
test_sag()
test_normal()
test_intersect()
test_reflect()
test_refract()
test_ne()
test_fail()
| jmeyers314/batoid | tests/test_Sphere.py | Python | bsd-2-clause | 7,740 |
#!/usr/bin/env python3
import sys
import re
import mpmath as mp
mp.mp.dps = 250
if len(sys.argv) != 2:
print("Usage: format_CIAAW.py ciaawfile")
quit(1)
path = sys.argv[1]
atomre = re.compile(r'^(\d+) +(\w\w*) +(\w+) +\[?(\d+)\]?\*? +(.*) *$')
isore = re.compile(r'^(\d+)\*? +(\[?\d.*.*\]?) *$')
brange = re.compile(r'^\[([\d\.]+),([\d\.]+)\].*$')
buncertain = re.compile(r'^([\d\.]+)\((\d+)\)[a-z]*$')
bnum = re.compile(r'^(\d+)$')
atommassline = re.compile(r'^(\d+) +(\w\w*) +(\w+) +(.*) *$')
def NumberStr(n):
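    # Normalize a CIAAW value string into "center  lower  upper" columns, e.g.
    # "1.0079(1)" -> 1.0079 with bounds 1.0078/1.0080, and "[1.00784,1.00811]"
    # -> the interval midpoint flanked by the two endpoints.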
# Replace spaces
s = n.replace(' ', '')
# remove "exactly" for the carbon mass
s = s.replace('(exactly)', '')
# if only a number, put it three times
m = bnum.match(s)
if m:
s = "{:<25} {:<25} {:<25}".format(m.group(1), m.group(1), m.group(1))
# if parentheses uncertainty...
m = buncertain.match(s)
if m:
# tricky. duplicate the first part as a string
s2 = m.group(1)
# but replace with all zero
s2 = re.sub(r'\d', '0', s2)
# now replace last characters
l = len(m.group(2))
s2 = s2[:len(s2)-l] + m.group(2)
# convert to a float
serr = mp.mpf(s2)
scenter = mp.mpf(m.group(1))
s = "{:<25} {:<25} {:<25}".format(mp.nstr(scenter, 18), mp.nstr(scenter-serr, 18), mp.nstr(scenter+serr, 18))
    # Expand bracketed ranges [low,high] into (mid, low, high) columns
m = brange.match(s)
if m:
slow = mp.mpf(m.group(1))
shigh = mp.mpf(m.group(2))
smid = (shigh + slow)/mp.mpf("2.0")
s = "{:<25} {:<25} {:<25}".format(mp.nstr(smid, 18), mp.nstr(slow, 18), mp.nstr(shigh, 18))
# just a dash?
if s == "-":
s = "{:<25} {:<25} {:<25}".format(0, 0, 0)
return s
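# Illustrative example (made-up value): NumberStr('12.011(2)') expands the
# parenthesized uncertainty into three aligned columns, approximately
# "12.011  12.009  12.013" (center, center-err, center+err).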
# Read all lines; lines that match no pattern (headers, comments) fall
# through to the else branch below and are echoed unchanged.
filelines = [ x.strip() for x in open(path).readlines() ]
curatom = None
for line in filelines:
matomre = atomre.match(line)
misore = isore.match(line)
matommass = atommassline.match(line)
if matomre:
curatom = "{:<5} {:<5}".format(matomre.group(1), matomre.group(2))
print("{} {:<6} {:<25}".format(curatom, matomre.group(4), NumberStr(matomre.group(5))))
elif misore:
print("{} {:<6} {:<25}".format(curatom, misore.group(1), NumberStr(misore.group(2))))
elif matommass:
curatom = "{:<5} {:<5}".format(matommass.group(1), matommass.group(2))
print("{} {:<25}".format(curatom, NumberStr(matommass.group(4))))
else:
print(line) # comment lines, etc
| pulsar-chem/Pulsar-Core | scripts/data/format_CIAAW.py | Python | bsd-3-clause | 2,449 |
import warnings
import unittest
import sys
from nose.tools import assert_raises
from gplearn.skutils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
# This class is inspired by numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| danbob123/gplearn | gplearn/skutils/tests/test_testing.py | Python | bsd-3-clause | 3,785 |
# -*- coding: utf-8 -*-
#
# Miscellaneous processing for the Algorithm Design Contest
#
# Copyright (C) 2015 Fujitsu
import numberlink
from datastore import *
from hashlib import sha1, sha256
from flask import make_response, render_template
import random
import datetime
from tz import gae_datetime_JST
from define import DEFAULT_YEAR
def adc_response(msg, isjson, code=200, json_encoded=False):
if json_encoded:
body = msg
else:
template = 'response.json' if isjson else 'response.html'
body = render_template(template, msg=msg)
resp = make_response(body)
if code == 200:
resp.status = 'OK'
elif code == 400:
resp.status = 'Bad Request'
elif code == 401:
resp.status = 'Unauthorized'
resp.status_code = code
resp.headers['Content-Type'] = 'application/json' if isjson else 'text/html; charset=utf-8'
return resp
def adc_response_html(html, code=200):
template = 'raw.html'
body = render_template(template, raw=html)
resp = make_response(body)
resp.status_code = code
resp.headers['Content-Type'] = 'text/html; charset=utf-8'
return resp
def adc_response_text(body, code=200):
resp = make_response(body)
resp.status_code = code
resp.headers['Content-Type'] = 'text/plain; charset=utf-8'
return resp
def adc_response_json(body, code=200):
resp = make_response(body)
resp.status_code = code
resp.headers['Content-Type'] = 'application/json'
return resp
def adc_response_Q_data(result):
"問題テキストデータを返す"
if result is None:
code = 404
text = "Not Found\r\n"
else:
code = 200
text = result.text
return adc_response_text(text, code)
def log(username, what):
root = log_key()
i = Log(parent = root,
username = username,
what = what)
i.put()
def log_get_or_delete(username=None, fetch_num=100, when=None, delete=False):
query = Log.query(ancestor = log_key()).order(-Log.date)
if username is not None:
query = query.filter(Log.username == username)
if when is not None:
before = datetime.datetime.now() - when
#print "before=", before
query = query.filter(Log.date > before)
q = query.fetch(fetch_num)
results = []
for i in q:
if delete:
tmp = { 'date': gae_datetime_JST(i.date) }
i.key.delete()
else:
tmp = { 'date': gae_datetime_JST(i.date),
'username': i.username,
'what': i.what }
results.append( tmp )
return results
def adc_login(salt, username, password, users):
"パスワードがあっているかチェックする"
hashed256 = hashed_password(username, password, salt)
u = adc_get_user_info(username, users)
if u is not None and u[1]==hashed256:
return u
else:
return None
def adc_change_password(salt, username, users, attr, priv_admin=False):
"パスワード変更。管理者は他人のパスワードも変更できる。"
if ('password_old' in attr and
'password_new1' in attr and
'password_new2' in attr):
        if not priv_admin: # non-admins must supply their current password
u = adc_login(salt, username, attr['password_old'], users)
if u is None:
return False, "password mismatched"
if attr['password_new1'] != attr['password_new2']:
return False, "new password is not same"
if change_password(username, attr['password_new1'].encode('utf-8'), salt):
return True, "password changed"
else:
return False, "password change failed"
else:
return False, "error"
def adc_get_user_info(username, users):
    # First, search the locally defined users
for u in users:
if username == (u[0]):
return u
    # Next, search the users registered in the database
r = get_userinfo(username)
if r is not None:
return [r.username, r.password, r.displayname, r.uid, r.gid]
else:
return None
def adc_get_user_list(users):
res = []
    # First, list the locally defined users
for u in users:
res.append(u[0])
    # Next, add the users registered in the database
res2 = get_username_list()
res.extend(res2)
return res
def insert_Q_data(q_num, text, author="DASymposium", year=DEFAULT_YEAR, uniq=True):
"""
    Register question data in the database.
    When uniq==True, registration fails if the (q_num, author) pair already exists.
"""
    # duplicate check
if uniq:
q = get_user_Q_data(q_num, author, year)
if q is not None:
            return (False, "Error: Q%d data already exists" % q_num) # duplicate error
    # validate the question data
(size, line_num, line_mat, msg, ok) = numberlink.read_input_data(text)
if not ok:
return (False, "Error: syntax error in Q data\n"+msg)
    # text2 is text normalized (line endings, etc.)
text2 = numberlink.generate_Q_data(size, line_num, line_mat)
    # determine the root entity
userinfo = get_userinfo(author)
if userinfo is None:
return (False, "Error: user not found: %s" % author)
else:
root = userinfo.key
    # the question-data entity
q = Question( parent = root,
id = str(q_num),
qnum = q_num,
text = text2,
rows = size[1], # Y
cols = size[0], # X
linenum = line_num,
author = author )
    # store it
q.put()
#
return (True, size, line_num)
def update_Q_data(q_num, text, author="DASymposium", year=DEFAULT_YEAR):
"問題データを変更する"
    # validate the question data content
(size, line_num, line_mat, msg, ok) = numberlink.read_input_data(text)
if not ok:
return (False, "Error: syntax error in Q data\n"+msg, None, None)
text2 = numberlink.generate_Q_data(size, line_num, line_mat)
    # fetch the existing entity
res = get_user_Q_data(q_num, author, year)
if res is None:
num = 0
else:
num = 1
res.text = text2
res.rows = size[1]
res.cols = size[0]
res.linenum = line_num
res.put()
return (True, num, size, line_num)
def get_Q_data(q_num, year=DEFAULT_YEAR, fetch_num=5):
"出題の番号を指定して、Question問題データをデータベースから取り出す"
qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
if qla is None:
return None
    # q_num starts at 1, so it is offset from the array index by 1
qn = q_num-1
if qn < 0 or len(qla.qs) <= qn:
return None
return qla.qs[q_num-1].get()
def get_Q_author_all():
"出題の番号から、authorを引けるテーブルを作る"
qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
if qla is None:
return None
    authors = ['']*(len(qla.qs)+1) # q_num starts at 1, so reserve one extra slot
    qn = 1 # contest question number
for q_key in qla.qs:
q = q_key.get()
authors[qn] = q.author
qn += 1
    # q.qnum is the number assigned at registration, not the contest question number
return authors
def get_Q_data_text(q_num, year=DEFAULT_YEAR, fetch_num=5):
"問題のテキストを返す"
result = get_Q_data(q_num, year, fetch_num)
if result is not None:
text = result.text
ret = True
else: # result is None
text = "Error: data not found: Q%d" % q_num
ret = False
return ret, text
def get_user_Q_data(q_num, author, year=DEFAULT_YEAR, fetch_num=99):
"qnumとauthorを指定して問題データをデータベースから取り出す"
userinfo = get_userinfo(author)
if userinfo is None:
root = qdata_key(year)
else:
root = userinfo.key
key = ndb.Key(Question, str(q_num), parent=root)
return key.get()
def get_admin_Q_all():
"データベースに登録されたすべての問題の一覧リスト"
#query = Question.query().order(Question.author, Question.qnum)
query = Question.query(ancestor=userlist_key()).order(Question.author, Question.qnum)
q = query.fetch()
num = len(q)
out = str(num) + "\n"
for i in q:
dt = gae_datetime_JST(i.date)
out += "Q%02d SIZE %dX%d LINE_NUM %d (%s) %s\n" % (i.qnum, i.cols, i.rows, i.linenum, i.author, dt)
return out
def admin_Q_list_get():
"コンテストの出題リストを取り出す"
qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
if qla is None:
return ''
else:
return qla.text_admin
def admin_Q_list_create():
"コンテスト用の出題リストを作成する"
#query = Question.query(ancestor=userlist_key()).order(Question.author, Question.qnum)
query = Question.query(ancestor=userlist_key())
qlist = []
q = query.fetch()
num = len(q)
for i in q:
qlist.append([i.qnum, i.author, i.key])
random.shuffle(qlist)
out = str(num) + "\n"
root = qdata_key()
    # deleting the existing question list first ... was abandoned
#out += admin_Q_list_delete() + "\n"
num = 1
out_admin = ""
out_user = ""
qs = []
for i in qlist:
qs.append(i[2])
out_admin += "Q%d %s %d\n" % (num, i[1], i[0])
out_user += "Q%d\n" % num
num += 1
out += out_admin
qla = QuestionListAll.get_or_insert('master', parent=root, qs=qs, text_admin=out_admin, text_user=out_user)
if qla.text_admin != out_admin:
out += "Already inserted\n"
return out
def admin_Q_list_delete():
"コンテストの出題リストを削除する"
root = qdata_key()
ndb.Key(QuestionListAll, 'master', parent=root).delete()
return "DELETE Q-list"
def get_Q_all(html=False):
"問題データの一覧リストを返す"
qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
if qla is None:
return ''
if html:
out = ""
num=1
for i in qla.text_user.splitlines():
out += '<a href="/Q/%d">%s</a><br />\n' % (num, i)
num += 1
return out
else:
return qla.text_user
def menu_post_A(username):
"回答ファイルをアップロードするフォームを返す"
qla = ndb.Key(QuestionListAll, 'master', parent=qdata_key()).get()
if qla is None:
return ''
out = ""
num=1
for i in qla.text_user.splitlines():
out += '<a href="/A/%s/Q/%d">post answer %s</a><br />\n' % (username, num, i)
num += 1
return out
def post_A(username, atext, form):
anum = (int)(form['anum'])
cpu_sec = 0
mem_byte = 0
try:
cpu_sec = (float)(form['cpu_sec'])
mem_byte = (int)(form['mem_byte'])
except ValueError:
        # (float)'' raises ValueError
pass
misc_text = form['misc_text']
print "A%d\n%f\n%d\n%s" % (anum, cpu_sec, mem_byte, misc_text.encode('utf-8'))
return put_A_data(anum, username, atext, cpu_sec, mem_byte, misc_text)
def get_user_Q_all(author, html=None):
"authorを指定して、問題データの一覧リストを返す"
userinfo = get_userinfo(author)
if userinfo is None:
root = qdata_key()
else:
root = userinfo.key
query = Question.query( ancestor = root ).order(Question.qnum)
#query = query.filter(Question.author == author )
q = query.fetch()
num = len(q)
out = ""
for i in q:
if html is None:
out += "Q%d SIZE %dX%d LINE_NUM %d (%s)\n" % (i.qnum, i.cols, i.rows, i.linenum, i.author)
else:
url = '/user/%s/Q/%d' % (author, i.qnum)
out += '<a href="%s">Q%d SIZE %dX%d LINE_NUM %d (%s)</a><br />\n' % (url, i.qnum, i.cols, i.rows, i.linenum, i.author)
return out
def delete_user_Q_data(q_num, author, year=DEFAULT_YEAR):
"qnumとauthorを指定して、問題データをデータベースから削除する"
res = get_user_Q_data(q_num, author, year)
msg = ""
if res is None:
msg = "Q%d data not found" % q_num
else:
msg += "DELETE /user/%s/Q/%d\n" % (author, q_num)
res.key.delete()
return msg
def get_admin_A_all():
"データベースに登録されたすべての回答データの一覧リスト"
#query = Answer.query(ancestor=userlist_key()).order(Answer.owner, Answer.anum)
query = Answer.query(ancestor=userlist_key())
q = query.fetch()
num = len(q)
out = str(num) + "\n"
for i in q:
dt = gae_datetime_JST(i.date)
out += "A%02d (%s) %s\n" % (i.anum, i.owner, dt)
return out
def get_A_data(a_num=None, username=None):
"""
    Fetch answer data from the database.
    When a_num is None, multiple records are returned.
    When a_num is a number, only that record is returned, or None if it does not exist.
"""
if username is None:
root = userlist_key()
else:
userinfo = get_userinfo(username)
if userinfo is None:
msg = "ERROR: user not found: %s" % username
return False, msg, None
root = userinfo.key
if a_num is not None:
a = ndb.Key(Answer, str(a_num), parent=root).get()
return True, a, root
#query = Answer.query(ancestor=root).order(Answer.anum)
query = Answer.query(ancestor=root)
#if a_num is not None:
# query = query.filter(Answer.anum == a_num)
q = query.fetch()
return True, q, root
def put_A_data(a_num, username, text, cpu_sec=None, mem_byte=None, misc_text=None):
"回答データをデータベースに格納する"
msg = ""
    # fetch the question data
ret, q_text = get_Q_data_text(a_num)
if not ret:
msg = "Error in Q%d data: " % a_num + q_text
return False, msg
    # check that this is not a duplicate answer
ret, q, root = get_A_data(a_num, username)
if ret==True and q is not None:
msg += "ERROR: duplicated answer\n";
return False, msg
    # validate the answer data
judges, msg = numberlink.check_A_data(text, q_text)
q = 0.0
if judges[0] != True:
msg += "Error in answer A%d\n" % a_num
check_A = False
else:
        check_A = True # correct answer
q = judges[1]
    # solution quality
msg += "Quality factor = %1.19f\n" % q
    # store in the database, even when the answer is wrong
a = Answer( parent = root,
id = str(a_num),
anum = a_num,
text = text,
owner = username,
cpu_sec = cpu_sec,
mem_byte = mem_byte,
misc_text = misc_text,
                result = msg[-1499:], # length is limited; keep only the tail
judge = int(check_A),
q_factor = q )
a_key = a.put()
return True, msg
def put_A_info(a_num, username, info):
"回答データの補足情報をデータベースに格納する"
msg = ""
    # fetch the answer data; root is the UserInfo key, a is the Answer
ret, a, root = get_A_data(a_num, username)
if ret==False or a is None:
if ret==False: msg += a + "\n"
msg += "ERROR: A%d data not found" % a_num
return False, msg
a.cpu_sec = info['cpu_sec']
a.mem_byte = info['mem_byte']
a.misc_text = info['misc_text']
a.put()
msg += "UPDATE A%d info\n" % a_num
return True, msg
def get_or_delete_A_data(a_num=None, username=None, delete=False):
"回答データをデータベースから、削除or取り出し"
ret, q, root = get_A_data(a_num=a_num, username=username)
if not ret:
return False, q # q==msg
if q is None:
return ret, []
result = []
    if a_num is None: # when a_num is None there are multiple records
q2 = q
else:
q2 = [q]
if delete:
get_or_delete_A_info(a_num=a_num, username=username, delete=True)
for i in q2:
result.append("DELETE A%d" % i.anum)
i.key.delete()
    else: # the GET case
for i in q2:
result.append("GET A%d" % i.anum)
result.append(i.text)
return True, result
def get_user_A_all(username, html=None):
"ユーザーを指定して、回答データの一覧リストを返す"
ret, q, root = get_A_data(username=username)
if not ret:
return False, q
text = ""
for i in q:
if html:
text += '<a href="/A/%s/Q/%d">A%d</a> <a href="/A/%s/Q/%d/info">info</a><br />\n' % (username, i.anum, i.anum, username, i.anum)
else:
text += 'A%d\n' % i.anum
return True, text
def get_or_delete_A_info(a_num=None, username=None, delete=False):
"回答データの補足情報をデータベースから、削除or取り出し"
msg = ""
r, a, root = get_A_data(a_num, username)
if not r:
return False, a, None
if a_num is None:
q = a
else:
if a is None:
msg += "A%d not found" % a_num
return True, msg, []
q = [a]
results = []
num = 0
for i in q:
num += 1
if delete:
results.append({'anum': i.anum})
i.cpu_sec = None
i.mem_byte = None
i.misc_text = None
i.put()
else:
tmp = i.to_dict()
del tmp['text']
results.append( tmp )
method = 'DELETE' if delete else 'GET'
a_num2 = 0 if a_num is None else a_num
msg += "%s A%d info %d" % (method, a_num2, num)
return True, msg, results
def hashed_password(username, password, salt):
"ハッシュ化したパスワード"
tmp = salt + username.encode('utf-8') + password.encode('utf-8')
return sha256(tmp).hexdigest()
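# Illustrative example (hypothetical salt): hashed_password('alice', 'secret',
# 'SALT') returns sha256('SALT' + 'alice' + 'secret').hexdigest(), a
# 64-character hex digest.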
def create_user(username, password, displayname, uid, gid, salt):
"ユーザーをデータベースに登録"
hashed = hashed_password(username, password, salt)
userlist = userlist_key()
u = UserInfo( parent = userlist,
id = username,
username = username,
password = hashed,
displayname = displayname,
uid = uid,
gid = gid )
u.put()
def change_password(username, password, salt):
"パスワード変更"
info = get_userinfo(username)
if info is None:
return False
hashed = hashed_password(username, password, salt)
info.password = hashed
info.put()
return True
def get_username_list():
"ユーザー名の一覧リストをデータベースから取り出す"
#query = UserInfo.query( ancestor = userlist_key() ).order(UserInfo.uid)
query = UserInfo.query( ancestor = userlist_key() )
q = query.fetch()
res = []
for u in q:
res.append(u.username)
return res
def get_userinfo(username):
"ユーザー情報をデータベースから取り出す"
key = ndb.Key(UserInfo, username, parent=userlist_key())
info = key.get()
return info
def delete_user(username):
"ユーザーをデータベースから削除"
userinfo = get_userinfo(username)
if userinfo is None:
return 0
else:
userinfo.key.delete()
return 1
def Q_check(qtext):
"問題ファイルの妥当性チェックを行う"
hr = '-'*40 + "\n"
(size, line_num, line_mat, msg, ok) = numberlink.read_input_data(qtext)
if ok:
q = numberlink.generate_Q_data(size, line_num, line_mat)
out = "OK\n" + hr + q + hr
else:
out = "NG\n" + hr + qtext + hr + msg
return out, ok
def calc_score_all():
"スコア計算"
authors = get_Q_author_all()
#print "authors=", authors
q_factors = {}
q_point = {}
ok_point = {}
bonus_point = {}
result = {}
misc = {}
query = Answer.query(ancestor=userlist_key())
q = query.fetch()
all_numbers = {}
all_users = {}
for i in q:
#anum = 'A%d' % i.anum
anum = 'A%02d' % i.anum
username = i.owner
all_numbers[anum] = 1
all_users[username] = 1
        # points for a correct answer
if not(anum in ok_point):
ok_point[anum] = {}
ok_point[anum][username] = i.judge
        # quality points
if not(anum in q_factors):
q_factors[anum] = {}
q_factors[anum][username] = i.q_factor
        # bonus points for the question's author
if i.judge in (0,1) and authors[i.anum] == username:
#print "check_bonus:", i.anum, i.judge, authors[i.anum], username
if not(anum in bonus_point):
bonus_point[anum] = {}
bonus_point[anum][username] = i.judge
        # result (log message)
if not(anum in result):
result[anum] = {}
result[anum][username] = i.result
        # (other) date, cpu_sec, mem_byte, misc_text
if not(anum in misc):
misc[anum] = {}
misc[anum][username] = [i.date, i.cpu_sec, i.mem_byte, i.misc_text]
#print "ok_point=", ok_point
#print "bonus_point=", bonus_point
#print "q_factors=", q_factors
#print "result=\n", result
    # compute quality points
q_pt = 10.0
    for anum, values in q_factors.iteritems(): # per question number
#print "anum=", anum
        qf_total = 0.0 # sum of the Q_factors
for user, qf in values.iteritems():
#print "qf=", qf
qf_total += qf
#print "qf_total=", qf_total
for user, qf in values.iteritems():
if qf_total == 0.0:
tmp = 0.0
else:
tmp = q_pt * qf / qf_total
if not anum in q_point:
q_point[anum] = {}
q_point[anum][user] = tmp
#print "q_point=", q_point
    # aggregate
tmp = ['']*(len(all_numbers) + 1)
i = 0
for anum in sorted(all_numbers.keys()):
tmp[i] = anum
i += 1
tmp[i] = 'TOTAL'
    score_board = {'/header/': tmp} # header row
for user in sorted(all_users.keys()):
#print user
if not(user in score_board):
score_board[user] = [0]*(len(all_numbers) + 1)
i = 0
ptotal = 0.0
for anum in sorted(all_numbers.keys()):
#print anum
p = 0.0
if user in ok_point[anum]: p += ok_point[anum][user]
if user in q_point[anum]: p += q_point[anum][user]
if anum in bonus_point and user in bonus_point[anum]:
p += bonus_point[anum][user]
#print "p=", p
score_board[user][i] = p
ptotal += p
i += 1
score_board[user][i] = ptotal
#print "score_board=", score_board
return score_board, ok_point, q_point, bonus_point, q_factors, result, misc
def html_score_board(score_board):
hd_key = '/header/'
out = '<table border=1>\n'
line = '<tr><th>-</th>'
for hd in score_board[hd_key]:
line += '<th>%s</th>' % hd
line += '</tr>\n'
out += line
for user in sorted(score_board.keys()):
if user == hd_key: continue
line = '<tr><th>%s</th>' % user
for val in score_board[user]:
line += '<td>%1.1f</td>' % val
line += '</tr>\n'
out += line
out += '</table>\n'
#print "out=\n", out
return out
| dasadc/conmgr | adc2018/server/adcutil.py | Python | bsd-3-clause | 23,866 |
from constance.admin import ConstanceForm
from django.forms import fields
from django.test import TestCase
class TestForm(TestCase):
def test_form_field_types(self):
f = ConstanceForm({})
self.assertIsInstance(f.fields['INT_VALUE'], fields.IntegerField)
self.assertIsInstance(f.fields['BOOL_VALUE'], fields.BooleanField)
self.assertIsInstance(f.fields['STRING_VALUE'], fields.CharField)
self.assertIsInstance(f.fields['DECIMAL_VALUE'], fields.DecimalField)
self.assertIsInstance(f.fields['DATETIME_VALUE'], fields.SplitDateTimeField)
self.assertIsInstance(f.fields['TIMEDELTA_VALUE'], fields.DurationField)
self.assertIsInstance(f.fields['FLOAT_VALUE'], fields.FloatField)
self.assertIsInstance(f.fields['DATE_VALUE'], fields.DateField)
self.assertIsInstance(f.fields['TIME_VALUE'], fields.TimeField)
# from CONSTANCE_ADDITIONAL_FIELDS
self.assertIsInstance(f.fields['CHOICE_VALUE'], fields.ChoiceField)
self.assertIsInstance(f.fields['EMAIL_VALUE'], fields.EmailField)
| jazzband/django-constance | tests/test_form.py | Python | bsd-3-clause | 1,084 |
from pybrain.rl.environments.timeseries.maximizereturntask import DifferentialSharpeRatioTask
from pybrain.rl.environments.timeseries.timeseries import AR1Environment, SnPEnvironment
from pybrain.rl.learners.valuebased.linearfa import Q_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import ContinuousExperiment
from matplotlib import pyplot
"""
This script aims to create a trading model that trades on a simple AR(1) process
"""
env=AR1Environment(2000)
task=DifferentialSharpeRatioTask(env)
learner = Q_LinFA(2,1)
agent = LinearFA_Agent(learner)
exp = ContinuousExperiment(task,agent)
ts=env.ts.tolist()
exp.doInteractionsAndLearn(1999)
actionHist=env.actionHistory
pyplot.plot(ts[0])
pyplot.plot(actionHist)
pyplot.show()
#snp_rets=env.importSnP().tolist()[0]
#print(snp_rets.tolist()[0])
#pyplot.plot(snp_rets)
#pyplot.show()
#cumret= cumsum(multiply(ts,actionHist))
#exp.doInteractions(200)
| samstern/MSc-Project | pybrain/rl/environments/timeseries/test programs/ar1TestScript.py | Python | bsd-3-clause | 976 |
from datetime import datetime, timedelta
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
import mock
import pytest
from olympia.amo.tests import BaseTestCase, TestCase
from olympia.amo import decorators, get_user, set_user
from olympia.amo.urlresolvers import reverse
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
def test_post_required():
def func(request):
return mock.sentinel.response
g = decorators.post_required(func)
request = mock.Mock()
request.method = 'GET'
assert isinstance(g(request), http.HttpResponseNotAllowed)
request.method = 'POST'
assert g(request) == mock.sentinel.response
def test_json_view():
"""Turns a Python object into a response."""
def func(request):
return {'x': 1}
response = decorators.json_view(func)(mock.Mock())
assert isinstance(response, http.HttpResponse)
assert response.content == '{"x": 1}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 200
def test_json_view_normal_response():
"""Normal responses get passed through."""
expected = http.HttpResponseForbidden()
def func(request):
return expected
response = decorators.json_view(func)(mock.Mock())
assert expected is response
assert response['Content-Type'] == 'text/html; charset=utf-8'
def test_json_view_error():
"""json_view.error returns 400 responses."""
response = decorators.json_view.error({'msg': 'error'})
assert isinstance(response, http.HttpResponseBadRequest)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
def test_json_view_status():
def func(request):
return {'x': 1}
response = decorators.json_view(func, status_code=202)(mock.Mock())
assert response.status_code == 202
def test_json_view_response_status():
response = decorators.json_response({'msg': 'error'}, status_code=202)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 202
class TestTaskUser(TestCase):
fixtures = ['base/users']
def test_set_task_user(self):
@decorators.set_task_user
def some_func():
return get_user()
set_user(UserProfile.objects.get(username='regularuser'))
assert get_user().pk == 999
assert some_func().pk == int(settings.TASK_USER_ID)
assert get_user().pk == 999
class TestLoginRequired(BaseTestCase):
def setUp(self):
super(TestLoginRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = mock.Mock()
self.request.user.is_authenticated.return_value = False
self.request.get_full_path.return_value = 'path'
def test_normal(self):
func = decorators.login_required(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 302
assert response['Location'] == (
'%s?to=%s' % (reverse('users.login'), 'path'))
def test_no_redirect(self):
func = decorators.login_required(self.f, redirect=False)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_decorator_syntax(self):
# @login_required(redirect=False)
func = decorators.login_required(redirect=False)(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_no_redirect_success(self):
func = decorators.login_required(redirect=False)(self.f)
self.request.user.is_authenticated.return_value = True
func(self.request)
assert self.f.called
class TestSetModifiedOn(TestCase):
fixtures = ['base/users']
@decorators.set_modified_on
def some_method(self, worked):
return worked
def test_set_modified_on(self):
users = list(UserProfile.objects.all()[:3])
self.some_method(True, set_modified_on=users)
for user in users:
assert UserProfile.objects.get(pk=user.pk).modified.date() == (
datetime.today().date())
def test_not_set_modified_on(self):
yesterday = datetime.today() - timedelta(days=1)
qs = UserProfile.objects.all()
qs.update(modified=yesterday)
users = list(qs[:3])
self.some_method(False, set_modified_on=users)
for user in users:
date = UserProfile.objects.get(pk=user.pk).modified.date()
assert date < datetime.today().date()
class TestPermissionRequired(TestCase):
def setUp(self):
super(TestPermissionRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = mock.Mock()
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_not_allowed(self, action_allowed):
action_allowed.return_value = False
func = decorators.permission_required('', '')(self.f)
with self.assertRaises(PermissionDenied):
func(self.request)
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed(self, action_allowed):
action_allowed.return_value = True
func = decorators.permission_required('', '')(self.f)
func(self.request)
assert self.f.called
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed_correctly(self, action_allowed):
func = decorators.permission_required('Admin', '%')(self.f)
func(self.request)
action_allowed.assert_called_with(self.request, 'Admin', '%')
| andymckay/addons-server | src/olympia/amo/tests/test_decorators.py | Python | bsd-3-clause | 5,822 |
from django.core.management.base import BaseCommand
import amo
from mkt.webapps.models import AddonPremium
class Command(BaseCommand):
help = 'Clean up existing AddonPremium objects for free apps.'
def handle(self, *args, **options):
(AddonPremium.objects.filter(addon__premium_type__in=amo.ADDON_FREES)
.delete())
| andymckay/zamboni | mkt/developers/management/commands/cleanup_addon_premium.py | Python | bsd-3-clause | 364 |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-LockWorkStation',
'Author': ['@harmj0y'],
'Description': ("Locks the workstation's display."),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://poshcode.org/1640'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
script = """
Function Invoke-LockWorkStation {
# region define P/Invoke types dynamically
# stolen from PowerSploit https://github.com/mattifestation/PowerSploit/blob/master/Mayhem/Mayhem.psm1
# thanks matt and chris :)
$DynAssembly = New-Object System.Reflection.AssemblyName('Win32')
$AssemblyBuilder = [AppDomain]::CurrentDomain.DefineDynamicAssembly($DynAssembly, [Reflection.Emit.AssemblyBuilderAccess]::Run)
$ModuleBuilder = $AssemblyBuilder.DefineDynamicModule('Win32', $False)
$TypeBuilder = $ModuleBuilder.DefineType('Win32.User32', 'Public, Class')
$DllImportConstructor = [Runtime.InteropServices.DllImportAttribute].GetConstructor(@([String]))
$SetLastError = [Runtime.InteropServices.DllImportAttribute].GetField('SetLastError')
$SetLastErrorCustomAttribute = New-Object Reflection.Emit.CustomAttributeBuilder($DllImportConstructor,
@('User32.dll'),
[Reflection.FieldInfo[]]@($SetLastError),
@($True))
# Define [Win32.User32]::LockWorkStation()
$PInvokeMethod = $TypeBuilder.DefinePInvokeMethod('LockWorkStation',
'User32.dll',
([Reflection.MethodAttributes]::Public -bor [Reflection.MethodAttributes]::Static),
[Reflection.CallingConventions]::Standard,
[Bool],
[Type[]]@(),
[Runtime.InteropServices.CallingConvention]::Winapi,
[Runtime.InteropServices.CharSet]::Ansi)
$PInvokeMethod.SetCustomAttribute($SetLastErrorCustomAttribute)
$User32 = $TypeBuilder.CreateType()
$Null = $User32::LockWorkStation()
}
Invoke-LockWorkStation; "Workstation locked."
"""
return script
| Hackplayers/Empire-mod-Hpys-tests | lib/modules/powershell/management/lock.py | Python | bsd-3-clause | 3,056 |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
""" This python module defines Connection class.
"""
import copy
from vistrails.db.domain import DBConnection
from vistrails.core.vistrail.port import PortEndPoint, Port
import unittest
from vistrails.db.domain import IdScope
################################################################################
class Connection(DBConnection):
""" A Connection is a connection between two modules.
Right now there's only Module connections.
"""
##########################################################################
# Constructors and copy
@staticmethod
def from_port_specs(source, dest):
"""from_port_specs(source: PortSpec, dest: PortSpec) -> Connection
Static method that creates a Connection given source and
destination ports.
"""
conn = Connection()
conn.source = copy.copy(source)
conn.destination = copy.copy(dest)
return conn
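    # Illustrative usage (hypothetical port objects):
    #   conn = Connection.from_port_specs(src_port, dst_port)
    # Both ports are shallow-copied, so the caller's objects stay untouched.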
@staticmethod
def fromID(id):
"""fromTypeID(id: int) -> Connection
Static method that creates a Connection given an id.
"""
conn = Connection()
conn.id = id
conn.source.endPoint = PortEndPoint.Source
conn.destination.endPoint = PortEndPoint.Destination
return conn
def __init__(self, *args, **kwargs):
"""__init__() -> Connection
Initializes source and destination ports.
"""
DBConnection.__init__(self, *args, **kwargs)
if self.id is None:
self.db_id = -1
if not len(self.ports) > 0:
self.source = Port(type='source')
self.destination = Port(type='destination')
def __copy__(self):
"""__copy__() -> Connection - Returns a clone of self.
"""
return Connection.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBConnection.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = Connection
for port in cp.ports:
Port.convert(port)
return cp
##########################################################################
@staticmethod
def convert(_connection):
# print "ports: %s" % _Connection._get_ports(_connection)
if _connection.__class__ == Connection:
return
_connection.__class__ = Connection
for port in _connection.ports:
Port.convert(port)
##########################################################################
# Properties
id = DBConnection.db_id
ports = DBConnection.db_ports
def add_port(self, port):
self.db_add_port(port)
def _get_sourceId(self):
""" _get_sourceId() -> int
Returns the module id of source port. Do not use this function,
use sourceId property: c.sourceId
"""
return self.source.moduleId
def _set_sourceId(self, id):
""" _set_sourceId(id : int) -> None
Sets this connection source id. It updates both self.source.moduleId
and self.source.id. Do not use this function, use sourceId
property: c.sourceId = id
"""
self.source.moduleId = id
self.source.id = id
sourceId = property(_get_sourceId, _set_sourceId)
def _get_destinationId(self):
""" _get_destinationId() -> int
Returns the module id of dest port. Do not use this function,
        use destinationId property: c.destinationId
"""
return self.destination.moduleId
def _set_destinationId(self, id):
""" _set_destinationId(id : int) -> None
Sets this connection destination id. It updates self.dest.moduleId.
Do not use this function, use destinationId property:
c.destinationId = id
"""
self.destination.moduleId = id
destinationId = property(_get_destinationId, _set_destinationId)
def _get_source(self):
"""_get_source() -> Port
Returns source port. Do not use this function, use source property:
c.source
"""
try:
return self.db_get_port_by_type('source')
except KeyError:
pass
return None
def _set_source(self, source):
"""_set_source(source: Port) -> None
Sets this connection source port. Do not use this function,
use source property instead: c.source = source
"""
try:
port = self.db_get_port_by_type('source')
self.db_delete_port(port)
except KeyError:
pass
if source is not None:
self.db_add_port(source)
source = property(_get_source, _set_source)
def _get_destination(self):
"""_get_destination() -> Port
Returns destination port. Do not use this function, use destination
property: c.destination
"""
# return self.db_ports['destination']
try:
return self.db_get_port_by_type('destination')
except KeyError:
pass
return None
def _set_destination(self, dest):
"""_set_destination(dest: Port) -> None
Sets this connection destination port. Do not use this
function, use destination property instead: c.destination = dest
"""
try:
port = self.db_get_port_by_type('destination')
self.db_delete_port(port)
except KeyError:
pass
if dest is not None:
self.db_add_port(dest)
destination = property(_get_destination, _set_destination)
dest = property(_get_destination, _set_destination)
##########################################################################
# Operators
def __str__(self):
"""__str__() -> str - Returns a string representation of a Connection
object.
"""
rep = "<connection id='%s'>%s%s</connection>"
return rep % (str(self.id), str(self.source), str(self.destination))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.source == other.source and
self.dest == other.dest)
def equals_no_id(self, other):
"""Checks equality up to ids (connection and ports)."""
if type(self) != type(other):
return False
return (self.source.equals_no_id(other.source) and
self.dest.equals_no_id(other.dest))
################################################################################
# Testing
class TestConnection(unittest.TestCase):
def create_connection(self, id_scope=IdScope()):
from vistrails.core.vistrail.port import Port
from vistrails.core.modules.basic_modules import identifier as basic_pkg
source = Port(id=id_scope.getNewId(Port.vtType),
type='source',
moduleId=21L,
moduleName='String',
name='value',
signature='(%s:String)' % basic_pkg)
destination = Port(id=id_scope.getNewId(Port.vtType),
type='destination',
moduleId=20L,
moduleName='Float',
name='value',
signature='(%s:Float)' % basic_pkg)
connection = Connection(id=id_scope.getNewId(Connection.vtType),
ports=[source, destination])
return connection
def test_copy(self):
id_scope = IdScope()
c1 = self.create_connection(id_scope)
c2 = copy.copy(c1)
self.assertEquals(c1, c2)
self.assertEquals(c1.id, c2.id)
c3 = c1.do_copy(True, id_scope, {})
self.assertEquals(c1, c3)
self.assertNotEquals(c1.id, c3.id)
def test_serialization(self):
import vistrails.core.db.io
c1 = self.create_connection()
xml_str = vistrails.core.db.io.serialize(c1)
c2 = vistrails.core.db.io.unserialize(xml_str, Connection)
self.assertEquals(c1, c2)
self.assertEquals(c1.id, c2.id)
def testEmptyConnection(self):
"""Tests sane initialization of empty connection"""
c = Connection()
self.assertEquals(c.source.endPoint, PortEndPoint.Source)
self.assertEquals(c.destination.endPoint, PortEndPoint.Destination)
if __name__ == '__main__':
unittest.main()
| VisTrails/VisTrails | vistrails/core/vistrail/connection.py | Python | bsd-3-clause | 10,497 |
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # non-power-of-two itemsizes are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
        of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
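        # e.g. a['a'].shape == (3, 5, 2, 2) while a.T['a'].shape == (5, 3, 2, 2):
        # only the outer dimensions are transposed, the (2, 2) subarray is not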
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
        # check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
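            # rescale the raw bytes without changing truthiness; the count
            # must be unaffected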
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
        # prevent mistakes such as gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
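        # halves round to the nearest even value: 4.5 -> 4, 1.5 -> 2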
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
        # test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
        # only follow the insertion sort path are sufficient. We only
        # test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
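        # nans are sorted to the end, so the result is the input reversed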
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
        # all c scalar sorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quicksort and mergesort fall back to insertion
        # sort for small arrays.
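        # kind codes: 'q' is quicksort, 'm' is mergesort, 'h' is heapsort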
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
        for endianness in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
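        # sort on a field with non-native byte order; strchar ('S' or 'U'
        # depending on the Python version, presumably from numpy.compat)
        # selects the matching string dtype character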
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
        # all c scalar argsorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quicksort and mergesort fall back to insertion
        # sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
        for endianness in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # argsort with axis=None is known to fail at this point
        # assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
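        # k is u'ä'; a non-ASCII kind name must be rejected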
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
        # Set up a deliberately misaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
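        # '?O' appends the bool and object typecodes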
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
        # Set up a shuffled array and a deliberately misaligned copy
        keys = np.arange(10)
        a = keys.copy()
        np.random.shuffle(a)
        s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
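        # the -1 filler values occupy the skipped stride positions and are
        # never read through the strided view s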
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
            d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
            d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
            d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
            d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
            # median-of-3 killer input: O(n^2) on a pure median-of-3 pivot
            # quickselect; exercises the median-of-medians-of-5 fallback used
            # to keep the runtime O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
            d = np.arange(47) % 7
            tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy on Python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                # assert_array_less does not seem to work right here, so check manually
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
                # assert_array_less does not seem to work right here, so check manually
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy on Python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # General case of possible ravel that is not contiguous but
        # works and includes a 1-sized axis with non-matching stride
a = a.swapaxes(-1, -2) # swap back to C-order
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
a = a.T # swap all to Fortran order
assert_(np.may_share_memory(a.ravel(order='F'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
# Test negative strides:
a = np.arange(4)[::-1].reshape(2, 2)
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# Test keeporder with weirdly strided 1-sized dims (1-d first stride)
a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
strides = list(a.strides)
strides[0] = -12
strides[-1] = 0
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), a.ravel('C'))
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
        # Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
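        # the second argument 'F' is the complex64 typecode, not order='F'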
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack to
        # check that we are called directly from python is flawed, as the
        # object may still be above the stack pointer and we have no access
        # to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1s makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
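        # op name -> (reflected op name, matching ufunc or None, has an in-place form)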
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
        # obj3 inherits __rmul__ from SomeClass, which overrides
        # ndarray.__rmul__, so CPython calls it.
        assert_equal(arr * obj3, 321)
        # But not here: obj3.__rmul__ is obj2.__rmul__ (not an override),
        # and CPython only gives a subclass's reflected method priority
        # when the subclass actually overrides it.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
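        # For reference, a minimal plain-Python sketch of the rule the
        # assertions above rely on: for ``x + y`` CPython tries
        # ``type(y).__radd__`` first only if type(y) is a proper subclass of
        # type(x) *and* overrides the method, e.g.:
        #
        #     class Base(object):
        #         def __add__(self, other): return "add"
        #     class Sub(Base):
        #         def __radd__(self, other): return "radd"
        #     Base() + Sub()  # -> "radd" (subclass override wins)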
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
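    # Note the convention exercised above: NaNs (and NaTs) propagate, so
    # argmax returns the index of the *first* NaN/NaT when one is present,
    # e.g. np.argmax([0., np.nan, 2.]) == 1.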
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
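            # choose() picks the entries selected by the argmax indices
            # along axis i; they must agree with a direct max().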
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases
        # used to be allowed as well)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
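    # As with argmax above, NaNs/NaTs propagate and the first one wins.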
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
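            # choose() picks the entries selected by the argmin indices
            # along axis i; they must agree with a direct min().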
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases
        # used to be allowed as well)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
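            # with every element NaT there is nothing left to ignore, so
            # the reduction simply returns NaT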
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
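        # i.e. mode='clip' maps out-of-range indices to the nearest valid
        # index: negative -> 0, too large -> n - 1.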
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
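        # i.e. mode='wrap' takes indices modulo the axis length.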
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
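        # np.lexsort sorts by the *last* key first: a is the primary key
        # and b breaks ties.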
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() keeps less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
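                # 2 bytes read + one float64 (8 bytes) -> position 10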
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
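        # c/d come from non-writeable arrays and stay non-writeable.  e is a
        # plain view of the contiguous, writeable a0, while b0 is writeable
        # but non-contiguous, so its flat.__array__() is an UPDATEIFCOPY
        # copy that writes back to b0 when it is deallocated.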
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
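            # viewing the multi-field selection as ('i4', 2) flattens each
            # record into a plain integer pair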
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
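        # little-endian packing: 0x04030201 == 67305985 and
        # 0x08070605 == 134678021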
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
        # FIXME:
        # this needs a definition; there are lots of places along the line
        # where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        # var(ddof) * (n - ddof) and std(ddof) * sqrt(n - ddof) are
        # independent of ddof; compare against the ddof=0 results.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
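        # dout[k] is the expected np.dot result shape for the k-th
        # (dim1, dim2) pair in itertools.product(dims, dims) order.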
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)),             # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        # exercise the matrix @ vector direction, as in the loop above
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaults or gives a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside the
        # array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = np.dtype('p').itemsize
# creates dtype but with extra character code - for missing 'p' fields
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): # subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += np.dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return np.dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
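        # e.g. mtype('p,-,p') yields fields f0 and f1 at offsets 0 and
        # 2*psize, with a pointer-sized unnamed gap where the '-' was.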
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
_view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
d2 = np.dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = np.dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
#test dtype with overlapping fields
overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| MichaelAquilina/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 220,691 |
import urllib
from canvas import util
def make_cookie_key(key):
return 'after_signup_' + str(key)
def _get(request, key):
key = make_cookie_key(key)
val = request.COOKIES.get(key)
if val is not None:
val = util.loads(urllib.unquote(val))
return (key, val,)
def get_posted_comment(request):
'''
Gets a comment waiting to be posted, if one exists.
Returns a pair containing the cookie key used to retrieve it and its deserialized JSON.
'''
#TODO use dcramer's django-cookies so that we don't rely on having the response object to mutate cookies.
# That would make this API much cleaner and isolated.
return _get(request, 'post_comment')
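# A minimal usage sketch (hypothetical view code; post_comment and the
# Django response object are assumptions, not part of this module):
#
# def flush_posted_comment(request, response):
#     key, comment = get_posted_comment(request)
#     if comment is not None:
#         post_comment(request.user, comment)
#         response.delete_cookie(key)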
| canvasnetworks/canvas | website/canvas/after_signup.py | Python | bsd-3-clause | 698 |
import numpy as np
from Coupling import Coupling
class Coupling2DCavities2D(Coupling):
"""
Coupling for cavity2D to cavity transmission.
"""
@property
def impedance_from(self):
"""
        Chooses the right impedance of subsystem_from.
Applies boundary conditions correction as well.
"""
return self.subsystem_from.impedance
@property
def impedance_to(self):
"""
Choses the right impedance of subsystem_from.
Applies boundary conditions correction as well.
"""
return self.subsystem_to.impedance
@property
def tau(self):
"""
Transmission coefficient.
"""
return np.zeros(self.frequency.amount)
@property
def clf(self):
"""
Coupling loss factor for transmission from a 2D cavity to a cavity.
.. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}
See BAC, equation 3.14
"""
        return self.tau / (4.0 * np.pi)
| FRidh/Sea | Sea/model/couplings/Coupling2DCavities2D.py | Python | bsd-3-clause | 1,049 |
''' The `Filter` hierarchy contains Transformer classes that take a `Stim`
of one type as input and return a `Stim` of the same type as output (but with
some changes to its data).
'''
from .audio import (AudioTrimmingFilter,
AudioResamplingFilter)
from .base import TemporalTrimmingFilter
from .image import (ImageCroppingFilter,
ImageResizingFilter,
PillowImageFilter)
from .text import (WordStemmingFilter,
TokenizingFilter,
TokenRemovalFilter,
PunctuationRemovalFilter,
LowerCasingFilter)
from .video import (FrameSamplingFilter,
VideoTrimmingFilter)
__all__ = [
'AudioTrimmingFilter',
'AudioResamplingFilter',
'TemporalTrimmingFilter',
'ImageCroppingFilter',
'ImageResizingFilter',
'PillowImageFilter',
'WordStemmingFilter',
'TokenizingFilter',
'TokenRemovalFilter',
'PunctuationRemovalFilter',
'LowerCasingFilter',
'FrameSamplingFilter',
'VideoTrimmingFilter'
]
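# A minimal usage sketch, kept as a comment so importing the package stays
# side-effect free (assumes an image file at 'photo.jpg'; size is assumed
# to be a (width, height) tuple accepted by ImageResizingFilter):
#
# from pliers.stimuli import ImageStim
# stim = ImageStim('photo.jpg')
# small = ImageResizingFilter(size=(128, 128)).transform(stim)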
| tyarkoni/pliers | pliers/filters/__init__.py | Python | bsd-3-clause | 1,079 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='reddit_gold',
description='reddit gold',
version='0.1',
author='Chad Birch',
author_email='[email protected]',
packages=find_packages(),
install_requires=[
'r2',
],
entry_points={
'r2.plugin':
['gold = reddit_gold:Gold']
},
include_package_data=True,
zip_safe=False,
)
| madbook/reddit-plugin-gold | setup.py | Python | bsd-3-clause | 418 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from ionyweb.plugin_app.plugin_video.models import Plugin_Video
admin.site.register(Plugin_Video)
| makinacorpus/ionyweb | ionyweb/plugin_app/plugin_video/admin.py | Python | bsd-3-clause | 157 |
from __future__ import unicode_literals
__all__ = (
'Key',
'Keys',
)
class Key(object):
def __init__(self, name):
#: Descriptive way of writing keys in configuration files. e.g. <C-A>
#: for ``Control-A``.
self.name = name
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.name)
class Keys(object):
Escape = Key('<Escape>')
ControlA = Key('<C-A>')
ControlB = Key('<C-B>')
ControlC = Key('<C-C>')
ControlD = Key('<C-D>')
ControlE = Key('<C-E>')
ControlF = Key('<C-F>')
ControlG = Key('<C-G>')
ControlH = Key('<C-H>')
ControlI = Key('<C-I>') # Tab
ControlJ = Key('<C-J>') # Enter
ControlK = Key('<C-K>')
ControlL = Key('<C-L>')
ControlM = Key('<C-M>') # Enter
ControlN = Key('<C-N>')
ControlO = Key('<C-O>')
ControlP = Key('<C-P>')
ControlQ = Key('<C-Q>')
ControlR = Key('<C-R>')
ControlS = Key('<C-S>')
ControlT = Key('<C-T>')
ControlU = Key('<C-U>')
ControlV = Key('<C-V>')
ControlW = Key('<C-W>')
ControlX = Key('<C-X>')
ControlY = Key('<C-Y>')
ControlZ = Key('<C-Z>')
ControlSpace = Key('<C-Space>')
ControlBackslash = Key('<C-Backslash>')
ControlSquareClose = Key('<C-SquareClose>')
ControlCircumflex = Key('<C-Circumflex>')
ControlUnderscore = Key('<C-Underscore>')
ControlLeft = Key('<C-Left>')
ControlRight = Key('<C-Right>')
ControlUp = Key('<C-Up>')
ControlDown = Key('<C-Down>')
Up = Key('<Up>')
Down = Key('<Down>')
Right = Key('<Right>')
Left = Key('<Left>')
Home = Key('<Home>')
End = Key('<End>')
Delete = Key('<Delete>')
ShiftDelete = Key('<ShiftDelete>')
PageUp = Key('<PageUp>')
PageDown = Key('<PageDown>')
BackTab = Key('<BackTab>') # shift + tab
Tab = ControlI
Backspace = ControlH
F1 = Key('<F1>')
F2 = Key('<F2>')
F3 = Key('<F3>')
F4 = Key('<F4>')
F5 = Key('<F5>')
F6 = Key('<F6>')
F7 = Key('<F7>')
F8 = Key('<F8>')
F9 = Key('<F9>')
F10 = Key('<F10>')
F11 = Key('<F11>')
F12 = Key('<F12>')
F13 = Key('<F13>')
F14 = Key('<F14>')
F15 = Key('<F15>')
F16 = Key('<F16>')
F17 = Key('<F17>')
F18 = Key('<F18>')
F19 = Key('<F19>')
F20 = Key('<F20>')
# Matches any key.
Any = Key('<Any>')
# Special
CPRResponse = Key('<Cursor-Position-Response>')
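# Note: the aliases above (Tab, Backspace) are identity aliases, so e.g.
# Keys.Tab is Keys.ControlI evaluates True, and a binding registered for
# ControlI fires for Tab as well.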
| jaseg/python-prompt-toolkit | prompt_toolkit/keys.py | Python | bsd-3-clause | 2,546 |
#!/usr/bin/env python
"""
================
sMRI: FreeSurfer
================
This script, smri_freesurfer.py, demonstrates the ability to call recon-all on
a set of subjects and then make an average subject.
python smri_freesurfer.py
Import necessary modules from nipype.
"""
import os
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces.freesurfer.preprocess import ReconAll
from nipype.interfaces.freesurfer.utils import MakeAverageSubject
subject_list = ['s1', 's3']
data_dir = os.path.abspath('data')
subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir')
wf = pe.Workflow(name="l1workflow")
wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir')
"""
Grab data
"""
datasource = pe.MapNode(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['struct']),
name='datasource',
iterfield=['subject_id'])
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']])
datasource.inputs.subject_id = subject_list
"""
Run recon-all
"""
recon_all = pe.MapNode(interface=ReconAll(), name='recon_all',
iterfield=['subject_id', 'T1_files'])
recon_all.inputs.subject_id = subject_list
if not os.path.exists(subjects_dir):
os.mkdir(subjects_dir)
recon_all.inputs.subjects_dir = subjects_dir
wf.connect(datasource, 'struct', recon_all, 'T1_files')
"""
Make average subject
"""
average = pe.Node(interface=MakeAverageSubject(), name="average")
average.inputs.subjects_dir = subjects_dir
wf.connect(recon_all, 'subject_id', average, 'subjects_ids')
wf.run("MultiProc", plugin_args={'n_procs': 4})
| FredLoney/nipype | examples/smri_freesurfer.py | Python | bsd-3-clause | 1,804 |
import shutil
from nose.tools import *
from holland.lib.lvm import LogicalVolume
from holland.lib.lvm.snapshot import *
from tests.constants import *
class TestSnapshot(object):
def setup(self):
self.tmpdir = tempfile.mkdtemp()
def teardown(self):
shutil.rmtree(self.tmpdir)
def test_snapshot_fsm(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
snapshot.start(lv)
def test_snapshot_fsm_with_callbacks(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def handle_event(event, *args, **kwargs):
pass
snapshot.register('pre-mount', handle_event)
snapshot.register('post-mount', handle_event)
snapshot.start(lv)
def test_snapshot_fsm_with_failures(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def bad_callback(event, *args, **kwargs):
raise Exception("Oooh nooo!")
for evt in ('initialize', 'pre-snapshot', 'post-snapshot',
'pre-mount', 'post-mount', 'pre-unmount', 'post-unmount',
'pre-remove', 'post-remove', 'finish'):
snapshot.register(evt, bad_callback)
assert_raises(CallbackFailuresError, snapshot.start, lv)
snapshot.unregister(evt, bad_callback)
if snapshot.sigmgr._handlers:
raise Exception("WTF. sigmgr handlers still exist when checking event => %r", evt)
| m00dawg/holland | plugins/holland.lib.lvm/tests/xfs/test_snapshot.py | Python | bsd-3-clause | 1,824 |
from django.apps import AppConfig
class ContentStoreAppConfig(AppConfig):
name = "contentstore"
def ready(self):
        # Import the signal handlers so they are registered at startup; the
        # bare reference below keeps linters from flagging an unused import.
        import contentstore.signals
        contentstore.signals
| praekelt/seed-stage-based-messaging | contentstore/apps.py | Python | bsd-3-clause | 190 |
# -*- coding: utf-8 -*-
"""Git tools."""
from shlex import split
from plumbum import ProcessExecutionError
from plumbum.cmd import git
DEVELOPMENT_BRANCH = "develop"
def run_git(*args, dry_run=False, quiet=False):
"""Run a git command, print it before executing and capture the output."""
command = git[split(" ".join(args))]
if not quiet:
print("{}{}".format("[DRY-RUN] " if dry_run else "", command))
if dry_run:
return ""
rv = command()
if not quiet and rv:
print(rv)
return rv
def branch_exists(branch):
"""Return True if the branch exists."""
try:
run_git("rev-parse --verify {}".format(branch), quiet=True)
return True
except ProcessExecutionError:
return False
def get_current_branch():
"""Get the current branch name."""
return run_git("rev-parse --abbrev-ref HEAD", quiet=True).strip()
| andreoliw/clitoolkit | clit/git.py | Python | bsd-3-clause | 899 |
from __future__ import unicode_literals
from .atomicparsley import AtomicParsleyPP
from .ffmpeg import (
FFmpegPostProcessor,
FFmpegAudioFixPP,
FFmpegEmbedSubtitlePP,
FFmpegExtractAudioPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegMetadataPP,
FFmpegVideoConvertorPP,
)
from .xattrpp import XAttrMetadataPP
from .execafterdownload import ExecAfterDownloadPP
def get_postprocessor(key):
return globals()[key + 'PP']
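# e.g. get_postprocessor('FFmpegMetadata') returns the FFmpegMetadataPP class.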
__all__ = [
'AtomicParsleyPP',
'ExecAfterDownloadPP',
'FFmpegAudioFixPP',
'FFmpegEmbedSubtitlePP',
'FFmpegExtractAudioPP',
'FFmpegFixupStretchedPP',
'FFmpegMergerPP',
'FFmpegMetadataPP',
'FFmpegPostProcessor',
'FFmpegVideoConvertorPP',
'XAttrMetadataPP',
]
| janusnic/youtube-dl-GUI | youtube_dl/postprocessor/__init__.py | Python | mit | 760 |
__author__ = 'Nishanth'
from juliabox.cloud import JBPluginCloud
from juliabox.jbox_util import JBoxCfg, retry_on_errors
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import threading
class JBoxGCD(JBPluginCloud):
provides = [JBPluginCloud.JBP_DNS, JBPluginCloud.JBP_DNS_GCD]
threadlocal = threading.local()
INSTALLID = None
REGION = None
DOMAIN = None
@staticmethod
def configure():
cloud_host = JBoxCfg.get('cloud_host')
JBoxGCD.INSTALLID = cloud_host['install_id']
JBoxGCD.REGION = cloud_host['region']
JBoxGCD.DOMAIN = cloud_host['domain']
@staticmethod
def domain():
if JBoxGCD.DOMAIN is None:
JBoxGCD.configure()
return JBoxGCD.DOMAIN
@staticmethod
def connect():
c = getattr(JBoxGCD.threadlocal, 'conn', None)
if c is None:
JBoxGCD.configure()
creds = GoogleCredentials.get_application_default()
JBoxGCD.threadlocal.conn = c = build("dns", "v1", credentials=creds)
return c
@staticmethod
@retry_on_errors(retries=2)
def add_cname(name, value):
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'additions': [
{'rrdatas': [value],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': 300} ] }).execute()
@staticmethod
@retry_on_errors(retries=2)
def delete_cname(name):
resp = JBoxGCD.connect().resourceRecordSets().list(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
name=name, type='A').execute()
if len(resp['rrsets']) == 0:
JBoxGCD.log_debug('No prior dns registration found for %s', name)
else:
cname = resp['rrsets'][0]['rrdatas'][0]
ttl = resp['rrsets'][0]['ttl']
JBoxGCD.connect().changes().create(
project=JBoxGCD.INSTALLID, managedZone=JBoxGCD.REGION,
body={'kind': 'dns#change',
'deletions': [
{'rrdatas': [str(cname)],
'kind': 'dns#resourceRecordSet',
'type': 'A',
'name': name,
'ttl': ttl} ] }).execute()
JBoxGCD.log_warn('Prior dns registration was found for %s', name)
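# A minimal usage sketch (hypothetical values; assumes Google application
# default credentials and a configured 'cloud_host' section in JBoxCfg):
#
# JBoxGCD.configure()
# fqdn = 'box1.' + JBoxGCD.domain() + '.'
# JBoxGCD.add_cname(fqdn, '10.0.0.1')
# JBoxGCD.delete_cname(fqdn)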
| JuliaLang/JuliaBox | engine/src/juliabox/plugins/dns_gcd/impl_gcd.py | Python | mit | 2,592 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import timedelta
from flask import flash, redirect, request, session
from indico.core.db import db
from indico.modules.admin import RHAdminBase
from indico.modules.news import logger, news_settings
from indico.modules.news.forms import NewsForm, NewsSettingsForm
from indico.modules.news.models.news import NewsItem
from indico.modules.news.util import get_recent_news
from indico.modules.news.views import WPManageNews, WPNews
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RH
from indico.web.util import jsonify_data, jsonify_form
class RHNews(RH):
@staticmethod
def _is_new(item):
days = news_settings.get('new_days')
if not days:
return False
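        # e.g. with new_days == 7, an item created six days ago (UTC) is
        # still flagged as new.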
return item.created_dt.date() >= (now_utc() - timedelta(days=days)).date()
def _process(self):
news = NewsItem.query.order_by(NewsItem.created_dt.desc()).all()
return WPNews.render_template('news.html', news=news, _is_new=self._is_new)
class RHNewsItem(RH):
normalize_url_spec = {
'locators': {
lambda self: self.item.locator.slugged
}
}
def _process_args(self):
self.item = NewsItem.get_or_404(request.view_args['news_id'])
def _process(self):
return WPNews.render_template('news_item.html', item=self.item)
class RHManageNewsBase(RHAdminBase):
pass
class RHManageNews(RHManageNewsBase):
def _process(self):
news = NewsItem.query.order_by(NewsItem.created_dt.desc()).all()
return WPManageNews.render_template('admin/news.html', 'news', news=news)
class RHNewsSettings(RHManageNewsBase):
def _process(self):
form = NewsSettingsForm(obj=FormDefaults(**news_settings.get_all()))
if form.validate_on_submit():
news_settings.set_multi(form.data)
get_recent_news.clear_cached()
flash(_('Settings have been saved'), 'success')
return jsonify_data()
return jsonify_form(form)
class RHCreateNews(RHManageNewsBase):
def _process(self):
form = NewsForm()
if form.validate_on_submit():
item = NewsItem()
form.populate_obj(item)
db.session.add(item)
db.session.flush()
get_recent_news.clear_cached()
logger.info('News %r created by %s', item, session.user)
flash(_("News '{title}' has been posted").format(title=item.title), 'success')
return jsonify_data(flash=False)
return jsonify_form(form)
class RHManageNewsItemBase(RHManageNewsBase):
def _process_args(self):
RHManageNewsBase._process_args(self)
self.item = NewsItem.get_or_404(request.view_args['news_id'])
class RHEditNews(RHManageNewsItemBase):
def _process(self):
form = NewsForm(obj=self.item)
if form.validate_on_submit():
old_title = self.item.title
form.populate_obj(self.item)
db.session.flush()
get_recent_news.clear_cached()
logger.info('News %r modified by %s', self.item, session.user)
flash(_("News '{title}' has been updated").format(title=old_title), 'success')
return jsonify_data(flash=False)
return jsonify_form(form)
class RHDeleteNews(RHManageNewsItemBase):
def _process(self):
db.session.delete(self.item)
get_recent_news.clear_cached()
flash(_("News '{title}' has been deleted").format(title=self.item.title), 'success')
logger.info('News %r deleted by %r', self.item, session.user)
return redirect(url_for('news.manage'))
| pferreir/indico | indico/modules/news/controllers.py | Python | mit | 3,954 |
#!/usr/bin/env python
import sys
def fix_terminator(tokens):
if not tokens:
return
last = tokens[-1]
if last not in ('.', '?', '!') and last.endswith('.'):
tokens[-1] = last[:-1]
tokens.append('.')
def balance_quotes(tokens):
count = tokens.count("'")
if not count:
return
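    # Pair up the single quotes: the opening quote of each pair becomes a
    # backtick so openers and closers are distinguishable downstream, while
    # a final unpaired quote (likely an apostrophe) is left untouched.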
processed = 0
for i, token in enumerate(tokens):
if token == "'":
if processed % 2 == 0 and (i == 0 or processed != count - 1):
tokens[i] = "`"
processed += 1
def output(tokens):
if not tokens:
return
# fix_terminator(tokens)
balance_quotes(tokens)
print ' '.join(tokens)
prev = None
for line in sys.stdin:
tokens = line.split()
if len(tokens) == 1 and tokens[0] in ('"', "'", ')', ']'):
prev.append(tokens[0])
else:
output(prev)
prev = tokens
output(prev)
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/candc/src/lib/tokeniser/fixes.py | Python | mit | 820 |
# -*- coding: utf-8 -*-
"""
webModifySqlAPI
~~~~~~~~~~~~~~
    Interface for the web application's back-end database modification
    operations (insert, update, delete).
    DataApiFunc.py in api_functions is the aggregation point for these
    interface functions: all global variables are configured there, and
    all back-end function calls are wired up there.
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin kissf lu.
:license: ukl, see LICENSE for more details.
"""
from . import api
from flask import json
from flask import request
from bson import json_util
# DataApiFunc provides the database update, insert and delete operation functions
from api_functions.DataApiFunc import (deleManuleVsimSrc,
insertManuleVsimSrc,
updateManuleVsimSrc,
deleteNewVsimTestInfo,
insertNewVsimTestInfo,
updateNewVsimTestInfo)
@api.route('/delet_manulVsim/', methods=['POST'])
def delet_manulVsim():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return deleManuleVsimSrc(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/insert_manulVsim/', methods=['POST'])
def insert_manulVsim():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return insertManuleVsimSrc(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/update_manulVsim/', methods=['POST'])
def update_manulVsim():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return updateManuleVsimSrc(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/delet_newvsimtest_info_table/', methods=['POST'])
def delet_newvsimtest_info_table():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return deleteNewVsimTestInfo(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/insert_newvsimtest_info_table/', methods=['POST'])
def insert_newvsimtest_info_table():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return insertNewVsimTestInfo(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default)
@api.route('/update_newvsimtest_info_table/', methods=['POST'])
def update_newvsimtest_info_table():
"""
:return:
"""
if request.method == 'POST':
arrayData = request.get_array(field_name='file')
return updateNewVsimTestInfo(array_data=arrayData)
else:
        returnJsonData = {'err': True, 'errinfo': 'Illegal operation!', 'data': []}
return json.dumps(returnJsonData, sort_keys=True, indent=4, default=json_util.default) | dadawangweb/ukl-gsvc-web | app/api_1_0/webModifySqlAPI.py | Python | mit | 3,719 |
import os, scrapy, argparse
from realclearpolitics.spiders.spider import RcpSpider
from scrapy.crawler import CrawlerProcess
parser = argparse.ArgumentParser('Scrape realclearpolitics polls data')
parser.add_argument('url', action="store")
parser.add_argument('--locale', action="store", default='')
parser.add_argument('--race', action="store", default='primary')
parser.add_argument('--csv', dest='to_csv', action='store_true')
parser.add_argument('--output', dest='output', action='store')
args = parser.parse_args()
url = args.url
extra_fields = { 'locale': args.locale, 'race': args.race }
if (args.to_csv):
if args.output is None:
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = args.output
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
else:
settings = {
'ITEM_PIPELINES' : {
'realclearpolitics.pipeline.PollPipeline': 300,
},
'LOG_LEVEL' : 'ERROR',
'DOWNLOAD_HANDLERS' : {'s3': None,}
}
process = CrawlerProcess(settings);
process.crawl(RcpSpider, url, extra_fields)
process.start()
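# Example invocation (hypothetical URL; with --csv the output file defaults
# to <last-url-segment>.csv unless --output is given):
#   python scraper.py http://www.realclearpolitics.com/epolls/2016/president/some_poll.html --csv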
| dpxxdp/berniemetrics | private/scrapers/realclearpolitics-scraper/scraper.py | Python | mit | 1,355 |
from panda3d.core import NodePath, DecalEffect
import DNANode
import DNAWall
import DNAError
import random
class DNAFlatBuilding(DNANode.DNANode):
COMPONENT_CODE = 9
currentWallHeight = 0
def __init__(self, name):
DNANode.DNANode.__init__(self, name)
self.width = 0
self.hasDoor = False
def setWidth(self, width):
self.width = width
def getWidth(self):
return self.width
def setCurrentWallHeight(self, currentWallHeight):
DNAFlatBuilding.currentWallHeight = currentWallHeight
def getCurrentWallHeight(self):
return DNAFlatBuilding.currentWallHeight
def setHasDoor(self, hasDoor):
self.hasDoor = hasDoor
def getHasDoor(self):
return self.hasDoor
def makeFromDGI(self, dgi):
DNANode.DNANode.makeFromDGI(self, dgi)
self.width = dgi.getInt16() / 100.0
self.hasDoor = dgi.getBool()
def setupSuitFlatBuilding(self, nodePath, dnaStorage):
name = self.getName()
if name[:2] != 'tb':
return
name = 'sb' + name[2:]
node = nodePath.attachNewNode(name)
node.setPosHpr(self.getPos(), self.getHpr())
numCodes = dnaStorage.getNumCatalogCodes('suit_wall')
if numCodes < 1:
return
code = dnaStorage.getCatalogCode(
'suit_wall', random.randint(0, numCodes - 1))
wallNode = dnaStorage.findNode(code)
if not wallNode:
return
wallNode = wallNode.copyTo(node, 0)
wallScale = wallNode.getScale()
wallScale.setX(self.width)
wallScale.setZ(DNAFlatBuilding.currentWallHeight)
wallNode.setScale(wallScale)
if self.getHasDoor():
wallNodePath = node.find('wall_*')
doorNode = dnaStorage.findNode('suit_door')
doorNode = doorNode.copyTo(wallNodePath, 0)
doorNode.setScale(NodePath(), (1, 1, 1))
doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
wallNodePath.setEffect(DecalEffect.make())
node.flattenMedium()
node.stash()
def setupCogdoFlatBuilding(self, nodePath, dnaStorage):
name = self.getName()
if name[:2] != 'tb':
return
name = 'cb' + name[2:]
node = nodePath.attachNewNode(name)
node.setPosHpr(self.getPos(), self.getHpr())
numCodes = dnaStorage.getNumCatalogCodes('cogdo_wall')
if numCodes < 1:
return
code = dnaStorage.getCatalogCode(
'cogdo_wall', random.randint(0, numCodes - 1))
wallNode = dnaStorage.findNode(code)
if not wallNode:
return
wallNode = wallNode.copyTo(node, 0)
wallScale = wallNode.getScale()
wallScale.setX(self.width)
wallScale.setZ(DNAFlatBuilding.currentWallHeight)
wallNode.setScale(wallScale)
if self.getHasDoor():
wallNodePath = node.find('wall_*')
doorNode = dnaStorage.findNode('suit_door')
doorNode = doorNode.copyTo(wallNodePath, 0)
doorNode.setScale(NodePath(), (1, 1, 1))
doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0)
wallNodePath.setEffect(DecalEffect.make())
node.flattenMedium()
node.stash()
def traverse(self, nodePath, dnaStorage):
DNAFlatBuilding.currentWallHeight = 0
node = nodePath.attachNewNode(self.getName())
internalNode = node.attachNewNode(self.getName() + '-internal')
scale = self.getScale()
scale.setX(self.width)
internalNode.setScale(scale)
node.setPosHpr(self.getPos(), self.getHpr())
for child in self.children:
if isinstance(child, DNAWall.DNAWall):
child.traverse(internalNode, dnaStorage)
else:
child.traverse(node, dnaStorage)
if DNAFlatBuilding.currentWallHeight == 0:
print 'empty flat building with no walls'
else:
cameraBarrier = dnaStorage.findNode('wall_camera_barrier')
if cameraBarrier is None:
raise DNAError.DNAError('DNAFlatBuilding requires that there is a wall_camera_barrier in storage')
cameraBarrier = cameraBarrier.copyTo(internalNode, 0)
cameraBarrier.setScale((1, 1, DNAFlatBuilding.currentWallHeight))
internalNode.flattenStrong()
collisionNode = node.find('**/door_*/+CollisionNode')
if not collisionNode.isEmpty():
collisionNode.setName('KnockKnockDoorSphere_' + dnaStorage.getBlock(self.getName()))
cameraBarrier.wrtReparentTo(nodePath, 0)
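            # Gather walls and their decal geometry (windows, doors,
            # cornices) separately so each group can be flattened on its
            # own before being recombined under a DecalEffect.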
wallCollection = internalNode.findAllMatches('wall*')
wallHolder = node.attachNewNode('wall_holder')
wallDecal = node.attachNewNode('wall_decal')
windowCollection = internalNode.findAllMatches('**/window*')
doorCollection = internalNode.findAllMatches('**/door*')
corniceCollection = internalNode.findAllMatches('**/cornice*_d')
wallCollection.reparentTo(wallHolder)
windowCollection.reparentTo(wallDecal)
doorCollection.reparentTo(wallDecal)
corniceCollection.reparentTo(wallDecal)
for i in xrange(wallHolder.getNumChildren()):
iNode = wallHolder.getChild(i)
iNode.clearTag('DNACode')
iNode.clearTag('DNARoot')
wallHolder.flattenStrong()
wallDecal.flattenStrong()
holderChild0 = wallHolder.getChild(0)
wallDecal.getChildren().reparentTo(holderChild0)
holderChild0.reparentTo(internalNode)
holderChild0.setEffect(DecalEffect.make())
wallHolder.removeNode()
wallDecal.removeNode()
self.setupSuitFlatBuilding(nodePath, dnaStorage)
self.setupCogdoFlatBuilding(nodePath, dnaStorage)
node.flattenStrong()
| Spiderlover/Toontown | toontown/dna/DNAFlatBuilding.py | Python | mit | 5,943 |
# -*- coding: utf-8 -*-
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18 messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
if sys.version_info[0] > 2:
basestring = (str, )
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package name for the translation messages. The messages data must loaded
# relative to that package name. In the top of this package should be
# directories named after for each script/message bundle, and each directory
# should contain JSON files called <lang>.json
_messages_package_name = 'scripts.i18n'
# Flag to indicate whether translation messages are available
_messages_available = None
# Cache of translated messages
_cache = defaultdict(dict)
def set_messages_package(package_name):
"""Set the package name where i18n messages are located."""
global _messages_package_name
global _messages_available
_messages_package_name = package_name
_messages_available = None
def messages_available():
"""
Return False if there are no i18n messages available.
To determine if messages are available, it looks for the package name
set using L{set_messages_package} for a message bundle called 'pywikibot'
containing messages.
@rtype: bool
"""
global _messages_available
if _messages_available is not None:
return _messages_available
try:
__import__(_messages_package_name)
except ImportError:
_messages_available = False
return False
_messages_available = True
return True
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr', 'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code in ['als', 'gsw']:
return ['als', 'gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code in ['fiu-vro', 'vro']:
return ['fiu-vro', 'vro', 'et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
if code in ['bh', 'bho']:
return ['bh', 'bho']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no', 'nb']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg', 'sgs']:
return ['bat-smg', 'sgs', 'lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
    if code == 'nds-nl':
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['roa-rup', 'rup']:
return ['roa-rup', 'rup', 'ro']
if code == 'mo':
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old', 'be-tarask']:
return ['be', 'be-x-old', 'be-tarask', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'crh-latn']:
return ['crh', 'crh-latn', 'uk', 'ru']
if code in ['rue']:
return ['uk', 'ru']
# Chinese
if code in ['zh-classical', 'lzh', 'minnan', 'zh-min-nan', 'nan', 'zh-tw',
'zh', 'zh-hans']:
return ['zh', 'zh-hans', 'zh-tw', 'zh-cn', 'zh-classical', 'lzh']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-classical', 'lzh',
'zh-cn', 'zh-yue', 'yue']:
        return ['zh', 'zh-hans', 'zh-cn', 'zh-tw', 'zh-classical', 'lzh']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['no', 'nb']:
return ['no', 'nb', 'da', 'nn', 'sv']
if code == 'se':
return ['sv', 'no', 'nb', 'nn', 'fi']
# Other languages
if code in ['bi', 'tpi']:
return ['bi', 'tpi']
if code == 'yi':
return ['he', 'de']
if code in ['ia', 'ie']:
return ['ia', 'la', 'it', 'fr', 'es']
if code == 'xmf':
return ['ka']
if code in ['nso', 'st']:
return ['st', 'nso']
if code in ['kj', 'ng']:
return ['kj', 'ng']
if code in ['meu', 'hmo']:
return ['meu', 'hmo']
    if code == 'as':
return ['bn']
# Default value
return []
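# Example: _altlang('nds') returns ['nds-nl', 'de'], so a message missing in
# Low German is tried in Dutch Low Saxon and then German before the
# 'en'/'_default' last resort applied by the calling functions.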
class TranslationError(Error, ImportError):
"""Raised when no correct translation could be found."""
# Inherits from ImportError, as this exception is now used
# where previously an ImportError would have been raised,
# and may have been caught by scripts as such.
pass
def _get_translation(lang, twtitle):
"""
Return message of certain twtitle if exists.
For internal use, don't use it directly.
"""
if twtitle in _cache[lang]:
return _cache[lang][twtitle]
message_bundle = twtitle.split('-')[0]
trans_text = None
filename = '%s/%s.json' % (message_bundle, lang)
try:
trans_text = pkgutil.get_data(
_messages_package_name, filename).decode('utf-8')
except (OSError, IOError): # file open can cause several exceptions
_cache[lang][twtitle] = None
return
transdict = json.loads(trans_text)
_cache[lang].update(transdict)
try:
return transdict[twtitle]
except KeyError:
return
def _extract_plural(code, message, parameters):
"""Check for the plural variants in message and replace them.
@param message: the message to be replaced
@type message: unicode string
@param parameters: plural parameters passed from other methods
@type parameters: int, basestring, tuple, list, dict
"""
plural_items = re.findall(PLURAL_PATTERN, message)
if plural_items: # we found PLURAL patterns, process it
if len(plural_items) > 1 and isinstance(parameters, (tuple, list)) and \
len(plural_items) != len(parameters):
raise ValueError("Length of parameter does not match PLURAL "
"occurrences.")
i = 0
for selector, variants in plural_items:
if isinstance(parameters, dict):
num = int(parameters[selector])
elif isinstance(parameters, basestring):
num = int(parameters)
elif isinstance(parameters, (tuple, list)):
num = int(parameters[i])
i += 1
else:
num = parameters
# TODO: check against plural_rules[code]['nplurals']
try:
index = plural_rules[code]['plural'](num)
except KeyError:
index = plural_rules['_default']['plural'](num)
except TypeError:
# we got an int, not a function
index = plural_rules[code]['plural']
repl = variants.split('|')[index]
message = re.sub(PLURAL_PATTERN, repl, message, count=1)
return message
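# Illustrative behavior of _extract_plural, mirroring the doctests in
# twntranslate below (placeholders outside the PLURAL tag are substituted
# later by the caller):
#     _extract_plural('en', 'Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.', {'num': 1})
#     # -> 'Bot: Changing %(num)s page.'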
DEFAULT_FALLBACK = ('_default', )
def translate(code, xdict, parameters=None, fallback=False):
"""Return the most appropriate translation from a translation dict.
Given a language code and a dictionary, returns the dictionary's value for
key 'code' if this key exists; otherwise tries to return a value for an
alternative language that is most applicable to use on the wiki in
    language 'code', unless fallback is False.
The language itself is always checked first, then languages that
have been defined to be alternatives, and finally English. If none of
    the options gives a result, one language is taken from xdict arbitrarily
    (so the result may not always be the same). When fallback is an iterable,
    None is returned if no code applies (instead of picking an arbitrary one).
For PLURAL support have a look at the twntranslate method
@param code: The language code
@type code: string or Site object
@param xdict: dictionary with language codes as keys or extended dictionary
with family names as keys containing language dictionaries or
a single (unicode) string. May contain PLURAL tags as
described in twntranslate
@type xdict: dict, string, unicode
@param parameters: For passing (plural) parameters
@type parameters: dict, string, unicode, int
@param fallback: Try an alternate language code. If it's iterable it'll
also try those entries and choose the first match.
@type fallback: boolean or iterable
"""
family = pywikibot.config.family
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
family = code.family.name
code = code.code
# Check whether xdict has multiple projects
if isinstance(xdict, dict):
if family in xdict:
xdict = xdict[family]
elif 'wikipedia' in xdict:
xdict = xdict['wikipedia']
# Get the translated string
if not isinstance(xdict, dict):
trans = xdict
elif not xdict:
trans = None
else:
codes = [code]
if fallback is True:
codes += _altlang(code) + ['_default', 'en']
elif fallback is not False:
codes += list(fallback)
for code in codes:
if code in xdict:
trans = xdict[code]
break
else:
if fallback is not True:
# this shouldn't simply return "any one" code but when fallback
# was True before 65518573d2b0, it did just that. When False it
# did just return None. It's now also returning None in the new
# iterable mode.
return
code = list(xdict.keys())[0]
trans = xdict[code]
if trans is None:
return # return None if we have no translation found
if parameters is None:
return trans
# else we check for PLURAL variants
trans = _extract_plural(code, trans, parameters)
if parameters:
try:
return trans % parameters
except (KeyError, TypeError):
# parameter is for PLURAL variants only, don't change the string
pass
return trans
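# A minimal sketch of translate() with an ad-hoc dictionary (illustrative
# values only):
#     msg = {'de': 'Hallo', 'en': 'Hello'}
#     translate('de', msg)                  # -> 'Hallo'
#     translate('nds', msg, fallback=True)  # -> 'Hallo', via _altlang('nds')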
def twtranslate(code, twtitle, parameters=None, fallback=True):
"""
Translate a message.
The translations are retrieved from json files in messages_package_name.
fallback parameter must be True for i18n and False for L10N or testing
purposes.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing parameters.
@param fallback: Try an alternate language code
@type fallback: boolean
"""
if not messages_available():
raise TranslationError(
'Unable to load messages package %s for bundle %s'
'\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n'
% (_messages_package_name, twtitle))
code_needed = False
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
lang = code.code
# check whether we need the language code back
elif isinstance(code, list):
lang = code.pop()
code_needed = True
else:
lang = code
# There are two possible failure modes: the translation dict might not have
# the language altogether, or a specific key could be untranslated. Both
# modes are caught with the KeyError.
langs = [lang]
if fallback:
langs += _altlang(lang) + ['en']
for alt in langs:
trans = _get_translation(alt, twtitle)
if trans:
break
else:
raise TranslationError(
'No English translation has been defined for TranslateWiki key'
' %r\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n' % twtitle)
# send the language code back via the given list
if code_needed:
code.append(alt)
if parameters:
return trans % parameters
else:
return trans
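# Illustrative call of twtranslate (hypothetical message key): with
# twtitle='mybot-changing', the bundle is 'mybot', so the lookup reads
# scripts/i18n/mybot/<lang>.json and, with fallback=True, walks
# _altlang(<lang>) + ['en'] until a translation is found:
#     twtranslate('de', 'mybot-changing', {'num': 2})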
# Maybe this function should be merged with twtranslate
def twntranslate(code, twtitle, parameters=None):
r"""Translate a message with plural support.
Support is implemented like in MediaWiki extension. If the TranslateWiki
message contains a plural tag inside which looks like::
{{PLURAL:<number>|<variant1>|<variant2>[|<variantn>]}}
it takes that variant calculated by the plural_rules depending on the number
value. Multiple plurals are allowed.
    As an example, if we had several json dictionaries in a test folder like:
en.json:
{
"test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.",
}
fr.json:
{
"test-plural": "Robot: Changer %(descr)s {{PLURAL:num|une page|quelques pages}}.",
}
and so on.
>>> from pywikibot import i18n
>>> i18n.set_messages_package('tests.i18n')
>>> # use a number
>>> str(i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'})
'Bot: Changing no pages.'
>>> # use a string
>>> str(i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'})
'Bot: Changing one page.'
>>> # use a dictionary
>>> str(i18n.twntranslate('en', 'test-plural', {'num':2}))
'Bot: Changing 2 pages.'
>>> # use additional format strings
>>> str(i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}))
'Robot: Changer seulement une page.'
>>> # use format strings also outside
>>> str(i18n.twntranslate('fr', 'test-plural', 10) % {'descr': 'seulement'})
'Robot: Changer seulement quelques pages.'
    The translations are retrieved from i18n.<package>, based on the caller's
import table.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing (plural) parameters.
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
# we send the code via list and get the alternate code back
code = [code]
trans = twtranslate(code, twtitle)
# get the alternate language code modified by twtranslate
lang = code.pop()
# check for PLURAL variants
trans = _extract_plural(lang, trans, parameters)
# we always have a dict for replacement of translatewiki messages
if parameters and isinstance(parameters, dict):
try:
return trans % parameters
except KeyError:
# parameter is for PLURAL variants only, don't change the string
pass
return trans
def twhas_key(code, twtitle):
"""
Check if a message has a translation in the specified language code.
    The translations are retrieved from i18n.<package>, based on the caller's
import table.
No code fallback is made.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
transdict = _get_translation(code, twtitle)
if transdict is None:
return False
return True
def twget_keys(twtitle):
"""
Return all language codes for a special message.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# obtain the directory containing all the json files for this package
package = twtitle.split("-")[0]
mod = __import__(_messages_package_name, fromlist=[str('__file__')])
pathname = os.path.join(os.path.dirname(mod.__file__), package)
# build a list of languages in that directory
langs = [filename.partition('.')[0]
for filename in sorted(os.listdir(pathname))
if filename.endswith('.json')]
    # exclude languages that do not have this specific message in that package
# i.e. an incomplete set of translated messages.
return [lang for lang in langs
if lang != 'qqq' and
_get_translation(lang, twtitle)]
def input(twtitle, parameters=None, password=False, fallback_prompt=None):
"""
Ask the user a question, return the user's answer.
The prompt message is retrieved via L{twtranslate} and either uses the
config variable 'userinterface_lang' or the default locale as the language
code.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: The values which will be applied to the translated text
@param password: Hides the user's input (for password entry)
@param fallback_prompt: The English prompt if i18n is not available.
@rtype: unicode string
"""
if not messages_available():
if not fallback_prompt:
raise TranslationError(
'Unable to load messages package %s for bundle %s'
% (_messages_package_name, twtitle))
else:
prompt = fallback_prompt
else:
code = config.userinterface_lang or \
locale.getdefaultlocale()[0].split('_')[0]
prompt = twtranslate(code, twtitle, parameters)
return pywikibot.input(prompt, password)
| emijrp/pywikibot-core | pywikibot/i18n.py | Python | mit | 21,745 |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from pubnub import PubnubTornado as Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'hello_world'
# Asynchronous usage
def callback(message):
print(message)
pubnub.here_now(channel, callback=callback, error=callback)
pubnub.start()
| teddywing/pubnub-python | python-tornado/examples/here-now.py | Python | mit | 1,031 |
# -*- coding: utf-8 -*-
import numbers
import numpy as np
from ..constants import BOLTZMANN_IN_MEV_K
from ..energy import Energy
class Analysis(object):
r"""Class containing methods for the Data class
Attributes
----------
detailed_balance_factor
Methods
-------
integrate
position
width
scattering_function
dynamic_susceptibility
estimate_background
get_keys
get_bounds
"""
@property
def detailed_balance_factor(self):
r"""Returns the detailed balance factor (sometimes called the Bose
factor)
Parameters
----------
None
Returns
-------
dbf : ndarray
The detailed balance factor (temperature correction)
"""
return 1. - np.exp(-self.Q[:, 3] / BOLTZMANN_IN_MEV_K / self.temp)
def integrate(self, bounds=None, background=None, hkle=True):
r"""Returns the integrated intensity within given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : float
The integrated intensity either over all data, or within
specified boundaries
"""
result = 0
for key in self.get_keys(hkle):
result += np.trapz(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background),
np.squeeze(self.data[key][self.get_bounds(bounds)]))
return result
def position(self, bounds=None, background=None, hkle=True):
r"""Returns the position of a peak within the given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : tup
The result is a tuple with position in each dimension of Q,
(h, k, l, e)
"""
result = ()
for key in self.get_keys(hkle):
_result = 0
for key_integrate in self.get_keys(hkle):
_result += np.trapz(self.data[key][self.get_bounds(bounds)] *
(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
result += (np.squeeze(_result),)
if hkle:
return result
else:
return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
def width(self, bounds=None, background=None, fwhm=False, hkle=True):
r"""Returns the mean-squared width of a peak within the given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
fwhm : bool, optional
If True, returns width in fwhm, otherwise in mean-squared width.
Default: False
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : tup
The result is a tuple with the width in each dimension of Q,
(h, k, l, e)
"""
result = ()
for key in self.get_keys(hkle):
_result = 0
for key_integrate in self.get_keys(hkle):
_result += np.trapz((self.data[key][self.get_bounds(bounds)] -
self.position(bounds, background, hkle=False)[key]) ** 2 *
(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
if fwhm:
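                # convert the mean-squared width to FWHM: 2 * sqrt(2 * ln 2) * sigma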
result += (np.sqrt(np.squeeze(_result)) * 2. * np.sqrt(2. * np.log(2.)),)
else:
result += (np.squeeze(_result),)
if hkle:
return result
else:
return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
def scattering_function(self, material, ei):
r"""Returns the neutron scattering function, i.e. the detector counts
scaled by :math:`4 \pi / \sigma_{\mathrm{tot}} * k_i/k_f`.
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts scaled by the total scattering cross section
and ki/kf
"""
ki = Energy(energy=ei).wavevector
kf = Energy(energy=ei - self.e).wavevector
return 4 * np.pi / material.total_scattering_cross_section * ki / kf * self.detector
def dynamic_susceptibility(self, material, ei):
r"""Returns the dynamic susceptibility
:math:`\chi^{\prime\prime}(\mathbf{Q},\hbar\omega)`
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts turned into the scattering function multiplied
by the detailed balance factor
"""
return self.scattering_function(material, ei) * self.detailed_balance_factor
def estimate_background(self, bg_params):
r"""Estimate the background according to ``type`` specified.
Parameters
----------
bg_params : dict
Input dictionary has keys 'type' and 'value'. Types are
* 'constant' : background is the constant given by 'value'
* 'percent' : background is estimated by the bottom x%, where x
is value
            * 'minimum' : background is estimated as the minimum of the detector counts
Returns
-------
background : float or ndarray
Value determined to be the background. Will return ndarray only if
`'type'` is `'constant'` and `'value'` is an ndarray
"""
if isinstance(bg_params, type(None)):
return 0
elif isinstance(bg_params, numbers.Number):
return bg_params
elif bg_params['type'] == 'constant':
return bg_params['value']
elif bg_params['type'] == 'percent':
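            # average the lowest 'value' percent of the non-negative counts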
inten = self.intensity[self.intensity >= 0.]
Npts = int(inten.size * (bg_params['value'] / 100.))
min_vals = inten[np.argsort(inten)[:Npts]]
background = np.average(min_vals)
return background
elif bg_params['type'] == 'minimum':
return min(self.intensity)
else:
return 0
def get_bounds(self, bounds):
r"""Generates a to_fit tuple if bounds is present in kwargs
Parameters
----------
bounds : dict
Returns
-------
to_fit : tuple
Tuple of indices
"""
if bounds is not None:
return np.where(bounds)
else:
return np.where(self.Q[:, 0])
def get_keys(self, hkle):
r"""Returns all of the Dictionary key names
Parameters
----------
hkle : bool
If True only returns keys for h,k,l,e, otherwise returns all keys
Returns
-------
keys : list
:py:attr:`.Data.data` dictionary keys
"""
if hkle:
return [key for key in self.data if key in self.Q_keys.values()]
else:
return [key for key in self.data if key not in self.data_keys.values()]
| neutronpy/neutronpy | neutronpy/data/analysis.py | Python | mit | 8,726 |
from itertools import imap, chain
def set_name(name, f):
try:
f.__pipetools__name__ = name
except (AttributeError, UnicodeEncodeError):
pass
return f
def get_name(f):
from pipetools.main import Pipe
pipetools_name = getattr(f, '__pipetools__name__', None)
if pipetools_name:
return pipetools_name() if callable(pipetools_name) else pipetools_name
if isinstance(f, Pipe):
return repr(f)
return f.__name__ if hasattr(f, '__name__') else repr(f)
def repr_args(*args, **kwargs):
return ', '.join(chain(
imap('{0!r}'.format, args),
imap('{0[0]}={0[1]!r}'.format, kwargs.iteritems())))
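# e.g. repr_args(1, 'a', x=2) -> "1, 'a', x=2"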
| katakumpo/pipetools | pipetools/debug.py | Python | mit | 672 |
import unittest
from PyFoam.Basics.MatplotlibTimelines import MatplotlibTimelines
theSuite=unittest.TestSuite()
| Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam | unittests/Basics/test_MatplotlibTimelines.py | Python | gpl-2.0 | 114 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class sblim_sfcb(test.test):
"""
Autotest module for testing basic functionality
of sblim_sfcb
@author Wang Tao <[email protected]>
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
if not sm.check_installed('gcc'):
logging.debug("gcc missing - trying to install")
sm.install('gcc')
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/sblim_sfcb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./sblim-sfcb-test.sh'], cwd="%s/sblim_sfcb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| rajashreer7/autotest-client-tests | linux-tools/sblim_sfcb/sblim_sfcb.py | Python | gpl-2.0 | 1,610 |
"""
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/bsddb/test/test_compat.py | Python | gpl-2.0 | 3,862 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2010 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# GTK classes
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
_TAB = Gdk.keyval_from_name("Tab")
_ENTER = Gdk.keyval_from_name("Enter")
#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
from .surnamemodel import SurnameModel
from .embeddedlist import EmbeddedList, TEXT_EDIT_COL
from ...ddtargets import DdTargets
from gramps.gen.lib import Surname, NameOriginType
from ...utils import match_primary_mask, no_match_primary_mask
# table for skipping illegal control chars
INVISIBLE = dict.fromkeys(list(range(32)))
#-------------------------------------------------------------------------
#
# SurnameTab
#
#-------------------------------------------------------------------------
class SurnameTab(EmbeddedList):
_HANDLE_COL = 5
_DND_TYPE = DdTargets.SURNAME
_MSG = {
'add' : _('Create and add a new surname'),
'del' : _('Remove the selected surname'),
'edit' : _('Edit the selected surname'),
'up' : _('Move the selected surname upwards'),
'down' : _('Move the selected surname downwards'),
}
#index = column in model. Value =
# (name, sortcol in model, width, markup/text
_column_names = [
(_('Prefix'), 0, 150, TEXT_EDIT_COL, -1, None),
(_('Surname'), 1, -1, TEXT_EDIT_COL, -1, None),
(_('Connector'), 2, 100, TEXT_EDIT_COL, -1, None),
]
_column_combo = (_('Origin'), -1, 150, 3) # name, sort, width, modelcol
_column_toggle = (_('Primary', 'Name'), -1, 80, 4)
def __init__(self, dbstate, uistate, track, name, on_change=None,
top_label='<b>%s</b>' % _("Multiple Surnames") ):
self.obj = name
self.on_change = on_change
self.curr_col = -1
self.curr_cellr = None
self.curr_celle = None
EmbeddedList.__init__(self, dbstate, uistate, track, _('Family Surnames'),
SurnameModel, move_buttons=True,
top_label=top_label)
def build_columns(self):
#first the standard text columns with normal method
EmbeddedList.build_columns(self)
# now we add the two special columns
# combobox for type
colno = len(self.columns)
name = self._column_combo[0]
renderer = Gtk.CellRendererCombo()
renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
# set up the comboentry editable
no = NameOriginType()
self.cmborig = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING)
self.cmborigmap = no.get_map().copy()
#sort the keys based on the value
keys = sorted(self.cmborigmap, key=lambda x: glocale.sort_key(self.cmborigmap[x]))
for key in keys:
if key != no.get_custom():
self.cmborig.append(row=[key, self.cmborigmap[key]])
additional = self.dbstate.db.get_origin_types()
if additional:
for type in additional:
if type:
self.cmborig.append(row=[no.get_custom(), type])
renderer.set_property("model", self.cmborig)
renderer.set_property("text-column", 1)
renderer.set_property('editable', not self.dbstate.db.readonly)
renderer.connect('editing_started', self.on_edit_start_cmb, colno)
renderer.connect('edited', self.on_orig_edited, self._column_combo[3])
# add to treeview
column = Gtk.TreeViewColumn(name, renderer, text=self._column_combo[3])
column.set_resizable(True)
column.set_sort_column_id(self._column_combo[1])
column.set_min_width(self._column_combo[2])
column.set_expand(False)
self.columns.append(column)
self.tree.append_column(column)
# toggle box for primary
colno += 1
name = self._column_toggle[0]
renderer = Gtk.CellRendererToggle()
renderer.set_property('activatable', True)
renderer.set_property('radio', True)
renderer.connect( 'toggled', self.on_prim_toggled, self._column_toggle[3])
# add to treeview
column = Gtk.TreeViewColumn(name, renderer, active=self._column_toggle[3])
column.set_resizable(False)
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
column.set_alignment(0.5)
column.set_sort_column_id(self._column_toggle[1])
column.set_max_width(self._column_toggle[2])
self.columns.append(column)
self.tree.append_column(column)
## def by_value(self, first, second):
## """
## Method for sorting keys based on the values.
## """
## fvalue = self.cmborigmap[first]
## svalue = self.cmborigmap[second]
## return glocale.strcoll(fvalue, svalue)
def setup_editable_col(self):
"""
inherit this and set the variables needed for editable columns
Variable edit_col_funcs needs to be a dictionary from model col_nr to
function to call for
Example:
self.edit_col_funcs ={1: {'edit_start': self.on_edit_start,
'edited': self.on_edited
}}
"""
self.edit_col_funcs = {
0: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline},
1: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline},
2: {'edit_start': self.on_edit_start,
'edited': self.on_edit_inline}}
def get_data(self):
return self.obj.get_surname_list()
def is_empty(self):
return len(self.model)==0
def _get_surn_from_model(self):
"""
Return new surname_list for storing in the name based on content of
the model
"""
new_list = []
for idx in range(len(self.model)):
node = self.model.get_iter(idx)
surn = self.model.get_value(node, 5)
surn.set_prefix(self.model.get_value(node, 0))
surn.set_surname(self.model.get_value(node, 1))
surn.set_connector(self.model.get_value(node, 2))
surn.get_origintype().set(self.model.get_value(node, 3))
surn.set_primary(self.model.get_value(node, 4))
new_list += [surn]
return new_list
def update(self):
"""
Store the present data in the model to the name object
"""
new_map = self._get_surn_from_model()
self.obj.set_surname_list(new_map)
# update name in previews
if self.on_change:
self.on_change()
def post_rebuild(self, prebuildpath):
"""
Called when data model has changed, in particular necessary when row
order is updated.
@param prebuildpath: path selected before rebuild, None if none
@type prebuildpath: tree path
"""
if self.on_change:
self.on_change()
def column_order(self):
# order of columns for EmbeddedList. Only the text columns here
return ((1, 0), (1, 1), (1, 2))
def add_button_clicked(self, obj):
"""Add button is clicked, add a surname to the person"""
prim = False
if len(self.obj.get_surname_list()) == 0:
prim = True
node = self.model.append(row=['', '', '', str(NameOriginType()), prim,
Surname()])
self.selection.select_iter(node)
path = self.model.get_path(node)
self.tree.set_cursor_on_cell(path,
focus_column=self.columns[0],
focus_cell=None,
start_editing=True)
self.update()
def del_button_clicked(self, obj):
"""
Delete button is clicked. Remove from the model
"""
(model, node) = self.selection.get_selected()
if node:
self.model.remove(node)
self.update()
def on_edit_start(self, cellr, celle, path, colnr):
""" start of editing. Store stuff so we know when editing ends where we
are
"""
self.curr_col = colnr
self.curr_cellr = cellr
self.curr_celle = celle
def on_edit_start_cmb(self, cellr, celle, path, colnr):
"""
An edit starts in the origin type column
This means a cmb has been created as celle, and we can set up the stuff
we want this cmb to contain: autocompletion, stop edit when selection
in the cmb happens.
"""
self.on_edit_start(cellr, celle, path, colnr)
#set up autocomplete
entry = celle.get_child()
entry.set_width_chars(10)
completion = Gtk.EntryCompletion()
completion.set_model(self.cmborig)
completion.set_minimum_key_length(1)
completion.set_text_column(1)
entry.set_completion(completion)
#
celle.connect('changed', self.on_origcmb_change, path, colnr)
def on_edit_start_toggle(self, cellr, celle, path, colnr):
"""
Edit
"""
self.on_edit_start(cellr, celle, path, colnr)
def on_edit_inline(self, cell, path, new_text, colnr):
"""
        Edit is happening. The model and the surname objects are updated.
colnr must be the column in the model.
"""
node = self.model.get_iter(path)
text = new_text.translate(INVISIBLE).strip()
self.model.set_value(node, colnr, text)
self.update()
def on_orig_edited(self, cellr, path, new_text, colnr):
"""
An edit is finished in the origin type column. For a cmb in an editor,
the model may only be updated when typing is finished, as editing stops
automatically on update of the model.
colnr must be the column in the model.
"""
self.on_edit_inline(cellr, path, new_text, colnr)
def on_origcmb_change(self, cmb, path, colnr):
"""
        A selection occurred in the cmb of the origin type column. colnr must
be the column in the model.
"""
act = cmb.get_active()
if act == -1:
return
self.on_orig_edited(None, path,
self.cmborig.get_value(
self.cmborig.get_iter((act,)),1),
colnr)
def on_prim_toggled(self, cell, path, colnr):
"""
Primary surname on path is toggled. colnr must be the col
in the model
"""
#obtain current value
node = self.model.get_iter(path)
old_val = self.model.get_value(node, colnr)
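        # keep exactly one primary surname: toggling the current primary is
        # a no-op, otherwise the toggled row becomes True and all other
        # rows are cleared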
for nr in range(len(self.obj.get_surname_list())):
if nr == int(path[0]):
if old_val:
#True remains True
break
else:
#This value becomes True
self.model.set_value(self.model.get_iter((nr,)), colnr, True)
else:
self.model.set_value(self.model.get_iter((nr,)), colnr, False)
self.update()
return
def edit_button_clicked(self, obj):
""" Edit button clicked
"""
(model, node) = self.selection.get_selected()
if node:
path = self.model.get_path(node)
self.tree.set_cursor_on_cell(path,
focus_column=self.columns[0],
focus_cell=None,
start_editing=True)
def key_pressed(self, obj, event):
"""
Handles the key being pressed.
Here we make sure tab moves to next or previous value in row on TAB
"""
if not EmbeddedList.key_pressed(self, obj, event):
if event.type == Gdk.EventType.KEY_PRESS and event.keyval in (_TAB,):
if no_match_primary_mask(event.get_state(),
Gdk.ModifierType.SHIFT_MASK):
return self.next_cell()
elif match_primary_mask(event.get_state(), Gdk.ModifierType.SHIFT_MASK):
return self.prev_cell()
else:
return
else:
return
return True
def next_cell(self):
"""
Move to the next cell to edit it
"""
(model, node) = self.selection.get_selected()
if node:
path = self.model.get_path(node).get_indices()[0]
nccol = self.curr_col+1
if nccol < 4:
if self.curr_celle:
self.curr_celle.editing_done()
self.tree.set_cursor_on_cell(Gtk.TreePath((path,)),
focus_column=self.columns[nccol],
focus_cell=None,
start_editing=True)
elif nccol == 4:
#go to next line if there is one
if path < len(self.obj.get_surname_list()):
newpath = Gtk.TreePath((path+1,))
self.curr_celle.editing_done()
self.selection.select_path(newpath)
self.tree.set_cursor_on_cell(newpath,
focus_column=self.columns[0],
focus_cell=None,
start_editing=True)
else:
#stop editing
self.curr_celle.editing_done()
return
return True
def prev_cell(self):
"""
        Move to the previous cell to edit it
"""
(model, node) = self.selection.get_selected()
if node:
path = self.model.get_path(node).get_indices()[0]
if self.curr_col > 0:
self.tree.set_cursor_on_cell(Gtk.TreePath((path,)),
focus_column=self.columns[self.curr_col-1],
focus_cell=None,
start_editing=True)
elif self.curr_col == 0:
#go to prev line if there is one
if path > 0:
newpath = Gtk.TreePath((path-1,))
self.selection.select_path(newpath)
self.tree.set_cursor_on_cell(newpath,
focus_column=self.columns[-2],
focus_cell=None,
start_editing=True)
else:
#stop editing
self.curr_celle.editing_done()
return
return True
| Fedik/gramps | gramps/gui/editors/displaytabs/surnametab.py | Python | gpl-2.0 | 16,204 |
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# By David Harrison
# I was playing with doctest when I wrote this. I still haven't
# decided how useful doctest is as opposed to implementing unit tests
# directly. --Dave
if __name__ == '__main__':
import sys
sys.path = ['.','..'] + sys.path # HACK to simplify unit testing.
from BTL.translation import _
class BEGIN: # represents special BEGIN location before first next.
pass
from UserDict import DictMixin
from cmap_swig import *
import sys
from weakref import WeakKeyDictionary
LEAK_TEST = False
class CMap(object,DictMixin):
"""In-order mapping. Provides same operations and behavior as a dict,
but provides in-order iteration. Additionally provides operations to
find the nearest key <= or >= a given key.
This provides a significantly wider set of operations than
berkeley db BTrees, but it provides no means for persistence.
LIMITATION: The key must be a python numeric type, e.g., an integer
or a float. The value can be any python object.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: n/a
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CMap assumes there are few iterators in existence at
any given time.
Iterators are not invalidated by insertions. Iterators are
invalidated by deletions only when the key-value pair
referenced is deleted. Deletion has a '+k' because the
__delitem__ searches linearly through the set of iterators
pointing into this map to find any iterator pointing at the
deleted item and then invalidates the iterator.
This class is backed by the C++ STL map class, but conforms
to the Python container interface."""
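    # Illustrative sketch of the nearest-key operations described above
    # (a hedged example, assuming the swig-backed cmap_swig module is
    # built and importable; see the lower_bound/upper_bound doctests for
    # the exact semantics):
    #
    #   m = CMap()
    #   m[3.0] = 'a'
    #   m[7.0] = 'b'
    #   i = m.lower_bound(5)    # first key >= 5
    #   i.key(), i.value()      # -> (7.0, 'b')
    #   j = m.upper_bound(7)    # first key > 7, i.e. end() here
    #   j == m.end()            # -> True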
class _AbstractIterator:
"""Iterates over elements in the map in order."""
def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
"""Creates an iterator pointing to element si in map m.
Do not instantiate directly. Use iterkeys, itervalues, or
iteritems.
The _AbstractIterator takes ownership of any C++ iterator
(i.e., the swig object 'si') and will deallocate it when
the iterator is deallocated.
Examples of typical behavior:
>>> from CMap import *
>>> m = CMap()
>>> m[12] = 6
>>> m[9] = 4
>>> for k in m:
... print int(k)
...
9
12
>>>
Example edge cases (empty map):
>>> from CMap import *
>>> m = CMap()
>>> try:
... i = m.__iter__()
... i.value()
... except IndexError:
... print 'IndexError.'
...
IndexError.
>>> try:
... i.next()
... except StopIteration:
... print 'stopped'
...
stopped
            @param m: CMap.
            @param si: swig iterator that this iterator will point at. If
                       None then the iterator points to end(). If BEGIN
                       then the iterator points to one before the beginning.
"""
assert isinstance(m, CMap)
assert not isinstance(si, CMap._AbstractIterator)
if si == None:
self._si = map_end(m._smap)
else:
self._si = si # C++ iterator wrapped by swig.
self._map = m
m._iterators[self] = 1 # using map as set of weak references.
def __hash__(self):
return id(self)
def __cmp__(self, other):
if not self._si or not other._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN and other._si == BEGIN: return 0
if self._si == BEGIN and other._si != BEGIN: return -1
elif self._si != BEGIN and other._si == BEGIN: return 1
return iter_cmp(self._map._smap, self._si, other._si )
def at_begin(self):
"""equivalent to self == m.begin() where m is a CMap.
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.begin()
>>> i == m.begin()
True
>>> i.at_begin()
True
>>> i == m.end() # no elements so begin()==end()
True
>>> i.at_end()
True
>>> m[6] = 'foo' # insertion does not invalidate iterators.
>>> i = m.begin()
>>> i == m.end()
False
>>> i.value()
'foo'
>>> try: # test at_begin when not at beginning.
... i.next()
... except StopIteration:
... print 'ok'
ok
>>> i.at_begin()
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN: # BEGIN is one before begin(). Yuck!!
return False
return map_iter_at_begin(self._map._smap, self._si)
def at_end(self):
"""equivalent to self == m.end() where m is a CMap, but
at_end is faster because it avoids the dynamic memory
        allocation in m.end().
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'foo'
>>> i = m.end() # test when at end.
>>> i == m.end()
True
>>> i.at_end()
True
>>> int(i.prev())
6
>>> i.at_end() # testing when not at end.
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
return False
return map_iter_at_end(self._map._smap, self._si)
def key(self):
"""@return: the key of the key-value pair referenced by this
iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to .next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_key(self._si)
def value(self):
"""@return: the value of the key-value pair currently referenced
by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_value(self._si)
def item(self):
"""@return the key-value pair referenced by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
return self.key(), self.value()
def _next(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
self._si = map_begin(self._map._smap)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
return
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
iter_incr(self._si)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
def _prev(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise StopIteration()
elif map_iter_at_begin(self._map._smap, self._si):
self._si = BEGIN
raise StopIteration
iter_decr(self._si)
def __del__(self):
# Python note: if a reference to x is intentionally
# eliminated using "del x" and there are other references
# to x then __del__ does not get called at this time.
# Only when the last reference is deleted by an intentional
# "del" or when the reference goes out of scope does
# the __del__ method get called.
self._invalidate()
def _invalidate(self):
if self._si == None:
return
try:
del self._map._iterators[self]
except KeyError:
pass # could've been removed because weak reference,
# and because _invalidate is called from __del__.
if self._si != BEGIN:
iter_delete(self._si)
self._si = None
def __iter__(self):
"""If the iterator is itself iteratable then we do things like:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[11] = 'bar'
>>> for x in m.itervalues():
... print x
...
foo
bar
"""
return self
def __len__(self):
return len(self._map)
class KeyIterator(_AbstractIterator):
def next(self):
"""Returns the next key in the map.
Insertion does not invalidate iterators. Deletion only
invalidates an iterator if the iterator pointed at the
key-value pair being deleted.
This is implemented by moving the iterator and then
dereferencing it. If we dereferenced and then moved
then we would get the odd behavior:
Ex: I have keys [1,2,3]. The iterator i points at 1.
print i.next() # prints 1
print i.next() # prints 2
print i.prev() # prints 3
print i.prev() # prints 2
However, because we move and then dereference, when an
iterator is first created it points to nowhere
so that the first next moves to the first element.
Ex:
>>> from CMap import *
>>> m = CMap()
>>> m[5] = 1
>>> m[8] = 4
>>> i = m.__iter__()
>>> print int(i.next())
5
>>> print int(i.next())
8
>>> print int(i.prev())
5
We are still left with the odd behavior that an
iterator cannot be dereferenced until after the first next().
Ex edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.__iter__()
>>> try:
... i.prev()
... except StopIteration:
... print 'StopIteration'
...
StopIteration
>>> m[5]='a'
>>> i = m.iterkeys()
>>> int(i.next())
5
>>> try: i.next()
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.prev())
5
>>> try: int(i.prev())
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.next())
5
"""
self._next()
return self.key()
def prev(self):
"""Returns the previous key in the map.
See next() for more detail and examples.
"""
self._prev()
return self.key()
class ValueIterator(_AbstractIterator):
def next(self):
"""@return: next value in the map.
>>> from CMap import *
>>> m = CMap()
>>> m[5] = 10
>>> m[6] = 3
>>> i = m.itervalues()
>>> int(i.next())
10
>>> int(i.next())
3
"""
self._next()
return self.value()
def prev(self):
self._prev()
return self.value()
class ItemIterator(_AbstractIterator):
def next(self):
"""@return: next item in the map's key ordering.
>>> from CMap import CMap
>>> m = CMap()
>>> m[5] = 10
>>> m[6] = 3
>>> i = m.iteritems()
>>> k,v = i.next()
>>> int(k)
5
>>> int(v)
10
>>> k,v = i.next()
>>> int(k)
6
>>> int(v)
3
"""
self._next()
return self.key(), self.value()
def prev(self):
self._prev()
return self.key(), self.value()
def __init__(self, d={} ):
"""Instantiate RBTree containing values from passed dict and
ordered based on cmp.
>>> m = CMap()
>>> len(m)
0
>>> m[5]=2
>>> len(m)
1
>>> print m[5]
2
"""
#self._index = {} # to speed up searches.
self._smap = map_constructor() # C++ map wrapped by swig.
for key, value in d.items():
self[key]=value
self._iterators = WeakKeyDictionary()
# whenever node is deleted. search iterators
# for any iterator that becomes invalid.
def __contains__(self,x):
        # get()'s None default would misreport a stored value of None,
        # so probe with __getitem__ instead.
        try:
            self[x]
        except KeyError:
            return False
        return True
def __iter__(self):
"""@return: KeyIterator positioned one before the beginning of the
key ordering so that the first next() returns the first key."""
return CMap.KeyIterator(self)
def begin(self):
"""Returns an iterator pointing at first key-value pair. This
differs from iterkeys, itervalues, and iteritems which return an
iterator pointing one before the first key-value pair.
@return: key iterator to first key-value.
>>> from CMap import *
>>> m = CMap()
>>> m[5.0] = 'a'
>>> i = m.begin()
>>> int(i.key()) # raises no IndexError.
5
>>> i = m.iterkeys()
>>> try:
... i.key()
... except IndexError:
... print 'IndexError raised'
...
IndexError raised
"""
i = CMap.KeyIterator(self, map_begin(self._smap) )
return i
def end(self):
"""Returns an iterator pointing after end of key ordering.
The iterator's prev method will move to the last
key-value pair in the ordering. This in keeping with
the notion that a range is specified as [i,j) where
j is not in the range, and the range [i,j) where i==j
is an empty range.
This operation takes O(1) time.
@return: key iterator one after end.
"""
i = CMap.KeyIterator(self,None) # None means one after last node.
return i
def iterkeys(self):
return CMap.KeyIterator(self)
def itervalues(self):
return CMap.ValueIterator(self)
def iteritems(self):
return CMap.ItemIterator(self)
def __len__(self):
return map_size(self._smap)
def __str__(self):
s = "{"
first = True
for k,v in self.items():
if first:
first = False
else:
s += ", "
if type(v) == str:
s += "%s: '%s'" % (k,v)
else:
s += "%s: %s" % (k,v)
s += "}"
return s
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
# IMPL 1: without _index
return map_find(self._smap,key) # raises KeyError if key not found
# IMPL 2: with _index.
#return iter_value(self._index[key])
def __setitem__(self, key, value):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>>
"""
assert type(key) == int or type(key) == float
# IMPL 1. without _index.
map_set(self._smap,key,value)
## IMPL 2. with _index
## If using indices following allows us to perform only one search.
#i = map_insert_iter(self._smap,key,value)
#if iter_value(i) != value:
# iter_set(i,value)
#else: self._index[key] = i
## END IMPL2
def __delitem__(self, key):
"""Deletes the item with matching key from the map.
This takes O(log n + k) where n is the number of elements
in the map and k is the number of iterators pointing into the map.
Before deleting the item it linearly searches through
all iterators pointing into the map and invalidates any that
are pointing at the item about to be deleted.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> m[13] = 'bar'
>>> m[14] = 'boo'
>>> del m[12]
>>> try:
... m[12]
... except KeyError:
... print 'ok'
...
ok
>>> j = m.begin()
>>> int(j.next())
14
>>> i = m.begin()
>>> i.value()
'bar'
>>> del m[13] # delete object referenced by an iterator
>>> try:
... i.value()
... except RuntimeError:
... print 'ok'
ok
>>> j.value() # deletion should not invalidate other iterators.
'boo'
"""
#map_erase( self._smap, key ) # map_erase is dangerous. It could
# delete the node causing an iterator
# to become invalid. --Dave
si = map_find_iter( self._smap, key ) # si = swig'd iterator.
if map_iter_at_end(self._smap, si):
iter_delete(si)
raise KeyError(key)
for i in list(self._iterators):
            if i._si is not BEGIN and iter_cmp( self._smap, i._si, si ) == 0:
i._invalidate()
map_iter_erase( self._smap, si )
iter_delete(si)
#iter_delete( self._index[key] ) # IMPL 2. with _index.
#del self._index[key] # IMPL 2. with _index.
def erase(self, iter):
"""Remove item pointed to by the iterator. All iterators that
point at the erased item including the passed iterator
are immediately invalidated after the deletion completes.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> i = m.find(12)
>>> m.erase(i)
>>> len(m) == 0
True
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different CMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot erase end() iterator.") )
# invalidate iterators.
for i in list(self._iterators):
            if i._si is not iter._si and i._si is not BEGIN and \
               iter_cmp( self._smap, iter._si, i._si ) == 0:
i._invalidate()
# remove item from the map.
map_iter_erase( self._smap, iter._si )
# invalidate last iterator pointing to the deleted location in the map.
iter._invalidate()
def __del__(self):
# invalidate all iterators.
for i in list(self._iterators):
i._invalidate()
map_delete(self._smap)
def get(self, key, default=None):
"""@return value corresponding to specified key or return 'default'
if the key is not found.
"""
try:
return map_find(self._smap,key) # IMPL 1. without _index.
#return iter_value(self._index[key]) # IMPL 2. with _index.
except KeyError:
return default
def keys(self):
"""
>>> from CMap import *
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [int(x) for x in m.keys()] # m.keys() but guaranteed integers.
[4, 6]
"""
k = []
for key in self:
k.append(key)
return k
def values(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> m.values()
[7, 3]
"""
i = self.itervalues()
v = []
try:
while True:
v.append(i.next())
except StopIteration:
pass
return v
def items(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [(int(x[0]),int(x[1])) for x in m.items()]
[(4, 7), (6, 3)]
"""
i = self.iteritems()
itms = []
try:
while True:
itms.append(i.next())
except StopIteration:
pass
return itms
def has_key(self, key):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> if m.has_key(4): print 'ok'
...
ok
>>> if not m.has_key(7): print 'ok'
...
ok
"""
try:
self[key]
except KeyError:
return False
return True
def clear(self):
"""delete all entries
>>> from CMap import CMap
>>> m = CMap()
>>> m[4] = 7
>>> m.clear()
>>> print len(m)
0
"""
self.__del__()
self._smap = map_constructor()
def copy(self):
"""return shallow copy"""
return CMap(self)
def lower_bound(self,key):
"""
Finds smallest key equal to or above the lower bound.
Takes O(log n) time.
        @param key: Key of (key, value) pair to be located.
@return: Key Iterator pointing to first item equal to or greater
than key, or end() if no such item exists.
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[15] = 'bar'
>>> i = m.lower_bound(11) # iterator.
>>> int(i.key())
15
>>> i.value()
'bar'
Edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.lower_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.lower_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> i = m.lower_bound(9)
>>> if i == m.begin(): print 'ok'
...
ok
"""
return CMap.KeyIterator(self, map_lower_bound( self._smap, key ))
def upper_bound(self, key):
"""
        Finds the first key strictly greater than the passed key. In
        keeping with the [begin,end) convention, the returned iterator
        points one past the range of keys less than or equal to the
        passed key.
        Takes O(log n) time.
        @param key: Key of (key, value) pair to be located.
        @return: Iterator pointing to the first element whose key is
        strictly greater than the passed key, or end() if no such
        item exists.
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[15] = 'bar'
>>> m[17] = 'choo'
>>> i = m.upper_bound(11) # iterator.
>>> i.value()
'bar'
Edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.upper_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.upper_bound(9)
>>> i.value()
'foo'
>>> i = m.upper_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
"""
return CMap.KeyIterator(self, map_upper_bound( self._smap, key ))
def find(self,key):
"""
Finds the item with matching key and returns a KeyIterator
pointing at the item. If no match is found then returns end().
Takes O(log n) time.
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.find(10)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.find(10)
>>> int(i.key())
10
>>> i.value()
'foo'
"""
return CMap.KeyIterator(self, map_find_iter( self._smap, key ))
def update_key( self, iter, key ):
"""
Modifies the key of the item referenced by iter. If the
key change is small enough that no reordering occurs then
this takes amortized O(1) time. If a reordering occurs then
this takes O(log n).
WARNING!!! The passed iterator MUST be assumed to be invalid
upon return and should be deallocated.
Typical use:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[8] = 'bar'
>>> i = m.find(10)
>>> m.update_key(i,7) # i is assumed to be invalid upon return.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()] # reordering occurred.
[(7, 'foo'), (8, 'bar')]
>>> i = m.find(8)
>>> m.update_key(i,9) # no reordering.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()]
[(7, 'foo'), (9, 'bar')]
Edge cases:
>>> i = m.find(7)
>>> i.value()
'foo'
>>> try: # update to key already in the map.
... m.update_key(i,9)
... except KeyError:
... print 'ok'
...
ok
>>> m[7]
'foo'
>>> i = m.iterkeys()
>>> try: # updating an iter pointing at BEGIN.
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
>>> i = m.end()
>>> try: # updating an iter pointing at end().
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
"""
assert isinstance(iter,CMap._AbstractIterator)
if iter._si == BEGIN:
raise IndexError( _("Iterator does not point at key-value pair") )
if self is not iter._map:
raise IndexError(_("Iterator points into a different CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
map_iter_update_key(self._smap, iter._si, key)
def append(self, key, value):
"""Performs an insertion with the hint that it probably should
go at the end.
Raises KeyError if the key is already in the map.
>>> from CMap import CMap
>>> m = CMap()
>>> m.append(5.0,'foo') # append to empty map.
>>> len(m)
1
>>> [int(x) for x in m.keys()] # see note (1)
[5]
>>> m.append(10.0, 'bar') # append in-order
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo'), (10, 'bar')]
>>> m.append(3.0, 'coo') # out-of-order.
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> try:
... m.append(10.0, 'blah') # append key already in map.
... except KeyError:
... print 'ok'
...
ok
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>>
note (1): int(x[0]) is used because 5.0 can appear as either 5
or 5.0 depending on the version of python.
"""
map_append(self._smap,key,value)
class CIndexedMap(CMap):
"""This is an ordered mapping, exactly like CMap except that it
provides a cross-index allowing average O(1) searches based on value.
This adds the constraint that values must be unique.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: average O(1) as per dict
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CIndexedMap assumes there are few iterators in existence
at any given time.
    The value-to-iterator hash table increases the constant factor in
    the O(n) memory cost of the map.
"""
def __init__(self, dict={} ):
CMap.__init__(self,dict)
self._value_index = {} # cross-index. maps value->iterator.
def __setitem__(self, key, value):
"""
>>> from CMap import *
>>> m = CIndexedMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>> int(m.get_key_by_value('bar'))
6
>>> try:
... m[7] = 'bar'
... except ValueError:
... print 'value error'
value error
>>> m[6] = 'foo'
>>> m[6]
'foo'
>>> m[7] = 'bar'
>>> m[7]
'bar'
>>> m[7] = 'bar' # should not raise exception
>>> m[7] = 'goo'
>>> m.get_key_by_value('bar') # should return None.
>>>
"""
assert type(key) == int or type(key) == float
if self._value_index.has_key(value) and \
iter_key(self._value_index[value]) != key:
raise ValueError( _("Value %s already exists. Values must be "
"unique.") % str(value) )
si = map_insert_iter(self._smap,key,value) # si points where insert
# should occur whether
# insert succeeded or not.
# si == "swig iterator"
sival = iter_value(si)
if sival != value: # if insert failed because k already exists
iter_set(si,value) # then force set.
self._value_index[value] = si
viter = self._value_index[sival]
iter_delete(viter) # remove old value from index
del self._value_index[sival]
else: # else insert succeeded so update index.
self._value_index[value] = si
#self._index[key] = si # IMPL 2. with _index.
def __delitem__(self, key):
"""
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>> int(m.get_key_by_value('bar'))
6
>>> del m[6]
>>> if m.get_key_by_value('bar'):
... print 'found'
... else:
... print 'not found.'
not found.
"""
        si = map_find_iter( self._smap, key )
        if map_iter_at_end( self._smap, si ):
            iter_delete(si)
            raise KeyError(key)
        else:
            value = iter_value(si)
            # invalidate any python iterators pointing at the erased item.
            for it in list(self._iterators):
                if it._si is not BEGIN and \
                   iter_cmp( self._smap, it._si, si ) == 0:
                    it._invalidate()
            map_iter_erase( self._smap, si )
            viter = self._value_index[value]
            iter_delete(si)
            iter_delete( viter )
            del self._value_index[value]
#del self._index[key] # IMPL 2. with _index.
assert map_size(self._smap) == len(self._value_index)
def has_value(self, value):
return self._value_index.has_key(value)
def get_key_by_value(self, value):
"""Returns the key cross-indexed from the passed unique value, or
returns None if the value is not in the map."""
si = self._value_index.get(value) # si == "swig iterator"
if si == None: return None
return iter_key(si)
def append( self, key, value ):
"""See CMap.append
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m.append(5,'foo')
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo')]
>>> m.append(10, 'bar')
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo'), (10, 'bar')]
>>> m.append(3, 'coo') # out-of-order.
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> int(m.get_key_by_value( 'bar' ))
10
>>> try:
... m.append(10, 'blah') # append key already in map.
... except KeyError:
... print 'ok'
...
ok
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> try:
... m.append(10, 'coo') # append value already in map.
... except ValueError:
... print 'ok'
...
ok
"""
if self._value_index.has_key(value) and \
iter_key(self._value_index[value]) != key:
raise ValueError(_("Value %s already exists and value must be "
"unique.") % str(value) )
si = map_append_iter(self._smap,key,value)
if iter_value(si) != value:
iter_delete(si)
raise KeyError(key)
self._value_index[value] = si
def find_key_by_value(self, value):
"""Returns a key iterator cross-indexed from the passed unique value
or end() if no value found.
        >>> from CMap import *
>>> m = CIndexedMap()
>>> m[6] = 'abc'
>>> i = m.find_key_by_value('abc')
>>> int(i.key())
6
>>> i = m.find_key_by_value('xyz')
>>> if i == m.end(): print 'i points at end()'
i points at end()
"""
si = self._value_index.get(value) # si == "swig iterator."
if si != None:
            si = iter_copy(si)  # copy it, else operations like increment on
                                # the KeyIterator would modify the value index.
return CMap.KeyIterator(self,si)
def copy(self):
"""return shallow copy"""
return CIndexedMap(self)
def update_key( self, iter, key ):
"""
see CMap.update_key.
WARNING!! You MUST assume that the passed iterator is invalidated
upon return.
Typical use:
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m[10] = 'foo'
>>> m[8] = 'bar'
>>> i = m.find(10)
>>> m.update_key(i,7) # i is assumed to be invalid upon return.
>>> del i
>>> int(m.get_key_by_value('foo'))
7
>>> [(int(x[0]),x[1]) for x in m.items()] # reordering occurred.
[(7, 'foo'), (8, 'bar')]
>>> i = m.find(8)
>>> m.update_key(i,9) # no reordering.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()]
[(7, 'foo'), (9, 'bar')]
Edge cases:
>>> i = m.find(7)
>>> i.value()
'foo'
>>> try:
... m.update_key(i,9)
... except KeyError:
... print 'ok'
...
ok
>>> m[7]
'foo'
>>> int(m.get_key_by_value('foo'))
7
>>> i = m.iterkeys()
>>> try: # updating an iter pointing at BEGIN.
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
>>> i = m.end()
>>> try: # updating an iter pointing at end().
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different "
"CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
si = map_iter_update_key_iter(self._smap, iter._si, key)
# raises KeyError if key already in map.
if si != iter._si: # if map is reordered...
value = iter.value();
val_si = self._value_index[value]
iter_delete(val_si)
self._value_index[value] = si
def erase(self, iter):
"""Remove item pointed to by the iterator. Iterator is immediately
invalidated after the deletion completes."""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair." ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different "
"CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
value = iter.value()
CMap.erase(self,iter)
del self._value_index[value]
if __name__ == "__main__":
import doctest
import random
##############################################
# UNIT TESTS
print "Testing module"
doctest.testmod(sys.modules[__name__])
print "doctest complete."
##############################################
# MEMORY LEAK TESTS
if LEAK_TEST:
i = 0
import gc
class X:
x = range(1000) # something moderately big.
# TEST 1. This does not cause memory to grow.
#m = CMap()
#map_insert(m._smap,10,X())
#while True:
# i += 1
# it = map_find_iter( m._smap, 10 )
# iter_delete(it)
# del it
# if i % 100 == 0:
# gc.collect()
        # TEST 2: This does not cause a memory leak.
#m = map_constructor_double()
#while True:
# i += 1
# map_insert_double(m,10,5) # here
# it = map_find_iter_double( m, 10 )
# map_iter_erase_double( m, it ) # or here is the problem.
# iter_delete_double(it)
# del it
# #assert len(m) == 0
# assert map_size_double(m) == 0
# if i % 100 == 0:
# gc.collect()
# TEST 3. No memory leak
#m = CMap()
#while True:
# i += 1
# map_insert(m._smap,10,X()) # here
# it = map_find_iter( m._smap, 10 )
# map_iter_erase( m._smap, it ) # or here is the problem.
# iter_delete(it)
# del it
# assert len(m) == 0
# assert map_size(m._smap) == 0
# if i % 100 == 0:
# gc.collect()
# TEST 4: map creation and deletion.
#while True:
# m = map_constructor()
# map_delete(m);
# TEST 5: test iteration.
#m = map_constructor()
#for i in xrange(10):
# map_insert(m,i,X())
#while True:
# i = map_begin(m)
# while not map_iter_at_begin(m,i):
# iter_incr(i)
# iter_delete(i)
# TEST 6:
#m = map_constructor()
#for i in xrange(10):
# map_insert(m,i,X())
#while True:
# map_find( m, random.randint(0,9) )
# TEST 7:
#m = map_constructor()
#for i in xrange(50):
# map_insert( m, i, X() )
#while True:
# for i in xrange(50):
# map_set( m, i, X() )
# TEST 8
# aha! Another leak! Fixed.
#m = map_constructor()
#while True:
# i += 1
# map_insert(m,10,X())
# map_erase(m,10)
# assert map_size(m) == 0
# TEST 9
m = map_constructor()
for i in xrange(50):
map_insert( m, i, X() )
while True:
it = map_find_iter( m, 5 )
map_iter_update_key( m, it, 1000 )
iter_delete(it)
it = map_find_iter( m, 1000 )
map_iter_update_key( m, it, 5)
iter_delete(it)
| sulaweyo/torrentflux-b4rt-php7 | html/bin/clients/mainline/BTL/CMap.py | Python | gpl-2.0 | 44,894 |
#!/usr/bin/python
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 XenSource Ltd.
#============================================================================
import sys
import time
import re
import os
sys.path.append('/usr/lib/python')
from xen.util.xmlrpclib2 import ServerProxy
from optparse import *
from pprint import pprint
from types import DictType
from getpass import getpass
# Get default values from the environment
SERVER_URI = os.environ.get('XAPI_SERVER_URI', 'http://localhost:9363/')
SERVER_USER = os.environ.get('XAPI_SERVER_USER', '')
SERVER_PASS = os.environ.get('XAPI_SERVER_PASS', '')
MB = 1024 * 1024
HOST_INFO_FORMAT = '%-20s: %-50s'
VM_LIST_FORMAT = '%(name_label)-18s %(memory_actual)-5s %(VCPUs_number)-5s'\
' %(power_state)-10s %(uuid)-36s'
SR_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(physical_size)-10s' \
'%(type)-10s'
VDI_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(virtual_size)-8s'
VBD_LIST_FORMAT = '%(device)-6s %(uuid)-36s %(VDI)-8s'
TASK_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(status)-8s %(progress)-4s'
VIF_LIST_FORMAT = '%(name)-8s %(device)-7s %(uuid)-36s %(MAC)-10s'
CONSOLE_LIST_FORMAT = '%(uuid)-36s %(protocol)-8s %(location)-32s'
COMMANDS = {
'host-info': ('', 'Get Xen Host Info'),
'host-set-name': ('', 'Set host name'),
'pif-list': ('', 'List all PIFs'),
'sr-list': ('', 'List all SRs'),
'vbd-list': ('', 'List all VBDs'),
'vbd-create': ('<domname> <pycfg> [opts]',
'Create VBD attached to domname'),
'vdi-create': ('<pycfg> [opts]', 'Create a VDI'),
'vdi-list' : ('', 'List all VDI'),
'vdi-rename': ('<vdi_uuid> <new_name>', 'Rename VDI'),
'vdi-destroy': ('<vdi_uuid>', 'Delete VDI'),
'vif-create': ('<domname> <pycfg>', 'Create VIF attached to domname'),
'vtpm-create' : ('<domname> <pycfg>', 'Create VTPM attached to domname'),
'vm-create': ('<pycfg>', 'Create VM with python config'),
'vm-destroy': ('<domname>', 'Delete VM'),
'vm-list': ('[--long]', 'List all domains.'),
'vm-name': ('<uuid>', 'Name of UUID.'),
'vm-shutdown': ('<name> [opts]', 'Shutdown VM with name'),
'vm-start': ('<name>', 'Start VM with name'),
'vm-uuid': ('<name>', 'UUID of a domain by name.'),
'async-vm-start': ('<name>', 'Start VM asynchronously'),
}
OPTIONS = {
'sr-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of SR'})
],
'vdi-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VDI'})
],
'vif-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VIF'})
],
'vm-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VMs'})
],
'vm-shutdown': [(('-f', '--force'), {'help': 'Shutdown Forcefully',
'action': 'store_true'})],
'vdi-create': [(('--name-label',), {'help': 'Name for VDI'}),
(('--name-description',), {'help': 'Description for VDI'}),
(('--virtual-size',), {'type': 'int',
'default': 0,
'help': 'Size of VDI in bytes'}),
(('--type',), {'choices': ['system', 'user', 'ephemeral'],
'default': 'system',
'help': 'VDI type'}),
(('--sharable',), {'action': 'store_true',
'help': 'VDI sharable'}),
(('--read-only',), {'action': 'store_true',
'help': 'Read only'}),
(('--sr',), {})],
'vbd-create': [(('--VDI',), {'help': 'UUID of VDI to attach to.'}),
(('--mode',), {'choices': ['RO', 'RW'],
'help': 'device mount mode'}),
(('--driver',), {'choices':['paravirtualised', 'ioemu'],
'help': 'Driver for VBD'}),
(('--device',), {'help': 'Device name on guest domain'})]
}
class OptionError(Exception):
pass
class XenAPIError(Exception):
pass
#
# Extra utility functions
#
class IterableValues(Values):
"""Better interface to the list of values from optparse."""
def __iter__(self):
for opt, val in self.__dict__.items():
if opt[0] == '_' or callable(val):
continue
yield opt, val
def parse_args(cmd_name, args, set_defaults = False):
argstring, desc = COMMANDS[cmd_name]
parser = OptionParser(usage = 'xapi %s %s' % (cmd_name, argstring),
description = desc)
if cmd_name in OPTIONS:
for optargs, optkwds in OPTIONS[cmd_name]:
parser.add_option(*optargs, **optkwds)
if set_defaults:
default_values = parser.get_default_values()
defaults = IterableValues(default_values.__dict__)
else:
defaults = IterableValues()
(opts, extraargs) = parser.parse_args(args = list(args),
values = defaults)
return opts, extraargs
def execute(server, fn, args, async = False):
if async:
func = eval('server.Async.%s' % fn)
else:
func = eval('server.%s' % fn)
result = func(*args)
if type(result) != DictType:
raise TypeError("Function returned object of type: %s" %
str(type(result)))
if 'Value' not in result:
raise XenAPIError(*result['ErrorDescription'])
return result['Value']
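# Usage sketch for execute() (a hedged example; it assumes a reachable
# Xen-API server, and the calls shown are standard Xen-API methods used
# throughout this script):
#
#   server, session = connect()
#   vm_refs = execute(server, 'VM.get_all', (session,))
#   task_ref = execute(server, 'VM.start', (session, vm_refs[0], False),
#                      async = True)    # Async.* returns a task reference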
_initialised = False
_server = None
_session = None
def connect(*args):
global _server, _session, _initialised
if not _initialised:
# try without password or default credentials
try:
_server = ServerProxy(SERVER_URI)
_session = execute(_server.session, 'login_with_password',
(SERVER_USER, SERVER_PASS))
except:
login = raw_input("Login: ")
password = getpass()
creds = (login, password)
_server = ServerProxy(SERVER_URI)
_session = execute(_server.session, 'login_with_password',
creds)
_initialised = True
return (_server, _session)
def _stringify(adict):
return dict([(k, str(v)) for k, v in adict.items()])
def _read_python_cfg(filename):
cfg = {}
execfile(filename, {}, cfg)
return cfg
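# A "pycfg" file is plain Python executed for its top-level bindings. A
# minimal, illustrative VDI config (field names follow the vdi-create
# options above; the values are made up):
#
#   name_label = 'my-vdi'
#   virtual_size = 16 * 1024 * 1024
#   type = 'user'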
def resolve_vm(server, session, vm_name):
vm_uuid = execute(server, 'VM.get_by_name_label', (session, vm_name))
if not vm_uuid:
return None
else:
return vm_uuid[0]
def resolve_vdi(server, session, vdi_name):
vdi_uuid = execute(server, 'VDI.get_by_name_label', (session, vdi_name))
if not vdi_uuid:
return None
else:
return vdi_uuid[0]
#
# Actual commands
#
def xapi_host_info(args, async = False):
server, session = connect()
hosts = execute(server, 'host.get_all', (session,))
for host in hosts: # there is only one, but ..
hostinfo = execute(server, 'host.get_record', (session, host))
print HOST_INFO_FORMAT % ('Name', hostinfo['name_label'])
print HOST_INFO_FORMAT % ('Version', hostinfo['software_version'])
print HOST_INFO_FORMAT % ('CPUs', len(hostinfo['host_CPUs']))
print HOST_INFO_FORMAT % ('VMs', len(hostinfo['resident_VMs']))
print HOST_INFO_FORMAT % ('UUID', host)
for host_cpu_uuid in hostinfo['host_CPUs']:
host_cpu = execute(server, 'host_cpu.get_record',
(session, host_cpu_uuid))
print 'CPU %s Util: %.2f' % (host_cpu['number'],
float(host_cpu['utilisation']))
def xapi_host_set_name(args, async = False):
if len(args) < 1:
raise OptionError("No hostname specified")
server, session = connect()
hosts = execute(server, 'host.get_all', (session,))
if len(hosts) > 0:
execute(server, 'host.set_name_label', (session, hosts[0], args[0]))
print 'Hostname: %s' % execute(server, 'host.get_name_label',
(session, hosts[0]))
def xapi_vm_uuid(args, async = False):
if len(args) < 1:
raise OptionError("No domain name specified")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print vm_uuid
def xapi_vm_name(args, async = False):
if len(args) < 1:
raise OptionError("No UUID specified")
server, session = connect()
vm_name = execute(server, 'VM.get_name_label', (session, args[0]))
print vm_name
def xapi_vm_list(args, async = False):
opts, args = parse_args('vm-list', args, set_defaults = True)
is_long = opts and opts.long
list_only = args
server, session = connect()
vm_uuids = execute(server, 'VM.get_all', (session,))
if not is_long:
print VM_LIST_FORMAT % {'name_label':'Name',
'memory_actual':'Mem',
'VCPUs_number': 'VCPUs',
'power_state': 'State',
'uuid': 'UUID'}
for uuid in vm_uuids:
vm_info = execute(server, 'VM.get_record', (session, uuid))
# skip domain if we don't want
if list_only and vm_info['name_label'] not in list_only:
continue
if is_long:
vbds = vm_info['VBDs']
vifs = vm_info['VIFs']
vtpms = vm_info['VTPMs']
vif_infos = []
vbd_infos = []
vtpm_infos = []
for vbd in vbds:
vbd_info = execute(server, 'VBD.get_record', (session, vbd))
vbd_infos.append(vbd_info)
for vif in vifs:
vif_info = execute(server, 'VIF.get_record', (session, vif))
vif_infos.append(vif_info)
for vtpm in vtpms:
vtpm_info = execute(server, 'VTPM.get_record', (session, vtpm))
vtpm_infos.append(vtpm_info)
vm_info['VBDs'] = vbd_infos
vm_info['VIFs'] = vif_infos
vm_info['VTPMs'] = vtpm_infos
pprint(vm_info)
else:
print VM_LIST_FORMAT % _stringify(vm_info)
def xapi_vm_create(args, async = False):
if len(args) < 1:
raise OptionError("Configuration file not specified")
filename = args[0]
cfg = _read_python_cfg(filename)
print 'Creating VM from %s ..' % filename
server, session = connect()
uuid = execute(server, 'VM.create', (session, cfg), async = async)
print 'Done. (%s)' % uuid
print uuid
def xapi_vm_destroy(args, async = False):
if len(args) < 1:
raise OptionError("No domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Destroying VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.destroy', (session, vm_uuid), async = async)
print 'Done.'
def xapi_vm_start(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Starting VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.start', (session, vm_uuid, False), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_vm_suspend(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Suspending VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.suspend', (session, vm_uuid), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_vm_resume(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Resuming VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.resume', (session, vm_uuid, False), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_vm_pause(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Pausing VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.pause', (session, vm_uuid), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_vm_unpause(args, async = False):
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
print 'Pausing VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.unpause', (session, vm_uuid), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_task_list(args, async = False):
server, session = connect()
all_tasks = execute(server, 'task.get_all', (session,))
print TASK_LIST_FORMAT % {'name_label': 'Task Name',
'uuid': 'UUID',
'status': 'Status',
'progress': '%'}
for task_uuid in all_tasks:
task = execute(server, 'task.get_record', (session, task_uuid))
print TASK_LIST_FORMAT % task
def xapi_task_clear(args, async = False):
server, session = connect()
all_tasks = execute(server, 'task.get_all', (session,))
for task_uuid in all_tasks:
success = execute(server, 'task.destroy', (session, task_uuid))
print 'Destroyed Task %s' % task_uuid
def xapi_vm_shutdown(args, async = False):
opts, args = parse_args("vm-shutdown", args, set_defaults = True)
if len(args) < 1:
raise OptionError("No Domain name specified.")
server, session = connect()
vm_uuid = resolve_vm(server, session, args[0])
if opts.force:
print 'Forcefully shutting down VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.hard_shutdown', (session, vm_uuid), async = async)
else:
print 'Shutting down VM %s (%s)' % (args[0], vm_uuid)
success = execute(server, 'VM.clean_shutdown', (session, vm_uuid), async = async)
if async:
print 'Task started: %s' % success
else:
print 'Done.'
def xapi_vbd_create(args, async = False):
opts, args = parse_args('vbd-create', args)
if len(args) < 2:
raise OptionError("Configuration file and domain not specified")
domname = args[0]
if len(args) > 1:
filename = args[1]
cfg = _read_python_cfg(filename)
else:
cfg = {}
for opt, val in opts:
cfg[opt] = val
print 'Creating VBD ...',
server, session = connect()
vm_uuid = resolve_vm(server, session, domname)
cfg['VM'] = vm_uuid
vbd_uuid = execute(server, 'VBD.create', (session, cfg), async = async)
if async:
print 'Task started: %s' % vbd_uuid
else:
print 'Done. (%s)' % vbd_uuid
def xapi_vif_create(args, async = False):
if len(args) < 2:
raise OptionError("Configuration file not specified")
domname = args[0]
filename = args[1]
cfg = _read_python_cfg(filename)
print 'Creating VIF from %s ..' % filename
server, session = connect()
vm_uuid = resolve_vm(server, session, domname)
cfg['VM'] = vm_uuid
vif_uuid = execute(server, 'VIF.create', (session, cfg), async = async)
if async:
print 'Task started: %s' % vif_uuid
else:
print 'Done. (%s)' % vif_uuid
def xapi_vbd_list(args, async = False):
server, session = connect()
domname = args[0]
dom_uuid = resolve_vm(server, session, domname)
vbds = execute(server, 'VM.get_VBDs', (session, dom_uuid))
print VBD_LIST_FORMAT % {'device': 'Device',
'uuid' : 'UUID',
'VDI': 'VDI'}
for vbd in vbds:
vbd_struct = execute(server, 'VBD.get_record', (session, vbd))
print VBD_LIST_FORMAT % vbd_struct
def xapi_vbd_stats(args, async = False):
server, session = connect()
domname = args[0]
dom_uuid = resolve_vm(server, session, domname)
vbds = execute(server, 'VM.get_VBDs', (session, dom_uuid))
for vbd_uuid in vbds:
print execute(server, 'VBD.get_io_read_kbs', (session, vbd_uuid))
def xapi_vif_list(args, async = False):
server, session = connect()
opts, args = parse_args('vdi-list', args, set_defaults = True)
is_long = opts and opts.long
domname = args[0]
dom_uuid = resolve_vm(server, session, domname)
vifs = execute(server, 'VM.get_VIFs', (session, dom_uuid))
if not is_long:
print VIF_LIST_FORMAT % {'name': 'Name',
'device': 'Device',
'uuid' : 'UUID',
'MAC': 'MAC'}
for vif in vifs:
vif_struct = execute(server, 'VIF.get_record', (session, vif))
print VIF_LIST_FORMAT % vif_struct
else:
for vif in vifs:
vif_struct = execute(server, 'VIF.get_record', (session, vif))
pprint(vif_struct)
def xapi_console_list(args, async = False):
server, session = connect()
opts, args = parse_args('vdi-list', args, set_defaults = True)
is_long = opts and opts.long
domname = args[0]
dom_uuid = resolve_vm(server, session, domname)
consoles = execute(server, 'VM.get_consoles', (session, dom_uuid))
if not is_long:
print CONSOLE_LIST_FORMAT % {'protocol': 'Protocol',
'location': 'Location',
'uuid': 'UUID'}
for console in consoles:
console_struct = execute(server, 'console.get_record',
(session, console))
print CONSOLE_LIST_FORMAT % console_struct
else:
for console in consoles:
console_struct = execute(server, 'console.get_record',
(session, console))
pprint(console_struct)
def xapi_vdi_list(args, async = False):
opts, args = parse_args('vdi-list', args, set_defaults = True)
is_long = opts and opts.long
server, session = connect()
vdis = execute(server, 'VDI.get_all', (session,))
if not is_long:
print VDI_LIST_FORMAT % {'name_label': 'VDI Label',
'uuid' : 'UUID',
'virtual_size': 'Bytes'}
for vdi in vdis:
vdi_struct = execute(server, 'VDI.get_record', (session, vdi))
print VDI_LIST_FORMAT % vdi_struct
else:
for vdi in vdis:
vdi_struct = execute(server, 'VDI.get_record', (session, vdi))
pprint(vdi_struct)
def xapi_sr_list(args, async = False):
opts, args = parse_args('sr-list', args, set_defaults = True)
is_long = opts and opts.long
server, session = connect()
srs = execute(server, 'SR.get_all', (session,))
if not is_long:
print SR_LIST_FORMAT % {'name_label': 'SR Label',
'uuid' : 'UUID',
'physical_size': 'Size (MB)',
'type': 'Type'}
for sr in srs:
sr_struct = execute(server, 'SR.get_record', (session, sr))
sr_struct['physical_size'] = int(sr_struct['physical_size'])/MB
print SR_LIST_FORMAT % sr_struct
else:
for sr in srs:
sr_struct = execute(server, 'SR.get_record', (session, sr))
pprint(sr_struct)
def xapi_sr_rename(args, async = False):
server, session = connect()
sr = execute(server, 'SR.get_by_name_label', (session, args[0]))
execute(server, 'SR.set_name_label', (session, sr[0], args[1]))
def xapi_vdi_create(args, async = False):
opts, args = parse_args('vdi-create', args)
if len(args) > 0:
cfg = _read_python_cfg(args[0])
else:
cfg = {}
for opt, val in opts:
cfg[opt] = val
server, session = connect()
srs = []
    sr_name = cfg.pop('sr', None) or cfg.pop('SR', None)
    if sr_name:
        srs = execute(server, 'SR.get_by_name_label', (session, sr_name))
else:
srs = execute(server, 'SR.get_all', (session,))
sr = srs[0]
cfg['SR'] = sr
size = cfg['virtual_size']/MB
print 'Creating VDI of size: %dMB ..' % size,
uuid = execute(server, 'VDI.create', (session, cfg), async = async)
if async:
print 'Task started: %s' % uuid
else:
print 'Done. (%s)' % uuid
def xapi_vdi_destroy(args, async = False):
server, session = connect()
if len(args) < 1:
raise OptionError('Not enough arguments')
vdi_uuid = args[0]
print 'Deleting VDI %s' % vdi_uuid
result = execute(server, 'VDI.destroy', (session, vdi_uuid), async = async)
if async:
print 'Task started: %s' % result
else:
print 'Done.'
def xapi_vdi_rename(args, async = False):
server, session = connect()
if len(args) < 2:
raise OptionError('Not enough arguments')
    vdi_uuid = execute(server, 'VDI.get_by_name_label', (session, args[0]))
vdi_name = args[1]
print 'Renaming VDI %s to %s' % (vdi_uuid[0], vdi_name)
result = execute(server, 'VDI.set_name_label',
(session, vdi_uuid[0], vdi_name), async = async)
if async:
print 'Task started: %s' % result
else:
print 'Done.'
def xapi_vtpm_create(args, async = False):
server, session = connect()
domname = args[0]
cfg = _read_python_cfg(args[1])
vm_uuid = resolve_vm(server, session, domname)
cfg['VM'] = vm_uuid
print "Creating vTPM with cfg = %s" % cfg
vtpm_uuid = execute(server, 'VTPM.create', (session, cfg))
print "Done. (%s)" % vtpm_uuid
def xapi_pif_list(args, async = False):
server, session = connect()
pif_uuids = execute(server, 'PIF.get_all', (session,))
for pif_uuid in pif_uuids:
pif = execute(server, 'PIF.get_record', (session, pif_uuid))
print pif
def xapi_debug_wait(args, async = False):
secs = 10
if len(args) > 0:
secs = int(args[0])
server, session = connect()
task_uuid = execute(server, 'debug.wait', (session, secs), async=async)
print 'Task UUID: %s' % task_uuid
def xapi_vm_stat(args, async = False):
domname = args[0]
server, session = connect()
vm_uuid = resolve_vm(server, session, domname)
vif_uuids = execute(server, 'VM.get_VIFs', (session, vm_uuid))
vbd_uuids = execute(server, 'VM.get_VBDs', (session, vm_uuid))
vcpus_utils = execute(server, 'VM.get_VCPUs_utilisation',
(session, vm_uuid))
for vcpu_num in sorted(vcpus_utils.keys()):
print 'CPU %s : %5.2f%%' % (vcpu_num, vcpus_utils[vcpu_num] * 100)
for vif_uuid in vif_uuids:
vif = execute(server, 'VIF.get_record', (session, vif_uuid))
print '%(device)s: rx: %(io_read_kbs)10.2f tx: %(io_write_kbs)10.2f' \
% vif
for vbd_uuid in vbd_uuids:
vbd = execute(server, 'VBD.get_record', (session, vbd_uuid))
print '%(device)s: rd: %(io_read_kbs)10.2f wr: %(io_write_kbs)10.2f' \
% vbd
#
# Command Line Utils
#
import cmd
import shlex
class XenAPICmd(cmd.Cmd):
def __init__(self, server, session):
cmd.Cmd.__init__(self)
self.server = server
self.session = session
self.prompt = ">>> "
def default(self, line):
words = shlex.split(line)
if len(words) > 0:
cmd_name = words[0].replace('-', '_')
is_async = 'async' in cmd_name
if is_async:
cmd_name = re.sub('async_', '', cmd_name)
func_name = 'xapi_%s' % cmd_name
func = globals().get(func_name)
if func:
try:
args = tuple(words[1:])
func(args, async = is_async)
return True
except SystemExit:
return False
except OptionError, e:
print 'Error:', str(e)
return False
except Exception, e:
import traceback
traceback.print_exc()
return False
print '*** Unknown command: %s' % words[0]
return False
def do_EOF(self, line):
print
sys.exit(0)
def do_help(self, line):
usage(print_usage = False)
def emptyline(self):
pass
def postcmd(self, stop, line):
return False
def precmd(self, line):
words = shlex.split(line)
if len(words) > 0:
words0 = words[0].replace('-', '_')
return ' '.join([words0] + words[1:])
else:
return line
def shell():
server, session = connect()
x = XenAPICmd(server, session)
x.cmdloop('Xen API Prompt. Type "help" for a list of functions')
def usage(command = None, print_usage = True):
if not command:
if print_usage:
print 'Usage: xapi <subcommand> [options] [args]'
print
print 'Subcommands:'
print
for func in sorted(globals().keys()):
if func.startswith('xapi_'):
command = func[5:].replace('_', '-')
args, description = COMMANDS.get(command, ('', ''))
print '%-16s %-40s' % (command, description)
print
else:
parse_args(command, ['-h'])
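# Example invocations (illustrative host and guest names; the server URI
# may also come from the XAPI_SERVER_URI environment variable):
#
#   xapi --server http://xenhost:9363/ vm-list --long
#   xapi vm-start myguest
#   xapi async-vm-start myguest    # returns a task UUID immediately
#   xapi shell                     # interactive prompt (XenAPICmd)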
def main(args):
# poor man's optparse that doesn't abort on unrecognised opts
options = {}
remaining = []
arg_n = 0
while args:
arg = args.pop(0)
if arg in ('--help', '-h'):
options['help'] = True
elif arg in ('--server', '-s') and args:
options['server'] = args.pop(0)
elif arg in ('--user', '-u') and args:
options['user'] = args.pop(0)
elif arg in ('--password', '-p') and args:
options['password'] = args.pop(0)
else:
remaining.append(arg)
# abort here if these conditions are true
if options.get('help') and not remaining:
usage()
sys.exit(1)
if options.get('help') and remaining:
usage(remaining[0])
sys.exit(1)
if not remaining:
usage()
sys.exit(1)
if options.get('server'):
# it is ugly to use a global, but it is simple
global SERVER_URI
SERVER_URI = options['server']
if options.get('user'):
global SERVER_USER
SERVER_USER = options['user']
if options.get('password'):
global SERVER_PASS
SERVER_PASS = options['password']
subcmd = remaining[0].replace('-', '_')
is_async = 'async' in subcmd
if is_async:
subcmd = re.sub('async_', '', subcmd)
subcmd_func_name = 'xapi_' + subcmd
subcmd_func = globals().get(subcmd_func_name, None)
if subcmd == 'shell':
shell()
elif not subcmd_func or not callable(subcmd_func):
print 'Error: Unable to find subcommand \'%s\'' % subcmd
usage()
sys.exit(1)
try:
subcmd_func(remaining[1:], async = is_async)
except XenAPIError, e:
print 'Error: %s' % str(e.args[0])
sys.exit(2)
except OptionError, e:
print 'Error: %s' % e
        sys.exit(1)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| mikesun/xen-cow-checkpointing | tools/python/scripts/xapi.py | Python | gpl-2.0 | 29,258 |
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '10/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class ListProperty(BaseProperty):
"""A property that accepts list input."""
    # if you edit this, you need to adapt xml_value and is_valid accordingly
_allowed_python_types = [list, NoneType]
def __init__(self, name, value, xml_path):
super(ListProperty, self).__init__(
name, value, xml_path, self._allowed_python_types)
@classmethod
def is_valid(cls, value):
return True
def cast_from_str(self, value):
try:
return json.loads(value)
except ValueError as e:
raise MetadataCastError(e)
@property
def xml_value(self):
if self.python_type is list:
return json.dumps(self.value)
elif self.python_type is NoneType:
return ''
else:
raise RuntimeError('self._allowed_python_types and self.xml_value'
'are out of sync. This should never happen')
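# Minimal usage sketch (a hedged example; the property name and xml_path
# below are illustrative, real values come from the metadata schema):
#
#   p = ListProperty('keywords', ['hazard', 'exposure'], 'gmd:keyword')
#   p.xml_value                       # -> '["hazard", "exposure"]'
#   p.cast_from_str('["a", "b"]')     # -> ['a', 'b']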
| Gustry/inasafe | safe/metadata/property/list_property.py | Python | gpl-3.0 | 1,695 |
"""
Also used in cms.tests.ApphooksTestCase
"""
from django.conf.urls.defaults import *
urlpatterns = patterns('cms.test_utils.project.sampleapp.views',
url(r'^$', 'sample_view', {'message': 'sample root page',}, name='sample-root'),
url(r'^settings/$', 'sample_view', kwargs={'message': 'sample settings page'}, name='sample-settings'),
url(r'^account/$', 'sample_view', {'message': 'sample account page'}, name='sample-account'),
url(r'^account/my_profile/$', 'sample_view', {'message': 'sample my profile page'}, name='sample-profile'),
url(r'^(?P<id>[0-9]+)/$', 'category_view', name='category_view'),
url(r'^notfound/$', 'notfound', name='notfound'),
url(r'^extra_1/$', 'extra_view', {'message': 'test urlconf'}, name='extra_first'),
url(r'^', include('cms.test_utils.project.sampleapp.urls_extra')),
)
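# The named patterns above can be reversed once the apphook mounts this
# urlconf on a CMS page (the path prefix depends on that page), e.g.:
#
#   from django.core.urlresolvers import reverse
#   reverse('sample-settings')    # -> '<page-path>/settings/'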
| hzlf/openbroadcast | website/cms/test_utils/project/sampleapp/urls.py | Python | gpl-3.0 | 842 |