repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
---|---|---|---|---|
jstacoder/Flask-Velox | refs/heads/master | flask_velox/mixins/sqla/__init__.py | 12133432 | |
sarvex/django | refs/heads/master | django/db/backends/postgresql_psycopg2/__init__.py | 12133432 | |
edilio/citiesprj | refs/heads/master | citiesprj/apps/cities/migrations/__init__.py | 12133432 | |
sublime1809/django | refs/heads/master | tests/null_fk/__init__.py | 12133432 | |
DONIKAN/django | refs/heads/master | tests/introspection/__init__.py | 12133432 | |
tejesh95/Zubio.in | refs/heads/master | zubio/allauth/socialaccount/providers/windowslive/provider.py | 10 | from __future__ import unicode_literals
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class WindowsLiveAccount(ProviderAccount):
# def get_avatar_url(self):
# return self.account.extra_data.get('picture')
def to_str(self):
name = '{0} {1}'.format(self.account.extra_data.get('first_name', ''),
self.account.extra_data.get('last_name', ''))
if name.strip() != '':
return name
return super(WindowsLiveAccount, self).to_str()
class WindowsLiveProvider(OAuth2Provider):
id = str('windowslive')
name = 'Live'
package = 'allauth.socialaccount.providers.windowslive'
account_class = WindowsLiveAccount
# docs on available scopes:
# http://msdn.microsoft.com/en-us/library/dn631845.aspx
def get_default_scope(self):
return ['wl.basic', 'wl.emails']
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
try:
email = data.get('emails').get('preferred')
except AttributeError:
email = None
return dict(email=email,
last_name=data.get('last_name'),
first_name=data.get('first_name'))
providers.registry.register(WindowsLiveProvider)
|
rebost/django | refs/heads/master | tests/regressiontests/backends/tests.py | 4 | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import datetime
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import (backend, connection, connections, DEFAULT_DB_ALIAS,
IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.utils import ConnectionHandler, DatabaseError, load_backend
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings
from django.utils import unittest
from . import models
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
convert_unicode = backend.convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!'),])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
cursor = connection.cursor()
var = cursor.var(backend.Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([unicode(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
c = connection.cursor()
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
c.execute(query)
self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5,0,13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_server_version_connections(self):
connection.close()
connection.mysql_version
self.assertTrue(connection.connection is None)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.datetime(2010, 1, 1, 0, 0)])
def test_django_extract(self):
"""
Test the custom ``django_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
class LastExecutedQueryTest(TestCase):
@override_settings(DEBUG=True)
def test_debug_sql(self):
list(models.Tag.objects.filter(name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertTrue(sql.startswith("select"))
self.assertIn(models.Tag._meta.db_table, sql)
def test_query_encoding(self):
"""
Test that last_executed_query() returns a Unicode string
"""
tags = models.Tag.objects.extra(select={'föö':1})
sql, params = tags.query.sql_with_params()
cursor = tags.query.get_compiler('default').execute_sql(None)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertTrue(isinstance(last_sql, unicode))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1,2,3),])
self.assertRaises(Exception, cursor.executemany, query, [(1,),])
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check that it does. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually.
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
"""
#17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back.
"""
@unittest.skipUnless(
connection.vendor == 'postgresql' and connection.isolation_level > 0,
"This test applies only to PostgreSQL without autocommit")
def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different from
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
try:
new_connection.close()
except DatabaseError:
pass
# Unfortunately, with sqlite3 the in-memory test database cannot be closed,
# and therefore cannot be re-opened during testing, so we sadly disable this
# test for now.
class ConnectionCreatedSignalTest(TestCase):
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
cursor = connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
cursor = connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
response = cursor.execute(
"select strftime('%%s', date('now'))").fetchall()[0][0]
self.assertNotEqual(response, None)
# response should be a non-zero integer
self.assertTrue(int(response))
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
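# Build a parameterized INSERT against the Square model's table, with the
# table and column names quoted via the backend, then run it once per row
# in args.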
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
cursor.executemany(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# We just verify that the raised exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, we should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using a context manager), we should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
finally:
transaction.rollback()
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
connections_set = set()
connection.cursor()
connections_set.add(connection.connection)
def runner():
from django.db import connection
connection.cursor()
connections_set.add(connection.connection)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_set), 3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection.connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
connections_set = set()
for conn in connections.all():
connections_set.add(conn)
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_set.add(conn)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_set), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except DatabaseError as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(len(exceptions), 0)
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class BackendLoadingTests(TestCase):
def test_old_style_backends_raise_useful_exception(self):
self.assertRaisesRegexp(ImproperlyConfigured,
"Try using django.db.backends.sqlite3 instead",
load_backend, 'sqlite3')
class MySQLPKZeroTests(TestCase):
"""
Zero as id for AutoField should raise an exception in MySQL, because MySQL
does not allow zero as an automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
|
jeffrey4l/nova | refs/heads/master | nova/tests/functional/v3/test_server_metadata.py | 27 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class ServersMetadataJsonTest(test_servers.ServersSampleBase):
extends_name = 'core_only'
sample_dir = 'server-metadata'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _create_and_set(self, subs):
uuid = self._post_server()
response = self._do_put('/servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self._verify_response('server-metadata-all-resp', subs, response, 200)
return uuid
def generalize_subs(self, subs, vanilla_regexes):
subs['value'] = '(Foo|Bar) Value'
return subs
def test_metadata_put_all(self):
# Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
self._create_and_set(subs)
def test_metadata_post_all(self):
# Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_post('servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self._verify_response('server-metadata-all-resp', subs, response, 200)
def test_metadata_get_all(self):
# Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
self._verify_response('server-metadata-all-resp', subs, response, 200)
def test_metadata_put(self):
# Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_put('servers/%s/metadata/foo' % uuid,
'server-metadata-req',
subs)
self._verify_response('server-metadata-resp', subs, response, 200)
def test_metadata_get(self):
# Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
self._verify_response('server-metadata-resp', subs, response, 200)
def test_metadata_delete(self):
# Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
|
jkshaver/virtualenv-1.8.2 | refs/heads/master | env/lib/python2.7/site-packages/django/contrib/localflavor/it/it_province.py | 406 | # -*- coding: utf-8 -*-
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', u'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
|
Thraxis/pymedusa | refs/heads/master | lib/feedparser/urls.py | 43 | from __future__ import absolute_import, unicode_literals
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse as urlparse
from .html import _BaseHTMLProcessor
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
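# _urifixer strips any extra slashes immediately following a URI's
# "scheme://" prefix (e.g. "http:////host/x" becomes "http://host/x").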
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
uri = urlparse.urljoin(base, uri)
except ValueError:
uri = ''
return uri
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _makeSafeAbsoluteURI(base, rel=None):
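# Join base and rel, then refuse the result (returning '') unless the
# resulting scheme is in ACCEPTABLE_URI_SCHEMES.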
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or '')
if not base:
return rel or ''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return ''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return ''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return ''
return uri
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
# if not _SGML_AVAILABLE:
# return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
|
MotherNature/IndeedScraper | refs/heads/master | indeed/settings.py | 2 | # Scrapy settings for indeed project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
from numpy.random import randint
BOT_NAME = 'indeed'
#BOT_VERSION = '1.0'
SPIDER_MODULES = ['indeed.spiders']
NEWSPIDER_MODULE = 'indeed.spiders'
DEFAULT_ITEM_CLASS = 'indeed.items.indeedItem'
USER_AGENTS = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36',' Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36','Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10','Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36','Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36','Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36','Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36']
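# Pick one user agent at random for this run so successive crawls don't all
# present the same browser fingerprint.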
index = randint(len(USER_AGENTS))
USER_AGENT = USER_AGENTS[index]
print '#user agent: ', USER_AGENT
SCHEDULER_ORDER = 'DFO'
FEED_URI = 'items.json'
FEED_FORMAT = 'json'
ITEM_PIPELINES = ['indeed.pipelines.indeedPipeline']
|
nurmd2/nurmd | refs/heads/master | addons/website_slides/models/__init__.py | 77 | # -*- coding: utf-8 -*-
import res_config
import slides
|
SerCeMan/intellij-community | refs/heads/master | python/testData/joinLines/StatementColon.py | 87 | if True:
return "No special handling"
|
octopus-platform/joern | refs/heads/dev | projects/octopus/python/octopus-mlutils/tests/orientdb_server_command.py | 5 | import unittest
from octopus.server.server_command import ServerCommand
class TestServerCommand(unittest.TestCase):
def testUnreachableServer(self):
self.hostname = 'localhost'
self.port = '1337'
cmd = ServerCommand(self.hostname, self.port)
self.assertRaises(ConnectionRefusedError, cmd.execute_get_command, "foo")
|
qedi-r/home-assistant | refs/heads/dev | homeassistant/components/crimereports/__init__.py | 29 | """The crimereports component."""
|
siosio/intellij-community | refs/heads/master | python/testData/inspections/PyUnboundLocalVariableInspection/UnboundUnreachable.py | 83 | def test1(c, xs):
if c:
y = 1
print(<warning descr="Local variable 'y' might be referenced before assignment">y</warning>)
for x in xs:
continue
z = 1
|
m8ttyB/oneanddone | refs/heads/master | oneanddone/users/forms.py | 4 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from django.utils.translation import ugettext as _
from oneanddone.base.widgets import MyURLField
from oneanddone.users.models import UserProfile
class SignUpForm(forms.ModelForm):
pp_checkbox = forms.BooleanField(
label=_("You are creating a profile that will include a public username"
" and work history."
" You will begin to receive email communications from Mozilla"
" once you have completed tasks."
" You may configure email preferences when editing your profile."),
required=True,
initial=True)
username = forms.RegexField(
label=_("Username:"),
max_length=30, regex=r'^[a-zA-Z0-9]+$',
error_messages={'invalid': _("This value may contain only alphanumeric characters.")})
personal_url = MyURLField(label=_('Personal URL:'))
bugzilla_email = forms.EmailField(label=_('Bugzilla email address:'), required=False)
class Meta:
model = UserProfile
fields = ('name', 'username', 'pp_checkbox', 'personal_url', 'bugzilla_email')
def save(self, *args, **kwargs):
# We will only reach the save() method if the pp_checkbox was checked
self.instance.privacy_policy_accepted = True
return super(SignUpForm, self).save(*args, **kwargs)
class UserProfileForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username:"),
max_length=30, regex=r'^[a-zA-Z0-9]+$',
error_messages={'invalid': _("This value may contain only alphanumeric characters.")})
consent_to_email = forms.BooleanField(required=False)
personal_url = MyURLField(label=_('Personal URL:'))
bugzilla_email = forms.EmailField(label=_('Bugzilla email address:'), required=False)
class Meta:
model = UserProfile
fields = ('name', 'username', 'consent_to_email', 'personal_url', 'bugzilla_email')
|
omazapa/root-old | refs/heads/master | interpreter/llvm/src/tools/clang/utils/analyzer/SATestAdd.py | 48 | #!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- pre_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID):
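# Linear scan over the CSV rows; column 0 holds the project ID.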
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, BuildMode):
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True, Dir=Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
PMapWriter.writerow((ID, int(BuildMode)))
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'project_ID <mode>' \
'mode - 0 for single file project; ' \
'1 for scan_build; ' \
'2 for single file c++11 project'
sys.exit(-1)
BuildMode = 1
if (len(sys.argv) >= 3):
BuildMode = int(sys.argv[2])
assert((BuildMode == 0) | (BuildMode == 1) | (BuildMode == 2))
addNewProject(sys.argv[1], BuildMode)
|
appliedx/edx-platform | refs/heads/master | common/test/acceptance/tests/studio/test_studio_outline.py | 45 | """
Acceptance tests for studio related to the outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ...pages.studio.utils import add_discussion, drag, verify_ordering
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
from ...pages.lms.progress import ProgressPage
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
"""
Base class for all course outline tests
"""
def setUp(self):
"""
Install a course with no content using a fixture.
"""
super(CourseOutlineTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self.advanced_settings = AdvancedSettingsPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
""" Install a course with sections/problems, tabs, updates, and handouts """
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('html', 'Test HTML Component'),
XBlockFixtureDesc('discussion', 'Test Discussion Component')
)
)
)
)
def do_action_and_verify(self, outline_page, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
if outline_page is None:
outline_page = self.course_outline_page.visit()
action(outline_page)
verify_ordering(self, outline_page, expected_ordering)
# Reload the page and expand all subsections to see that the change was persisted.
outline_page = self.course_outline_page.visit()
outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
verify_ordering(self, outline_page, expected_ordering)
@attr('shard_3')
class CourseOutlineDragAndDropTest(CourseOutlineTest):
"""
Tests of drag and drop within the outline page.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Create a course with one section, two subsections, and four units
"""
# with collapsed outline
self.chap_1_handle = 0
self.chap_1_seq_1_handle = 1
# with first sequential expanded
self.seq_1_vert_1_handle = 2
self.seq_1_vert_2_handle = 3
self.chap_1_seq_2_handle = 4
course_fixture.add_children(
XBlockFixtureDesc('chapter', "1").add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
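# Drag the source handle onto the target handle, then check the expected
# ordering both immediately and after a page reload (via do_action_and_verify).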
self.do_action_and_verify(
outline_page,
lambda (outline): drag(outline, source, target),
expected_ordering
)
def test_drop_unit_in_collapsed_subsection(self):
"""
Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
have its own verticals.
"""
course_outline_page = self.course_outline_page.visit()
# expand first subsection
course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()
expected_ordering = [{"1": ["1.1", "1.2"]},
{"1.1": ["1.1.1"]},
{"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr('shard_3')
class WarningMessagesTest(CourseOutlineTest):
"""
Feature: Warning messages on sections, subsections, and units
"""
__test__ = True
STAFF_ONLY_WARNING = 'Contains staff only content'
LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'
class PublishState(object):
"""
Default values for representing the published state of a unit
"""
NEVER_PUBLISHED = 1
UNPUBLISHED_CHANGES = 2
PUBLISHED = 3
VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
class UnitState(object):
""" Represents the state of a unit """
def __init__(self, is_released, publish_state, is_locked):
""" Creates a new UnitState with the given properties """
self.is_released = is_released
self.publish_state = publish_state
self.is_locked = is_locked
@property
def name(self):
""" Returns an appropriate name based on the properties of the unit """
result = "Released " if self.is_released else "Unreleased "
if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
result += "Never Published "
elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
result += "Unpublished Changes "
else:
result += "Published "
result += "Locked" if self.is_locked else "Unlocked"
return result
def populate_course_fixture(self, course_fixture):
""" Install a course with various configurations that could produce warning messages """
# Define the dimensions that map to the UnitState constructor
features = [
[True, False], # Possible values for is_released
self.PublishState.VALUES, # Possible values for publish_state
[True, False] # Possible values for is_locked
]
# Add a fixture for every state in the product of features
course_fixture.add_children(*[
self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
])
def _build_fixture(self, unit_state):
""" Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
name = unit_state.name
start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()
subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})
# Children of never published subsections will be added on demand via _ensure_unit_present
return XBlockFixtureDesc('chapter', name).add_children(
subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
else subsection.add_children(
XBlockFixtureDesc('vertical', name, metadata={
'visible_to_staff_only': True if unit_state.is_locked else None
})
)
)
def test_released_never_published_locked(self):
""" Tests that released never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_never_published_unlocked(self):
""" Tests that released never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_released_unpublished_changes_locked(self):
""" Tests that released unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_unpublished_changes_unlocked(self):
""" Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.LIVE_UNPUBLISHED_WARNING
)
def test_released_published_locked(self):
""" Tests that released published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_released_published_unlocked(self):
""" Tests that released published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def test_unreleased_never_published_locked(self):
""" Tests that unreleased never published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_never_published_unlocked(self):
""" Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
self.NEVER_PUBLISHED_WARNING
)
def test_unreleased_unpublished_changes_locked(self):
""" Tests that unreleased unpublished changes locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_unpublished_changes_unlocked(self):
"""
Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
release in the future'
"""
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
self.FUTURE_UNPUBLISHED_WARNING
)
def test_unreleased_published_locked(self):
""" Tests that unreleased published locked units display staff only warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
self.STAFF_ONLY_WARNING
)
def test_unreleased_published_unlocked(self):
""" Tests that unreleased published unlocked units display no warnings """
self._verify_unit_warning(
self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
None
)
def _verify_unit_warning(self, unit_state, expected_status_message):
"""
Verifies that the given unit's messages match the expected messages.
If expected_status_message is None, then the unit status message is expected to not be present.
"""
self._ensure_unit_present(unit_state)
self.course_outline_page.visit()
section = self.course_outline_page.section(unit_state.name)
subsection = section.subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
if expected_status_message == self.STAFF_ONLY_WARNING:
self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
else:
self.assertFalse(section.has_status_message)
self.assertFalse(subsection.has_status_message)
if expected_status_message:
self.assertEqual(unit.status_message, expected_status_message)
else:
self.assertFalse(unit.has_status_message)
def _ensure_unit_present(self, unit_state):
""" Ensures that a unit with the given state is present on the course outline """
if unit_state.publish_state == self.PublishState.PUBLISHED:
return
name = unit_state.name
self.course_outline_page.visit()
subsection = self.course_outline_page.section(name).subsection(name)
subsection.expand_subsection()
if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
unit = subsection.unit(name).go_to()
add_discussion(unit)
elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
subsection.add_unit()
unit = ContainerPage(self.browser, None)
unit.wait_for_page()
if unit.is_staff_locked != unit_state.is_locked:
unit.toggle_staff_lock()
@attr('shard_3')
class EditingSectionsTest(CourseOutlineTest):
"""
Feature: Editing Release date, Due date and grading type.
"""
__test__ = True
def test_can_edit_subsection(self):
"""
Scenario: I can edit settings of subsection.
Given that I have created a subsection
Then I see release date, due date and grading policy of subsection in course outline
When I click on the configuration icon
Then edit modal window is shown
And release date, due date and grading policy fields present
And they have correct initial values
Then I set new values for these fields
And I click save button on the modal
Then I see release date, due date and grading policy of subsection in course outline
"""
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
# Verify that Release date visible by default
self.assertTrue(subsection.release_date)
# Verify that Due date and Policy hidden by default
self.assertFalse(subsection.due_date)
self.assertFalse(subsection.policy)
modal = subsection.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertTrue(modal.has_due_date())
self.assertTrue(modal.has_policy())
# Verify initial values
self.assertEqual(modal.release_date, u'1/1/1970')
self.assertEqual(modal.due_date, u'')
self.assertEqual(modal.policy, u'Not Graded')
# Set new values
modal.release_date = '3/12/1972'
modal.due_date = '7/21/2014'
modal.policy = 'Lab'
modal.save()
self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
self.assertIn(u'Lab', subsection.policy)
def test_can_edit_section(self):
"""
Scenario: I can edit settings of section.
Given that I have created a section
Then I see release date of section in course outline
When I click on the configuration icon
Then edit modal window is shown
And release date field present
And it has correct initial value
Then I set new value for this field
And I click save button on the modal
Then I see release date of section in course outline
"""
self.course_outline_page.visit()
section = self.course_outline_page.section(SECTION_NAME)
# Verify that Release date visible by default
self.assertTrue(section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
modal = section.edit()
# Verify fields
self.assertTrue(modal.has_release_date())
self.assertFalse(modal.has_due_date())
self.assertFalse(modal.has_policy())
# Verify initial value
self.assertEqual(modal.release_date, u'1/1/1970')
# Set new value
modal.release_date = '5/14/1969'
modal.save()
self.assertIn(u'Released: May 14, 1969', section.release_date)
# Verify that Due date and Policy are not present
self.assertFalse(section.due_date)
self.assertFalse(section.policy)
def test_subsection_is_graded_in_lms(self):
"""
Scenario: I can grade subsection from course outline page.
Given I visit progress page
And I see that problem in subsection has grading type "Practice"
Then I visit course outline page
And I click on the configuration icon of subsection
And I set grading policy to "Lab"
And I click save button on the modal
Then I visit progress page
And I see that problem in subsection has grading type "Problem"
"""
progress_page = ProgressPage(self.browser, self.course_id)
progress_page.visit()
progress_page.wait_for_page()
self.assertEqual(u'Practice', progress_page.grading_formats[0])
self.course_outline_page.visit()
subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
modal = subsection.edit()
# Set new values
modal.policy = 'Lab'
modal.save()
progress_page.visit()
self.assertEqual(u'Problem', progress_page.grading_formats[0])
def test_unchanged_release_date_is_not_saved(self):
"""
Scenario: Saving a subsection without changing the release date will not override the release date
Given that I have created a section with a subsection
When I open the settings modal for the subsection
        And I press save
And I open the settings modal for the section
And I change the release date to 07/20/1969
And I press save
Then the subsection and the section have the release date 07/20/1969
"""
self.course_outline_page.visit()
modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
modal.save()
modal = self.course_outline_page.section_at(0).edit()
modal.release_date = '7/20/1969'
modal.save()
release_text = 'Released: Jul 20, 1969'
self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_3')
class StaffLockTest(CourseOutlineTest):
"""
Feature: Sections, subsections, and units can be locked and unlocked from the course outline.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Create a course with one section, two subsections, and four units """
course_fixture.add_children(
XBlockFixtureDesc('chapter', '1').add_children(
XBlockFixtureDesc('sequential', '1.1').add_children(
XBlockFixtureDesc('vertical', '1.1.1'),
XBlockFixtureDesc('vertical', '1.1.2')
),
XBlockFixtureDesc('sequential', '1.2').add_children(
XBlockFixtureDesc('vertical', '1.2.1'),
XBlockFixtureDesc('vertical', '1.2.2')
)
)
)
def _verify_descendants_are_staff_only(self, item):
"""Verifies that all the descendants of item are staff only"""
self.assertTrue(item.is_staff_only)
if hasattr(item, 'children'):
for child in item.children():
self._verify_descendants_are_staff_only(child)
def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
"""Removes staff lock from a course outline item and checks whether or not a warning appears."""
modal = outline_item.edit()
modal.is_explicitly_locked = False
if expect_warning:
self.assertTrue(modal.shows_staff_lock_warning())
else:
self.assertFalse(modal.shows_staff_lock_warning())
modal.save()
def _toggle_lock_on_unlocked_item(self, outline_item):
"""Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
self.assertFalse(outline_item.has_staff_lock_warning)
outline_item.set_staff_lock(True)
self.assertTrue(outline_item.has_staff_lock_warning)
self._verify_descendants_are_staff_only(outline_item)
outline_item.set_staff_lock(False)
self.assertFalse(outline_item.has_staff_lock_warning)
def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
"""Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
child_item.set_staff_lock(True)
parent_item.set_staff_lock(True)
self.assertTrue(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
parent_item.set_staff_lock(False)
self.assertFalse(parent_item.has_staff_lock_warning)
self.assertTrue(child_item.has_staff_lock_warning)
def test_units_can_be_locked(self):
"""
Scenario: Units can be locked and unlocked from the course outline page
Given I have a course with a unit
When I click on the configuration icon
And I enable explicit staff locking
And I click save
Then the unit shows a staff lock warning
And when I click on the configuration icon
And I disable explicit staff locking
And I click save
Then the unit does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
self._toggle_lock_on_unlocked_item(unit)
def test_subsections_can_be_locked(self):
"""
Scenario: Subsections can be locked and unlocked from the course outline page
Given I have a course with a subsection
When I click on the subsection's configuration icon
And I enable explicit staff locking
And I click save
Then the subsection shows a staff lock warning
And all its descendants are staff locked
And when I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
        Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
self._toggle_lock_on_unlocked_item(subsection)
def test_sections_can_be_locked(self):
"""
Scenario: Sections can be locked and unlocked from the course outline page
Given I have a course with a section
When I click on the section's configuration icon
And I enable explicit staff locking
And I click save
Then the section shows a staff lock warning
And all its descendants are staff locked
And when I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
self._toggle_lock_on_unlocked_item(section)
def test_explicit_staff_lock_remains_after_unlocking_section(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section and one of its units
When I click on the section's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
unit = section.subsection_at(0).unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)
def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
"""
Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection and one of its units
When I click on the subsection's configuration icon
And I disable explicit staff locking
And I click save
Then the unit still shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)
def test_section_displays_lock_when_all_subsections_locked(self):
"""
Scenario: All subsections in section are explicitly locked, section should display staff only warning
        Given I have a course with one section and two subsections
When I enable explicit staff lock on all the subsections
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
section.subsection_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_section_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in a section are explicitly locked, section should display staff only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on all the units
Then the section shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(0).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertTrue(section.has_staff_lock_warning)
def test_subsection_displays_lock_when_all_units_locked(self):
"""
Scenario: All units in subsection are explicitly locked, subsection should display staff only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on all the units
Then the subsection shows a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
subsection.unit_at(1).set_staff_lock(True)
self.assertTrue(subsection.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_subsections_locked(self):
"""
Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning
Given I have a course with one section and two subsections
When I enable explicit staff lock on one subsection
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_section_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning
Given I have a course with one section, two subsections, and four units
When I enable explicit staff lock on three units
Then the section does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.subsection_at(0).unit_at(0).set_staff_lock(True)
section.subsection_at(0).unit_at(1).set_staff_lock(True)
section.subsection_at(1).unit_at(1).set_staff_lock(True)
self.assertFalse(section.has_staff_lock_warning)
def test_subsection_does_not_display_lock_when_some_units_locked(self):
"""
Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning
Given I have a course with one subsection and two units
When I enable explicit staff lock on one unit
Then the subsection does not show a staff lock warning
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.unit_at(0).set_staff_lock(True)
self.assertFalse(subsection.has_staff_lock_warning)
def test_locked_sections_do_not_appear_in_lms(self):
"""
Scenario: A locked section is not visible to students in the LMS
Given I have a course with two sections
When I enable explicit staff lock on one section
And I click the View Live button to switch to staff view
Then I see two sections in the sidebar
And when I switch the view mode to student view
Then I see one section in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.course_outline_page.section_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_sections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_sections, 1)
def test_locked_subsections_do_not_appear_in_lms(self):
"""
Scenario: A locked subsection is not visible to students in the LMS
Given I have a course with two subsections
When I enable explicit staff lock on one subsection
And I click the View Live button to switch to staff view
Then I see two subsections in the sidebar
And when I switch the view mode to student view
        Then I see one subsection in the sidebar
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_subsections, 2)
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_subsections, 1)
def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a section will not publish its draft units
Given I have a course with a section and unit
And the unit has a draft and published version
When I enable explicit staff lock on the section
And I disable explicit staff lock on the section
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
section.set_staff_lock(True)
section.set_staff_lock(False)
unit = section.subsection_at(0).unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
"""
Scenario: Locking and unlocking a subsection will not publish its draft units
Given I have a course with a subsection and unit
And the unit has a draft and published version
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
And I click the View Live button to switch to staff view
Then I see the published version of the unit
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
add_discussion(unit)
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
subsection.set_staff_lock(False)
unit = subsection.unit_at(0).go_to()
unit.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 0)
def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the unit
And I disable explicit staff lock on the unit
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, True)
def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the subsection
And I disable explicit staff lock on the subsection
Then I see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, True)
def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.
Given I have a course with a subsection and unit
When I enable explicit staff lock on the subsection
And I enable explicit staff lock on the unit
When I disable explicit staff lock on the unit
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
subsection = self.course_outline_page.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(unit, False)
def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
"""
Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.
Given I have a course with a section and subsection
When I enable explicit staff lock on the section
And I enable explicit staff lock on the subsection
When I disable explicit staff lock on the subsection
Then I do not see a modal warning.
"""
self.course_outline_page.visit()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
subsection = section.subsection_at(0)
section.set_staff_lock(True)
subsection.set_staff_lock(True)
self._remove_staff_lock_and_verify_warning(subsection, False)
@attr('shard_3')
class EditNamesTest(CourseOutlineTest):
"""
Feature: Click-to-edit section/subsection names
"""
__test__ = True
def set_name_and_verify(self, item, old_name, new_name, expected_name):
"""
Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.
"""
self.assertEqual(item.name, old_name)
item.change_name(new_name)
self.assertFalse(item.in_editable_form())
self.assertEqual(item.name, expected_name)
def test_edit_section_name(self):
"""
Scenario: Click-to-edit section name
Given that I have created a section
When I click on the name of section
Then the section name becomes editable
And given that I have edited the section name
When I click outside of the edited section name
Then the section name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'Changed',
'Changed'
)
def test_edit_subsection_name(self):
"""
Scenario: Click-to-edit subsection name
Given that I have created a subsection
When I click on the name of subsection
Then the subsection name becomes editable
And given that I have edited the subsection name
When I click outside of the edited subsection name
Then the subsection name saves
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'Changed',
'Changed'
)
def test_edit_empty_section_name(self):
"""
Scenario: Click-to-edit section name, enter empty name
Given that I have created a section
And I have clicked to edit the name of the section
And I have entered an empty section name
When I click outside of the edited section name
Then the section name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0),
'Test Section',
'',
'Test Section'
)
def test_edit_empty_subsection_name(self):
"""
Scenario: Click-to-edit subsection name, enter empty name
Given that I have created a subsection
And I have clicked to edit the name of the subsection
And I have entered an empty subsection name
When I click outside of the edited subsection name
Then the subsection name does not change
And becomes non-editable
"""
self.course_outline_page.visit()
self.set_name_and_verify(
self.course_outline_page.section_at(0).subsection_at(0),
'Test Subsection',
'',
'Test Subsection'
)
def test_editing_names_does_not_expand_collapse(self):
"""
Scenario: A section stays in the same expand/collapse state while its name is edited
Given that I have created a section
And the section is collapsed
When I click on the name of the section
Then the section is collapsed
And given that I have entered a new name
Then the section is collapsed
And given that I press ENTER to finalize the name
Then the section is collapsed
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).expand_subsection()
self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).edit_name()
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).enter_name('Changed')
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
self.course_outline_page.section_at(0).finalize_name()
self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class CreateSectionsTest(CourseOutlineTest):
"""
Feature: Create new sections/subsections/units
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a completely empty course to easily test adding things to it """
pass
def test_create_new_section_from_top_button(self):
"""
Scenario: Create new section from button at top of page
Given that I am on the course outline
When I click the "+ Add section" button at the top of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button(self):
"""
Scenario: Create new section from button at bottom of page
Given that I am on the course outline
When I click the "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_section_from_bottom_button_plus_icon(self):
"""
Scenario: Create new section from button plus icon at bottom of page
Given that I am on the course outline
When I click the plus icon in "+ Add section" button at the bottom of the page
Then I see a new section added to the bottom of the page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_bottom_button(click_child_icon=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
def test_create_new_subsection(self):
"""
Scenario: Create new subsection
Given that I have created a section
When I click the "+ Add subsection" button in that section
Then I see a new subsection added to the bottom of the section
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
subsections = self.course_outline_page.section_at(0).subsections()
self.assertEqual(len(subsections), 1)
self.assertTrue(subsections[0].in_editable_form())
def test_create_new_unit(self):
"""
Scenario: Create new unit
Given that I have created a section
And that I have created a subsection within that section
When I click the "+ Add unit" button in that subsection
Then I am redirected to a New Unit page
And the display name is in its editable form.
"""
self.course_outline_page.visit()
self.course_outline_page.add_section_from_top_button()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).add_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).add_unit()
unit_page = ContainerPage(self.browser, None)
EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_3')
class DeleteContentTest(CourseOutlineTest):
"""
Feature: Deleting sections/subsections/units
"""
__test__ = True
def test_delete_section(self):
"""
Scenario: Delete section
Given that I am on the course outline
When I click the delete button for a section on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the section
When I click "Yes, I want to delete this component"
Then the confirmation message should close
And the section should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
def test_cancel_delete_section(self):
"""
Scenario: Cancel delete of section
        Given that I clicked the delete button for a section on the course outline
And I received a confirmation message, asking me if I really want to delete the component
When I click "Cancel"
Then the confirmation message should close
And the section should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.sections()), 1)
self.course_outline_page.section_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.sections()), 1)
def test_delete_subsection(self):
"""
Scenario: Delete subsection
Given that I am on the course outline
When I click the delete button for a subsection on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the subsection
When I click "Yes, I want to delete this component"
        Then the confirmation message should close
And the subsection should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)
def test_cancel_delete_subsection(self):
"""
Scenario: Cancel delete of subsection
Given that I clicked the delete button for a subsection on the course outline
And I received a confirmation message, asking me if I really want to delete the subsection
When I click "cancel"
Then the confirmation message should close
And the subsection should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
def test_delete_unit(self):
"""
Scenario: Delete unit
Given that I am on the course outline
When I click the delete button for a unit on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the unit
When I click "Yes, I want to delete this unit"
Then the confirmation message should close
And the unit should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)
def test_cancel_delete_unit(self):
"""
Scenario: Cancel delete of unit
Given that I clicked the delete button for a unit on the course outline
And I received a confirmation message, asking me if I really want to delete the unit
When I click "Cancel"
Then the confirmation message should close
And the unit should remain in the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
def test_delete_all_no_content_message(self):
"""
Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
Given that I delete all sections, subsections, and units in a course
When I visit the course outline
Then I will see a message that says, "You haven't added any content to this course yet"
        And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.has_no_content_message)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_3')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
"""
Feature: Courses with multiple sections can expand and collapse all sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with a course with two sections """
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 2')
)
)
)
def verify_all_sections(self, collapsed):
"""
Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.
"""
for section in self.course_outline_page.sections():
self.assertEqual(collapsed, section.is_collapsed)
def toggle_all_sections(self):
"""
Toggles the expand collapse state of all sections.
"""
for section in self.course_outline_page.sections():
section.expand_subsection()
def test_expanded_by_default(self):
"""
Scenario: The default layout for the outline page is to show sections in expanded view
Given I have a course with sections
When I navigate to the course outline page
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with multiple sections
And I navigate to the course outline page
        When I confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
for section in self.course_outline_page.sections():
section.delete()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_collapse_all_when_all_expanded(self):
"""
Scenario: Collapse all sections when all sections are expanded
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_collapse_all_when_some_expanded(self):
"""
Scenario: Collapsing all sections when 1 or more sections are already collapsed
Given I navigate to the outline page of a course with sections
And all sections are expanded
When I collapse the first section
And I click the "Collapse All Sections" link
Then I see the "Expand All Sections" link
And all sections are collapsed
"""
self.course_outline_page.visit()
self.verify_all_sections(collapsed=False)
self.course_outline_page.section_at(0).expand_subsection()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.verify_all_sections(collapsed=True)
def test_expand_all_when_all_collapsed(self):
"""
Scenario: Expanding all sections when all sections are collapsed
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
def test_expand_all_when_some_collapsed(self):
"""
Scenario: Expanding all sections when 1 or more sections are already expanded
Given I navigate to the outline page of a course with multiple sections
And I click the "Collapse All Sections" link
When I expand the first section
And I click the "Expand All Sections" link
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
self.course_outline_page.section_at(0).expand_subsection()
self.course_outline_page.toggle_expand_collapse()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.verify_all_sections(collapsed=False)
@attr('shard_3')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
"""
Feature: Courses with a single section can expand and collapse all sections.
"""
__test__ = True
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Collapse link is removed after last section of a course is deleted
Given I have a course with one section
And I navigate to the course outline page
        When I confirm all alerts
And I press the "section" delete icon
Then I do not see the "Collapse All Sections" link
And I will see a message that says "You haven't added any content to this course yet"
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).delete()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.assertTrue(self.course_outline_page.has_no_content_message)
def test_old_subsection_stays_collapsed_after_creation(self):
"""
Scenario: Collapsed subsection stays collapsed after creating a new subsection
Given I have a course with one section and subsection
And I navigate to the course outline page
Then the subsection is collapsed
And when I create a new subsection
Then the first subsection is collapsed
And the second subsection is expanded
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.course_outline_page.section_at(0).add_subsection()
self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_3')
class ExpandCollapseEmptyTest(CourseOutlineTest):
"""
Feature: Courses with no sections initially can expand and collapse all sections after addition.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_no_expand_link_for_empty_course(self):
"""
Scenario: Expand/collapse for a course with no sections
Given I have a course with no sections
When I navigate to the course outline page
Then I do not see the "Collapse All Sections" link
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
def test_link_appears_after_section_creation(self):
"""
Scenario: Collapse link appears after creating first section of a course
Given I have a course with no sections
When I navigate to the course outline page
And I add a section
Then I see the "Collapse All Sections" link
And all sections are expanded
"""
self.course_outline_page.visit()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
self.course_outline_page.add_section_from_top_button()
self.assertEquals(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class DefaultStatesEmptyTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with an empty course
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
""" Start with an empty course """
pass
def test_empty_course_message(self):
"""
Scenario: Empty course state
Given that I am in a course with no sections, subsections, nor units
When I visit the course outline
Then I will see a message that says "You haven't added any content to this course yet"
And see a + Add Section button
"""
self.course_outline_page.visit()
self.assertTrue(self.course_outline_page.has_no_content_message)
self.assertTrue(self.course_outline_page.bottom_add_section_button.is_present())
@attr('shard_3')
class DefaultStatesContentTest(CourseOutlineTest):
"""
Feature: Misc course outline default states/actions when starting with a course with content
"""
__test__ = True
def test_view_live(self):
"""
Scenario: View Live version from course outline
Given that I am on the course outline
When I click the "View Live" button
Then a new tab will open to the course on the LMS
"""
self.course_outline_page.visit()
self.course_outline_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 3)
self.assertEqual(courseware.xblock_component_type(0), 'problem')
self.assertEqual(courseware.xblock_component_type(1), 'html')
self.assertEqual(courseware.xblock_component_type(2), 'discussion')
@attr('shard_3')
class UnitNavigationTest(CourseOutlineTest):
"""
Feature: Navigate to units
"""
__test__ = True
def test_navigate_to_unit(self):
"""
Scenario: Click unit name to navigate to unit page
Given that I have expanded a section/subsection so I can see unit names
When I click on a unit name
Then I will be taken to the appropriate unit page
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
self.assertTrue(unit.is_browser_on_page)
@attr('shard_3')
class PublishSectionTest(CourseOutlineTest):
"""
Feature: Publish sections.
"""
__test__ = True
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with 2 subsections inside a single section.
The first subsection has 2 units, and the second subsection has one unit.
"""
self.courseware = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
course_fixture.add_children(
XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', UNIT_NAME),
XBlockFixtureDesc('vertical', 'Test Unit 2'),
),
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('vertical', 'Test Unit 3'),
),
),
)
def test_unit_publishing(self):
"""
Scenario: Can publish a unit and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish button for the first unit, subsection, section
When I publish the first unit
Then I see that publish button for the first unit disappears
And I see publish buttons for subsection, section
And I see the changed content in LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
unit.publish()
self.assertFalse(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
def test_subsection_publishing(self):
"""
Scenario: Can publish a subsection and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish button for the unit, subsection, section
When I publish the first subsection
Then I see that publish button for the first subsection disappears
And I see that publish buttons disappear for the child units of the subsection
And I see publish button for section
And I see the changed content in LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(unit.publish_action)
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
self.assertFalse(unit.publish_action)
self.assertFalse(subsection.publish_action)
self.assertTrue(section.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
def test_section_publishing(self):
"""
Scenario: Can publish a section and see published content in LMS
Given I have a section with 2 subsections and 3 unpublished units
When I go to the course outline
Then I see publish button for the unit, subsection, section
When I publish the section
        Then I see that all publish buttons disappear
And I see the changed content in LMS
"""
self._add_unpublished_content()
self.course_outline_page.visit()
section, subsection, unit = self._get_items()
self.assertTrue(subsection.publish_action)
self.assertTrue(section.publish_action)
self.assertTrue(unit.publish_action)
self.course_outline_page.section(SECTION_NAME).publish()
self.assertFalse(subsection.publish_action)
self.assertFalse(section.publish_action)
self.assertFalse(unit.publish_action)
self.courseware.visit()
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(1, self.courseware.num_xblock_components)
self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
self.assertEqual(1, self.courseware.num_xblock_components)
def _add_unpublished_content(self):
"""
Adds unpublished HTML content to first three units in the course.
"""
for index in xrange(3):
self.course_fixture.create_xblock(
self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
)
def _get_items(self):
"""
Returns first section, subsection, and unit on the page.
"""
section = self.course_outline_page.section(SECTION_NAME)
subsection = section.subsection(SUBSECTION_NAME)
unit = subsection.expand_subsection().unit(UNIT_NAME)
return (section, subsection, unit)
@attr('shard_3')
class DeprecationWarningMessageTest(CourseOutlineTest):
"""
Feature: Verify deprecation warning message.
"""
HEADING_TEXT = 'This course uses features that are no longer supported.'
COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
ADVANCE_MODULES_REMOVE_TEXT = ('To avoid errors, edX strongly recommends that you remove unsupported features '
'from the course advanced settings. To do this, go to the Advanced Settings '
'page, locate the "Advanced Module List" setting, and then delete the following '
'modules from the list.')
DEFAULT_DISPLAYNAME = "Deprecated Component"
def _add_deprecated_advance_modules(self, block_types):
"""
Add `block_types` into `Advanced Module List`
Arguments:
block_types (list): list of block types
"""
self.advanced_settings.visit()
self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})
def _create_deprecated_components(self):
"""
Create deprecated components.
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc('combinedopenended', "Open", data=load_data_str('ora_peer_problem.xml'))
)
self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('peergrading', 'Peer'))
def _verify_deprecation_warning_info(
self,
deprecated_blocks_present,
components_present,
components_display_name_list=None,
deprecated_modules_list=None
):
"""
Verify deprecation warning
Arguments:
            deprecated_blocks_present (bool): advance modules remove text and
                deprecated modules list shown if True else False
components_present (bool): components list shown if True else False
components_display_name_list (list): list of components display name
deprecated_modules_list (list): list of deprecated advance modules
"""
self.assertTrue(self.course_outline_page.deprecated_warning_visible)
self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
if deprecated_blocks_present:
self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
self.assertEqual(self.course_outline_page.components_visible, components_present)
if components_present:
self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)
def test_no_deprecation_warning_message_present(self):
"""
Scenario: Verify that deprecation warning message is not shown if ORA1
advance modules are not present and also no ORA1 component exist in
course outline.
        When I go to the course outline
Then I don't see ORA1 deprecated warning
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.deprecated_warning_visible)
def test_deprecation_warning_message_present(self):
"""
Scenario: Verify deprecation warning message if ORA1 advance modules
and ORA1 components are present.
Given I have ORA1 advance modules present in `Advanced Module List`
And I have created 2 ORA1 components
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=True,
components_display_name_list=['Open', 'Peer'],
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_deprecation_warning_with_no_displayname(self):
"""
Scenario: Verify deprecation warning message if ORA1 components are present.
Given I have created 1 ORA1 deprecated component
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see list of ORA1 components with correct message
"""
parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # Create a deprecated ORA1 component with an empty display_name and make
        # sure the deprecation warning lists it under the default display name
self.course_fixture.create_xblock(
parent_vertical.locator,
XBlockFixtureDesc(category='combinedopenended', display_name="", data=load_data_str('ora_peer_problem.xml'))
)
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=[self.DEFAULT_DISPLAYNAME],
)
def test_warning_with_ora1_advance_modules_only(self):
"""
Scenario: Verify that deprecation warning message is shown if only
ORA1 advance modules are present and no ORA1 component exist.
Given I have ORA1 advance modules present in `Advanced Module List`
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I see correct ORA1 deprecated warning advance modules remove text
And I don't see list of ORA1 components
"""
self._add_deprecated_advance_modules(block_types=['peergrading', 'combinedopenended'])
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=True,
components_present=False,
deprecated_modules_list=['peergrading', 'combinedopenended']
)
def test_warning_with_ora1_components_only(self):
"""
Scenario: Verify that deprecation warning message is shown if only
ORA1 component exist and no ORA1 advance modules are present.
Given I have created two ORA1 components
When I go to course outline
Then I see ORA1 deprecated warning
And I see correct ORA1 deprecated warning heading text
And I don't see ORA1 deprecated warning advance modules remove text
And I see list of ORA1 components with correct display names
"""
self._create_deprecated_components()
self.course_outline_page.visit()
self._verify_deprecation_warning_info(
deprecated_blocks_present=False,
components_present=True,
components_display_name_list=['Open', 'Peer']
)
|
ajbrock/Neural-Photo-Editor | refs/heads/master | NPE.py | 1 | ### Neural Photo Editor
# A Brock, 2016
### Imports
from Tkinter import * # Note that I dislike the * on the Tkinter import, but all the tutorials seem to do that so I stuck with it.
from tkColorChooser import askcolor # This produces an OS-dependent color selector. I like the windows one best, and can't stand the linux one.
from collections import OrderedDict
from PIL import Image, ImageTk
import numpy as np
import scipy.misc
import scipy.ndimage  # provides scipy.ndimage.filters.gaussian_filter, used by the neural painter below
from API import IAN
### Step 1: Create theano functions
# Initialize model
model = IAN(config_path = 'IAN_simple.py', dnn = True)
### Prepare GUI functions
print('Compiling remaining functions')
# Create master
master = Tk()
master.title( "Neural Photo Editor" )
# RGB interpreter convenience function
def rgb(r,g,b):
return '#%02x%02x%02x' % (r,g,b)
# Convert RGB to bi-directional RB scale.
def rb(i):
# return rgb(int(i*int(i>0)),0, -int(i*int(i<0)))
return rgb(255+max(int(i*int(i<0)),-255),255-min(abs(int(i)),255), 255-min(int(i*int(i>0)),255))
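# Illustrative values (computed from the formula above): rb(0) is white,
# rb(255) is pure red, rb(-255) is pure blue; intermediate magnitudes fade
# from white toward the respective primary.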
# Convenience functions to go from [0,255] to [-1,1] and [-1,1] to [0,255]
def to_tanh(input):
return 2.0*(input/255.0)-1.0
def from_tanh(input):
return 255.0*(input+1)/2.0
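# Round-trip example: to_tanh maps [0, 127.5, 255] to [-1.0, 0.0, 1.0], and
# from_tanh maps them back (up to floating-point rounding).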
# Ground truth image
GIM=np.asarray(np.load('CelebAValid.npz')['arr_0'][420])
# Image for modification
IM = GIM
# Reconstruction
RECON = IM
# Error between reconstruction and current image
ERROR = np.zeros(np.shape(IM),dtype=np.float32)
# Change between Recon and Current
DELTA = np.zeros(np.shape(IM),dtype=np.float32)
# User-Painted Mask, currently not implemented.
USER_MASK=np.mean(DELTA,axis=0)
# Are we operating on a photo or a sample?
SAMPLE_FLAG=0
### Latent Canvas Variables
# Latent Square dimensions
dim = [10,10]
# Squared Latent Array
Z = np.zeros((dim[0],dim[1]),dtype=np.float32)
# Pixel-wise resolution for latent canvas
res = 16
# Array that holds the actual latent canvas
r = np.zeros((res*dim[0],res*dim[1]),dtype=np.float32)
# Painted rectangles for free-form latent painting
painted_rects = []
# Actual latent rectangles
rects = np.zeros((dim[0],dim[1]),dtype=int)
### Output Display Variables
# RGB paintbrush array
myRGB = np.zeros((1,3,64,64),dtype=np.float32)
# Canvas width and height
canvas_width = 400
canvas_height = 400
# border width
bd =2
# Brush color
color = IntVar()
color.set(0)
# Brush size
d = IntVar()
d.set(12)
# Selected Color
mycol = (0,0,0)
# Function to update display
def update_photo(data=None,widget=None):
global Z
if data is None: # By default, assume we're updating with the current value of Z
data = np.repeat(np.repeat(np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0])),4,1),4,2)
else:
data = np.repeat(np.repeat(np.uint8(data),4,1),4,2)
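    # In both branches np.repeat performs a nearest-neighbour 4x upscale of the
    # 64x64 model output to the 256x256 image drawn on the display canvas.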
if widget is None:
widget = output
# Reshape image to canvas
mshape = (4*64,4*64,1)
im = Image.fromarray(np.concatenate([np.reshape(data[0],mshape),np.reshape(data[1],mshape),np.reshape(data[2],mshape)],axis=2),mode='RGB')
# Make sure photo is an object of the current widget so the garbage collector doesn't wreck it
widget.photo = ImageTk.PhotoImage(image=im)
widget.create_image(0,0,image=widget.photo,anchor=NW)
widget.tag_raise(pixel_rect)
# Function to update the latent canvas.
def update_canvas(widget=None):
global r, Z, res, rects, painted_rects
if widget is None:
widget = w
# Update display values
r = np.repeat(np.repeat(Z,r.shape[0]//Z.shape[0],0),r.shape[1]//Z.shape[1],1)
# If we're letting freeform painting happen, delete the painted rectangles
for p in painted_rects:
w.delete(p)
painted_rects = []
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
w.itemconfig(int(rects[i,j]),fill = rb(255*Z[i,j]),outline = rb(255*Z[i,j]))
# Function to move the paintbrush
def move_mouse( event ):
global output
# using a rectangle width equivalent to d/4 (so 1-16)
# First, get location and extent of local patch
x,y = event.x//4,event.y//4
brush_width = ((d.get()//4)+1)
# if x is near the left corner, then the minimum x is dependent on how close it is to the left
xmin = max(min(x-brush_width//2,64 - brush_width),0) # This 64 may need to change if the canvas size changes
xmax = xmin+brush_width
ymin = max(min(y-brush_width//2,64 - brush_width),0) # This 64 may need to change if the canvas size changes
ymax = ymin+brush_width
# update output canvas
output.coords(pixel_rect,4*xmin,4*ymin,4*xmax,4*ymax)
output.tag_raise(pixel_rect)
output.itemconfig(pixel_rect,outline = rgb(mycol[0],mycol[1],mycol[2]))
### Optional functions for the Neural Painter
# Localized Gaussian Smoothing Kernel
# Use this if you want changes to MASK to be more localized to the brush location in some sense
def gk(c1,r1,c2,r2):
# First, create X and Y arrays indicating distance to the boundaries of the paintbrush
# In this current context, im is the ordinal number of pixels (64 typically)
sigma = 0.3
im = 64
x = np.repeat([np.concatenate([np.mgrid[-c1:0],np.zeros(c2-c1),np.mgrid[1:1+im-c2]])],im,axis=0)
y = np.repeat(np.vstack(np.concatenate([np.mgrid[-r1:0],np.zeros(r2-r1),np.mgrid[1:1+im-r2]])),im,axis=1)
g = np.exp(-(x**2/float(im)+y**2/float(im))/(2*sigma**2))
return np.repeat([g],3,axis=0) # remove the 3 if you want to apply this to mask rather than an RGB channel
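# Illustrative: gk(20, 20, 30, 30) returns a (3, 64, 64) array that equals 1.0
# over the 10-pixel brush box and decays smoothly with distance outside it.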
# This function reduces the likelihood of a change based on how close each individual pixel is to a maximal value.
# Consider conditioning this based on the gK value and the requested color. I.E. instead of just a flat distance from 128,
# have it be a difference from the expected color at a given location. This could also be used to "weight" the image towards staying the same.
def upperlim(image):
h=1
return (1.0/((1.0/h)*np.abs(image-128)+1))
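# With h=1 this is 1/(|image-128|+1): upperlim(128) == 1.0, so mid-gray pixels
# may change freely, while upperlim(0) == 1/129 and upperlim(255) == 1/128, so
# near-saturated pixels are barely allowed to move.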
# Similar to upperlim, this function changes the value of the correction term if it's going to move pixels too close to a maximal value
def dampen(input,correct):
# The closer input+correct is to -1 or 1, the further it is from 0.
# We're okay with almost all values (i.e. between 0 and 0.8) but as we approach 1 we want to slow the change
thresh = 0.75
m = (input+correct)>thresh
return -input*m+correct*(1-m)+thresh*m
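# Where input+correct exceeds thresh, the expression reduces to thresh-input,
# i.e. the correction is clipped so that input+correction == thresh; elsewhere
# the correction passes through unchanged.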
### Neural Painter Function
def paint( event ):
    global Z, output, myRGB, IM, ERROR, RECON, USER_MASK, SAMPLE_FLAG, DELTA
# Move the paintbrush
move_mouse(event)
# Define a gradient descent step-size
weight = 0.05
# Get paintbrush location
[x1,y1,x2,y2] = [coordinate//4 for coordinate in output.coords(pixel_rect)]
# Get dIM/dZ that minimizes the difference between IM and RGB in the domain of the paintbrush
temp = np.asarray(model.imgradRGB(x1,y1,x2,y2,np.float32(to_tanh(myRGB)),np.float32([Z.flatten()]))[0])
grad = temp.reshape((10,10))*(1+(x2-x1))
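    # The (1+(x2-x1)) factor scales the gradient with the brush extent, so a
    # wider brush takes proportionally larger steps in latent space.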
# Update Z
Z -=weight*grad
# If operating on a sample, update sample
if SAMPLE_FLAG:
update_canvas(w)
update_photo(None,output)
# Else, update photo
else:
# Difference between current image and reconstruction
DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
# Not-Yet-Implemented User Mask feature
# USER_MASK[y1:y2,x1:x2]+=0.05
# Get MASK
MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
# Optionally dampen D
# D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
# Update image
D = MASK*DELTA+(1-MASK)*ERROR
IM = np.uint8(from_tanh(to_tanh(RECON)+D))
# Pass updates
update_canvas(w)
update_photo(IM,output)
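# Summary of the update above (illustrative notation, G = the generator):
#   Z <- Z - 0.05 * dIM/dZ              # step latents toward the requested RGB
#   DELTA = G(Z) - RECON                # change the generator wants to make
#   IM = RECON + MASK*DELTA + (1-MASK)*ERROR
# so the smoothed MASK confines the generator's edit to where it is large,
# while the stored reconstruction ERROR preserves the original detail elsewhere.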
# Load an image and infer/reconstruct from it. Update this with a function to load your own images if you want to edit
# non-celebA photos.
def infer():
global Z,w,GIM,IM,ERROR,RECON,DELTA,USER_MASK,SAMPLE_FLAG
val = myentry.get()
try:
val = int(val)
GIM = np.asarray(np.load('CelebAValid.npz')['arr_0'][val])
IM = GIM
except ValueError:
print "No input"
val = 420
GIM = np.asarray(np.load('CelebAValid.npz')['arr_0'][val])
IM = GIM
# myentry.delete(0, END) # Optionally, clear entry after typing it in
# Reset Delta
DELTA = np.zeros(np.shape(IM),dtype=np.float32)
# Infer and reshape latents. This can be done without an intermediate variable if desired
s = model.encode_images(np.asarray([to_tanh(IM)],dtype=np.float32))
Z = np.reshape(s[0],np.shape(Z))
# Get reconstruction
RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
# Get error
ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
# Reset user mask
USER_MASK*=0
# Clear the sample flag
SAMPLE_FLAG=0
# Update photo
update_photo(IM,output)
update_canvas(w)
# Paint directly into the latent space
def paint_latents( event ):
global r, Z, output,painted_rects,MASK,USER_MASK,RECON
# Get extent of latent paintbrush
x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )
selected_widget = event.widget
# Paint in latent space and update Z
painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()),outline = rb(color.get()) ))
r[max((y1-bd),0):min((y2-bd),r.shape[0]),max((x1-bd),0):min((x2-bd),r.shape[1])] = color.get()/255.0;
Z = np.asarray([np.mean(o) for v in [np.hsplit(h,Z.shape[0])\
for h in np.vsplit((r),Z.shape[1])]\
for o in v]).reshape(Z.shape[0],Z.shape[1])
if SAMPLE_FLAG:
update_photo(None,output)
update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
else:
DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
# D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
D = MASK*DELTA+(1-MASK)*ERROR
IM = np.uint8(from_tanh(to_tanh(RECON)+D))
update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
update_photo(IM,output)
# Scroll to lighten or darken an image patch
def scroll( event ):
global r,Z,output
# Optional alternate method to get a single X Y point
# x,y = np.floor( ( event.x - (output.winfo_rootx() - master.winfo_rootx()) ) / 4), np.floor( ( event.y - (output.winfo_rooty() - master.winfo_rooty()) ) / 4)
weight = 0.1
[x1,y1,x2,y2] = [coordinate//4 for coordinate in output.coords(pixel_rect)]
grad = np.reshape(model.imgrad(x1,y1,x2,y2,np.float32([Z.flatten()]))[0],Z.shape)*(1+(x2-x1))
Z+=np.sign(event.delta)*weight*grad
update_canvas(w)
update_photo(None,output)
# Samples in the latent space
def sample():
global Z, output,RECON,IM,ERROR,SAMPLE_FLAG
Z = np.random.randn(Z.shape[0],Z.shape[1])
# Z = np.random.uniform(low=-1.0,high=1.0,size=(Z.shape[0],Z.shape[1])) # Optionally get uniform sample
# Update reconstruction and error
RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
update_canvas(w)
SAMPLE_FLAG=1
update_photo(None,output)
# Reset to ground-truth image
def Reset():
global GIM,IM,Z, DELTA,RECON,ERROR,USER_MASK,SAMPLE_FLAG
IM = GIM
Z = np.reshape(model.encode_images(np.asarray([to_tanh(IM)],dtype=np.float32))[0],np.shape(Z))
DELTA = np.zeros(np.shape(IM),dtype=np.float32)
RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
USER_MASK*=0
SAMPLE_FLAG=0
update_canvas(w)
update_photo(IM,output)
def UpdateGIM():
global GIM,IM
GIM = IM
Reset() # Recalc the latent space for the new ground-truth image.
# Change brush size
def update_brush(event):
brush.create_rectangle(0,0,25,25,fill=rgb(255,255,255),outline=rgb(255,255,255))
brush.create_rectangle( int(12.5-d.get()/4.0), int(12.5-d.get()/4.0), int(12.5+d.get()/4.0), int(12.5+d.get()/4.0), fill = rb(color.get()),outline = rb(color.get()) )
# assign color picker values to myRGB
def getColor():
global myRGB, mycol
col = askcolor(mycol)
if col[0] is None:
return # Don't change color if Cancel pressed.
mycol = col[0]
for i in xrange(3): myRGB[0,i,:,:] = mycol[i] # fill each RGB channel with the picked color
# Optional function to "lock" latents so that gradients are always evaluated with respect to the locked Z
# def lock():
# global Z,locked, Zlock, lockbutton
# lockbutton.config(relief='raised' if locked else 'sunken')
# Zlock = Z if not locked else Zlock
# locked = not locked
# lockbutton = Button(f, text="Lock", command=lock,relief='raised')
# lockbutton.pack(side=LEFT)
### Prepare GUI
master.bind("<MouseWheel>",scroll)
# Prepare drawing canvas
f=Frame(master)
f.pack(side=TOP)
output = Canvas(f,name='output',width=64*4,height=64*4)
output.bind('<Motion>',move_mouse)
output.bind('<B1-Motion>', paint )
pixel_rect = output.create_rectangle(0,0,4,4,outline = 'yellow')
output.pack()
# Prepare latent canvas
f = Frame(master,width=res*dim[0],height=dim[1]*10)
f.pack(side=TOP)
w = Canvas(f,name='canvas', width=res*dim[0],height=res*dim[1])
w.bind( "<B1-Motion>", paint_latents )
# Produce painted rectangles
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
rects[i,j] = w.create_rectangle( j*res, i*res, (j+1)*res, (i+1)*res, fill = rb(255*Z[i,j]),outline = rb(255*Z[i,j]) )
# w.create_rectangle( 0,0,res*dim[0],res*dim[1], fill = rgb(255,255,255),outline=rgb(255,255,255)) # Optionally Initialize canvas to white
w.pack()
# Color gradient
gradient = Canvas(master, width=400, height=20)
gradient.pack(side=TOP)
# gradient.grid(row=i+1)
for j in range(-200,200):
gradient.create_rectangle(j*255/200+200,0,j*255/200+201,20,fill = rb(j*255/200),outline=rb(j*255/200))
# Color scale slider
f= Frame(master)
Scale(master, from_=-255, to=255,length=canvas_width, variable = color,orient=HORIZONTAL,showvalue=0,command=update_brush).pack(side=TOP)
# Buttons and brushes
Button(f, text="Sample", command=sample).pack(side=LEFT)
Button(f, text="Reset", command=Reset).pack(side=LEFT)
Button(f, text="Update", command=UpdateGIM).pack(side=LEFT)
brush = Canvas(f,width=25,height=25)
Scale(f, from_=0, to=64,length=100,width=25, variable = d,orient=HORIZONTAL,showvalue=0,command=update_brush).pack(side=LEFT) # Brush diameter scale
brush.pack(side=LEFT)
inferbutton = Button(f, text="Infer", command=infer)
inferbutton.pack(side=LEFT)
colorbutton=Button(f,text='Col',command=getColor)
colorbutton.pack(side=LEFT)
myentry = Entry()
myentry.pack(side=LEFT)
f.pack(side=TOP)
print('Running')
# Reset and infer to kick it off
Reset()
infer()
mainloop()
|
Sphere2013/GlobeCrypto | refs/heads/master | contrib/pyminer/pyminer.py | 8 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
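# Illustrative example of the three helpers above (not executed here):
#   hex(bytereverse(0x12345678))  ->  '0x78563412'
# bufreverse() applies that byte swap to each 32-bit word of a buffer, and
# wordreverse() reverses the order of the words themselves; together they
# convert between the byte order used by the getwork RPC and the order
# expected when hashing the header.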
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
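# A minimal CONFIG-FILE might look like this (illustrative; '#' lines are
# skipped, everything else is key=value, and the defaults below fill any gaps):
#   host=127.0.0.1
#   port=8638
#   rpcuser=someuser
#   rpcpass=somepass
#   threads=2
#   hashmeter=1
#   scantime=30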
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8638
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
kawasaki2013/getting-started-python | refs/heads/master | optional-container-engine/bookshelf/__init__.py | 2 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from flask import current_app, Flask, redirect, request, session, url_for
import httplib2
from oauth2client.contrib.flask_util import UserOAuth2
oauth2 = UserOAuth2()
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
app.debug = debug
app.testing = testing
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
# Setup the data model.
with app.app_context():
model = get_model()
model.init_app(app)
# Create a health check handler. Health checks are used when running on
# Google Compute Engine by the load balancer to determine which instances
# can serve traffic. Google App Engine also uses health checking, but
# accepts any non-500 response as healthy.
@app.route('/_ah/health')
def health_check():
return 'ok', 200
# Initialize the OAuth2 helper.
oauth2.init_app(
app,
scopes=['email', 'profile'],
authorize_callback=_request_user_info)
# Add a logout handler.
@app.route('/logout')
def logout():
# Delete the user's profile and the credentials stored by oauth2.
del session['profile']
session.modified = True
oauth2.storage.delete()
return redirect(request.referrer or '/')
# Register the Bookshelf CRUD blueprint.
from .crud import crud
app.register_blueprint(crud, url_prefix='/books')
# Add a default root route.
@app.route("/")
def index():
return redirect(url_for('crud.list'))
# Add an error handler. This is useful for debugging the live application,
# however, you should disable the output of the exception for production
# applications.
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
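# Illustrative usage of create_app() (a sketch; in the real sample the entry
# point is a separate module that supplies a config object defining
# DATA_BACKEND and the OAuth2 settings):
#
#   import bookshelf, config          # 'config' is assumed, not defined here
#   app = bookshelf.create_app(config, debug=True)
#   app.run(host='127.0.0.1', port=8080)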
def get_model():
model_backend = current_app.config['DATA_BACKEND']
if model_backend == 'cloudsql':
from . import model_cloudsql
model = model_cloudsql
elif model_backend == 'datastore':
from . import model_datastore
model = model_datastore
elif model_backend == 'mongodb':
from . import model_mongodb
model = model_mongodb
else:
raise ValueError(
"No appropriate databackend configured. "
"Please specify datastore, cloudsql, or mongodb")
return model
def _request_user_info(credentials):
"""
Makes an HTTP request to the Google+ API to retrieve the user's basic
profile information, including full name and photo, and stores it in the
Flask session.
"""
http = httplib2.Http()
credentials.authorize(http)
resp, content = http.request(
'https://www.googleapis.com/plus/v1/people/me')
if resp.status != 200:
current_app.logger.error(
"Error while obtaining user profile: %s" % resp)
return None
session['profile'] = json.loads(content.decode('utf-8'))
|
Pirata-Repository/Pirata | refs/heads/master | plugin.video.1channel/default.py | 1 | """
1Channel XBMC Addon
Copyright (C) 2012 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# pylint: disable=C0301
# pylint: disable=F0401
# pylint: disable=W0621
import re
import os
import sys
import json
import string
import urllib
import datetime
import metapacks
import xbmc
import xbmcgui
import xbmcvfs
import xbmcaddon
import xbmcplugin
from addon.common.addon import Addon
try: from metahandler import metahandlers
except: xbmc.executebuiltin("XBMC.Notification(%s,%s,2000)" % ('Import Failed','metahandler')); pass
import utils
from urllib2 import HTTPError
from pw_scraper import PW_Scraper, PW_Error
from db_utils import DB_Connection
from pw_dispatcher import PW_Dispatcher
from utils import MODES
from utils import SUB_TYPES
import gui_utils
global urlresolver
_1CH = Addon('plugin.video.1channel', sys.argv)
META_ON = _1CH.get_setting('use-meta') == 'true'
FANART_ON = _1CH.get_setting('enable-fanart') == 'true'
USE_POSTERS = _1CH.get_setting('use-posters') == 'true'
POSTERS_FALLBACK = _1CH.get_setting('posters-fallback') == 'true'
THEME_LIST = ['Classic', 'Glossy_Black', 'PrimeWire', 'Firestorm']
THEME = THEME_LIST[int(_1CH.get_setting('theme'))]
if xbmc.getCondVisibility('System.HasAddon(script.1channel.themepak)'):
themepak_path = xbmcaddon.Addon('script.1channel.themepak').getAddonInfo('path')
else:
themepak_path=''
THEME_PATH = os.path.join(themepak_path, 'art', 'themes', THEME)
ICON_PATH = os.path.join(_1CH.get_path(), 'icon.png')
FAV_ACTIONS = utils.enum(ADD='add', REMOVE='remove')
PL_SORT = ['added', 'alphabet', 'popularity']
REMOVE_TW_MENU='Remove from ToWatch List'
REMOVE_W_MENU='Remove from Watched list'
REMOVE_FAV_MENU='Remove from Favorites'
pw_scraper = PW_Scraper(_1CH.get_setting("username"),_1CH.get_setting("passwd"))
db_connection = DB_Connection()
pw_dispatcher = PW_Dispatcher()
__metaget__ = metahandlers.MetaData()
if not xbmcvfs.exists(_1CH.get_profile()):
try: xbmcvfs.mkdirs(_1CH.get_profile())
except: os.mkdir(_1CH.get_profile())
def art(name):
path = os.path.join(THEME_PATH, name)
if not xbmcvfs.exists(path):
path = path.replace('.png','.jpg')
return path
@pw_dispatcher.register(MODES.SAVE_FAV, ['fav_type', 'title', 'url'], ['year'])
def save_favorite(fav_type, title, url, year=''):
if fav_type != 'tv': fav_type = 'movie'
utils.log('Saving Favorite type: %s name: %s url: %s year: %s' % (fav_type, title, url, year))
try:
if utils.website_is_integrated():
pw_scraper.add_favorite(url)
else:
db_connection.save_favorite(fav_type, title, url, year)
builtin = 'XBMC.Notification(PrimeWire, %s added to Favorites, 5000, %s)'
except:
builtin = 'XBMC.Notification(PrimeWire,%s already in Favorites, 5000, %s)'
xbmc.executebuiltin(builtin % (title, ICON_PATH))
xbmc.executebuiltin('Container.Refresh')
@pw_dispatcher.register(MODES.DEL_FAV, ['url'])
def delete_favorite(url):
utils.log('Deleting Favorite: %s' % (url))
if utils.website_is_integrated():
pw_scraper.delete_favorite(url)
else:
db_connection.delete_favorite(url)
builtin = 'XBMC.Notification(PrimeWire, Favorite Removed, 3000, %s)'
xbmc.executebuiltin(builtin % ICON_PATH)
xbmc.executebuiltin('Container.Refresh')
# returns true if user chooses to resume, else false
def get_resume_choice(url):
question = 'Resume from %s' % (utils.format_time(db_connection.get_bookmark(url)))
return xbmcgui.Dialog().yesno('Resume?', question, '', '', 'Start from beginning', 'Resume')==1
@pw_dispatcher.register(MODES.GET_SOURCES, ['url', 'title'], ['year', 'img', 'imdbnum', 'dialog'])
def get_sources(url, title, year='', img='', imdbnum='', dialog=None, respect_auto=True):
url = urllib.unquote(url)
utils.log('Getting sources from: %s' % url)
primewire_url = url
resume = False
if db_connection.bookmark_exists(url):
resume = get_resume_choice(url)
pattern = r'tv-\d{1,10}-(.*)/season-(\d{1,4})-episode-(\d{1,4})'
match = re.search(pattern, url, re.IGNORECASE | re.DOTALL)
if match:
video_type = 'episode'
season = int(match.group(2))
episode = int(match.group(3))
else:
video_type = 'movie'
season = ''
episode = ''
if META_ON and video_type == 'movie' and not imdbnum:
imdbnum=pw_scraper.get_last_imdbnum()
__metaget__.update_meta('movie', title, imdb_id='',new_imdb_id=imdbnum, year=year)
_img = xbmc.getInfoImage('ListItem.Thumb')
if _img != "":
img = _img
hosters=pw_scraper.get_sources(url)
if not hosters:
_1CH.show_ok_dialog(['No sources were found for this item'], title='PrimeWire')
return
dbid=get_dbid(video_type, title, season, episode, year)
# auto play is on
if respect_auto and _1CH.get_setting('auto-play')=='true':
auto_try_sources(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid)
else: # autoplay is off, or respect_auto is False
# dialog is either True, False or None -- source-win is either Dialog or Directory
# If dialog is forced, or there is no force and it's set to dialog use the dialog
if dialog or (dialog is None and _1CH.get_setting('source-win') == 'Dialog'):
if _1CH.get_setting('filter-source') == 'true':
play_filtered_dialog(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid)
else:
play_unfiltered_dialog(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid)
# if dialog is forced off (0), or it's None, but source-win is Directory, then use a directory
else:
if _1CH.get_setting('filter-source') == 'true':
play_filtered_dir(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume)
else:
play_unfiltered_dir(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume)
def get_dbid(video_type, title, season='', episode='', year=''):
dbid=0
filter=''
#variable used to match title with closest len, if there is more than one match, the one with the closest title length is the winner,
#The Middle and Malcolm in the Middle in the same library would still match the corret title. Starts at high value and lowers
max_title_len_diff=1000
titleComp2=re.sub('[^a-zA-Z0-9]+','',title).lower()
#if it's a movie check if the titles match in the library, then pull the movieid
if video_type == 'movie':
if year: filter = '"filter": {"field": "year", "operator": "is", "value": "%s"},' % year
json_string = '{"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.GetMovies", "params": {%s "properties": ["title"], "limits": {"end": 10000}}}' % filter
result_key="movies"
id_key="movieid"
title_key="title"
#if it's a tvshow episode, filter to episodes with the given season and episode number, then match the tvshow title
if video_type == 'episode':
filter = '"filter": {"and":['
if year: filter += '{"field": "year", "operator": "is", "value": "%s"},' % year
filter+='{"field": "season", "operator": "is", "value": "%s"},' % season
filter+='{"field": "episode", "operator": "is", "value": "%s"}]},' % episode
json_string = '{"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.GetEpisodes", "params": {%s "properties": ["showtitle"], "limits": {"end": 10000}}}' % (filter)
result_key="episodes"
id_key="episodeid"
title_key="showtitle"
result=xbmc.executeJSONRPC(json_string)
resultObj=json.loads(result)
if not ('result' in resultObj and result_key in resultObj['result']): return None
for item in resultObj['result'][result_key]:
#converts titles to alphanumeric characters only, then checks whether the smaller title is contained in the larger, for example
#'Adventure Time' would match to 'Adventure tIME with FiNn and Jake_ (en) (4214)'
titleComp1=re.sub('[^a-zA-Z0-9]+','',item[title_key]).lower()
found_match=0
if len(titleComp1)>len(titleComp2):
if titleComp2 in titleComp1: found_match=1
else:
if titleComp1 in titleComp2: found_match=1
if found_match:
title_len_diff=abs(len(titleComp1)-len(titleComp2))
if title_len_diff<=max_title_len_diff:
max_title_len_diff=title_len_diff
if video_type == 'movie':
dbid=item[id_key]
utils.log('successfully matched dbid to movieid %s' % (dbid), xbmc.LOGDEBUG)
if video_type == 'episode':
dbid=item[id_key]
utils.log('successfully matched dbid to episodeid %s' % (dbid), xbmc.LOGDEBUG)
if dbid:
return dbid
else:
utils.log('Failed to recover dbid, type: %s, title: %s, season: %s, episode: %s' % (video_type, title, season, episode), xbmc.LOGDEBUG)
return None
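# Illustrative example of the matching above: both titles are reduced to
# lowercase alphanumerics before the containment test, e.g.
#   re.sub('[^a-zA-Z0-9]+', '', 'Adventure Time').lower() -> 'adventuretime'
# which is contained in 'adventuretimewithfinnandjakeen4214', so the library
# row matches; the smallest title-length difference then breaks any ties.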
def play_filtered_dialog(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid):
sources=[]
for item in hosters:
try:
label = utils.format_label_source(item)
hosted_media = urlresolver.HostedMediaFile(url=item['url'], title=label)
sources.append(hosted_media)
if item['multi-part']:
partnum = 2
for _ in item['parts']:
label = utils.format_label_source_parts(item, partnum)
hosted_media = urlresolver.HostedMediaFile(url=item['parts'][partnum - 2], title=label)
sources.append(hosted_media)
partnum += 1
except:
utils.log('Error while trying to resolve %s' % item['url'], xbmc.LOGERROR)
source = urlresolver.choose_source(sources)
if source:
source=source.get_url()
else:
return
PlaySource(source, title, video_type, primewire_url, resume, imdbnum, year, season, episode, dbid)
def play_unfiltered_dialog(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid):
sources=[]
for item in hosters:
label = utils.format_label_source(item)
sources.append(label)
dialog = xbmcgui.Dialog()
index = dialog.select('Choose your stream', sources)
if index > -1:
PlaySource(hosters[index]['url'], title,video_type, primewire_url, resume, imdbnum, year, season, episode, dbid)
else:
return
def play_filtered_dir(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume):
hosters_len = len(hosters)
for item in hosters:
#utils.log(item)
hosted_media = urlresolver.HostedMediaFile(url=item['url'])
if hosted_media:
label = utils.format_label_source(item)
_1CH.add_directory({'mode': MODES.PLAY_SOURCE, 'url': item['url'], 'title': title,
'img': img, 'year': year, 'imdbnum': imdbnum,
'video_type': video_type, 'season': season, 'episode': episode, 'primewire_url': primewire_url, 'resume': resume},
infolabels={'title': label}, properties={'resumeTime': str(0), 'totalTime': str(1)}, is_folder=False, img=img, fanart=art('fanart.png'), total_items=hosters_len)
if item['multi-part']:
partnum = 2
for part in item['parts']:
label = utils.format_label_source_parts(item, partnum)
partnum += 1
_1CH.add_directory({'mode': MODES.PLAY_SOURCE, 'url': part, 'title': title,
'img': img, 'year': year, 'imdbnum': imdbnum,
'video_type': video_type, 'season': season, 'episode': episode, 'primewire_url': primewire_url, 'resume': resume},
infolabels={'title': label}, properties={'resumeTime': str(0), 'totalTime': str(1)}, is_folder=False, img=img,
fanart=art('fanart.png'), total_items=hosters_len)
else:
utils.log('Skipping unresolvable source: %s' % (item['url']), xbmc.LOGWARNING)
_1CH.end_of_directory()
def play_unfiltered_dir(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume):
hosters_len=len(hosters)
for item in hosters:
#utils.log(item)
label = utils.format_label_source(item)
_1CH.add_directory({'mode': MODES.PLAY_SOURCE, 'url': item['url'], 'title': title,
'img': img, 'year': year, 'imdbnum': imdbnum,
'video_type': video_type, 'season': season, 'episode': episode, 'primewire_url': primewire_url, 'resume': resume},
infolabels={'title': label}, properties={'resumeTime': str(0), 'totalTime': str(1)}, is_folder=False, img=img, fanart=art('fanart.png'), total_items=hosters_len)
if item['multi-part']:
partnum = 2
for part in item['parts']:
label = utils.format_label_source_parts(item, partnum)
partnum += 1
_1CH.add_directory({'mode': MODES.PLAY_SOURCE, 'url': part, 'title': title,
'img': img, 'year': year, 'imdbnum': imdbnum,
'video_type': video_type, 'season': season, 'episode': episode, 'primewire_url': primewire_url, 'resume': resume},
infolabels={'title': label}, properties={'resumeTime': str(0), 'totalTime': str(1)}, is_folder=False, img=img,
fanart=art('fanart.png'), total_items=hosters_len)
_1CH.end_of_directory()
def auto_try_sources(hosters, title, img, year, imdbnum, video_type, season, episode, primewire_url, resume, dbid):
dlg = xbmcgui.DialogProgress()
line1 = 'Trying Source: '
dlg.create('PrimeWire')
total = len(hosters)
count = 1
success = False
while not (success or dlg.iscanceled() or xbmc.abortRequested):
for source in hosters:
if dlg.iscanceled(): return
percent = int((count * 100) / total)
label = utils.format_label_source(source)
dlg.update(percent, '', line1 + label)
utils.log('Trying Source: %s' % (source['host']), xbmc.LOGDEBUG)
if not PlaySource(source['url'], title, video_type, primewire_url, resume, imdbnum, year, season, episode, dbid):
dlg.update(percent, 'Playback Failed: %s' % (label), line1 + label)
utils.log('Source Failed: %s' % (source['host']), xbmc.LOGWARNING)
count += 1
else:
success = True
break # Playback was successful, break out of the loop
else:
utils.log('All sources failed to play', xbmc.LOGERROR)
dlg.close()
_1CH.show_ok_dialog(['All Sources Failed to Play'], title='PrimeWire')
break
@pw_dispatcher.register(MODES.PLAY_SOURCE, ['url', 'title', 'video_type', 'primewire_url', 'resume'], ['imdbnum', 'year', 'season', 'episode'])
def PlaySource(url, title, video_type, primewire_url, resume, imdbnum='', year='', season='', episode='', dbid=None):
utils.log('Attempting to play url: %s' % url)
stream_url = urlresolver.HostedMediaFile(url=url).resolve()
#If urlresolver returns false then the video url was not resolved.
if not stream_url or not isinstance(stream_url, basestring):
return False
win = xbmcgui.Window(10000)
win.setProperty('1ch.playing.title', title)
win.setProperty('1ch.playing.year', year)
win.setProperty('1ch.playing.imdb', imdbnum)
win.setProperty('1ch.playing.season', str(season))
win.setProperty('1ch.playing.episode', str(episode))
win.setProperty('1ch.playing.url',primewire_url)
#metadata is enabled
if META_ON:
if not dbid or int(dbid) <= 0:
#we're not playing from a library item
if video_type == 'episode':
meta = __metaget__.get_episode_meta(title, imdbnum, season, episode)
meta['TVShowTitle'] = title
meta['title'] = utils.format_tvshow_episode(meta)
elif video_type == 'movie':
meta = __metaget__.get_meta('movie', title, year=year)
meta['title'] = utils.format_label_movie(meta)
else: #metadata is not enabled
meta = {'label' : title, 'title' : title}
if dbid and int(dbid) > 0:
#we're playing from a library item
if video_type == 'episode':
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid" : %s, "properties" : ["title", "plot", "votes", "rating", "writer", "firstaired", "playcount", "runtime", "director", "productioncode", "season", "episode", "originaltitle", "showtitle", "lastplayed", "fanart", "thumbnail", "dateadded", "art"]}, "id": 1}'
cmd = cmd %(dbid)
meta = xbmc.executeJSONRPC(cmd)
meta = json.loads(meta)
meta = meta['result']['episodedetails']
meta['TVShowTitle'] = meta['showtitle']
meta['duration'] = meta['runtime']
meta['premiered'] = meta['firstaired']
meta['DBID']=dbid
meta['backdrop_url']=meta['fanart']
meta['cover_url']=meta['thumbnail']
if 'art' in meta:
meta['banner_url']=meta['art']['tvshow.banner']
del meta['art']
if video_type == 'movie':
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid" : %s, "properties" : ["title", "plot", "votes", "rating", "writer", "playcount", "runtime", "director", "originaltitle", "lastplayed", "fanart", "thumbnail", "file", "year", "dateadded"]}, "id": 1}'
cmd = cmd %(dbid)
meta = xbmc.executeJSONRPC(cmd)
meta = json.loads(meta)
meta = meta['result']['moviedetails']
meta['duration'] = meta['runtime']
meta['DBID']=dbid
meta['backdrop_url']=meta['fanart']
meta['cover_url']=meta['thumbnail']
win = xbmcgui.Window(10000)
win.setProperty('1ch.playing', json.dumps(meta))
art=make_art(video_type, meta)
listitem = xbmcgui.ListItem(path=url, iconImage=art['thumb'], thumbnailImage=art['thumb'])
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass # method doesn't exist in Frodo
resume_point=0
if resume:
resume_point = db_connection.get_bookmark(primewire_url)
utils.log("Playing Video from: %s secs" % (resume_point), xbmc.LOGDEBUG)
listitem.setProperty('ResumeTime', str(resume_point))
listitem.setProperty('Totaltime', str(99999)) # dummy value to force resume to work
listitem.setProperty('IsPlayable', 'true')
listitem.setInfo(type = "Video", infoLabels = meta)
if _1CH.get_setting('enable-axel')=='true':
utils.log('Using Axel Downloader', xbmc.LOGDEBUG)
try:
download_name=title
if season and episode: download_name += ' %sx%s' % (season,episode)
import axelproxy as proxy
axelhelper = proxy.ProxyHelper()
stream_url, download_id = axelhelper.create_proxy_url(stream_url, name=download_name)
win.setProperty('download_id', str(download_id))
utils.log('Axel Downloader: stream_url: %s, download_id: %s' % (stream_url, download_id), xbmc.LOGDEBUG)
except:
message='Axel [COLOR blue]ENABLED[/COLOR] but [COLOR red]NOT INSTALLED[/COLOR]'
xbmc.executebuiltin("XBMC.Notification(%s,%s,10000, %s)" % ('Axel Downloader',message, ICON_PATH))
listitem.setPath(stream_url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
return True
@pw_dispatcher.register(MODES.CH_WATCH, ['video_type', 'title', 'primewire_url', 'watched'], ['imdbnum', 'season', 'episode', 'year', 'dbid'])
def change_watched(video_type, title, primewire_url , watched, imdbnum='', season='', episode='', year='', dbid=None):
if watched==True:
overlay=7
whattodo='add'
else:
whattodo='delete'
overlay=6
# meta['dbid'] only gets set for strms
if dbid and int(dbid) > 0:
if video_type == 'episode':
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties": ["playcount"]}, "id": 1}'
cmd = cmd %(dbid)
result = json.loads(xbmc.executeJSONRPC(cmd))
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid": %s, "playcount": %s}, "id": 1}'
playcount = int(result['result']['episodedetails']['playcount']) + 1 if watched == True else 0
cmd = cmd %(dbid, playcount)
result = xbmc.executeJSONRPC(cmd)
xbmc.log('PrimeWire: Marking episode .strm as watched: %s' %result)
if video_type == 'movie':
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties": ["playcount"]}, "id": 1}'
cmd = cmd %(dbid)
result = json.loads(xbmc.executeJSONRPC(cmd))
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid": %s, "playcount": %s}, "id": 1}'
playcount = int(result['result']['moviedetails']['playcount']) + 1 if watched == True else 0
cmd = cmd %(dbid, playcount)
result = xbmc.executeJSONRPC(cmd)
xbmc.log('PrimeWire: Marking movie .strm as watched: %s' %result)
__metaget__.change_watched(video_type, title, imdbnum, season=season, episode=episode, year=year, watched=overlay)
if utils.website_is_integrated():
change_watched_website(primewire_url, whattodo, refresh=False)
xbmc.executebuiltin("XBMC.Container.Refresh")
@pw_dispatcher.register(MODES.CH_WATCH_WEB, ['primewire_url', 'action'], ['refresh'])
def change_watched_website( primewire_url , action, refresh = True):
if utils.website_is_integrated():
pw_scraper.change_watched(primewire_url, "watched", action)
if refresh: xbmc.executebuiltin("XBMC.Container.Refresh")
@pw_dispatcher.register(MODES.CH_TOWATCH_WEB, ['primewire_url', 'action'], ['refresh'])
def change_towatch_website( primewire_url , action, refresh = True):
if utils.website_is_integrated():
pw_scraper.change_watched(primewire_url, "towatch", action)
if refresh: xbmc.executebuiltin("XBMC.Container.Refresh")
@pw_dispatcher.register(MODES.PLAY_TRAILER, ['url'])
def PlayTrailer(url):
url = url.decode('base-64')
url = 'http://www.youtube.com/watch?v=%s&hd=1' % (url)
utils.log('Attempting to resolve and play trailer at %s' % url)
sources = []
hosted_media = urlresolver.HostedMediaFile(url=url)
sources.append(hosted_media)
source = urlresolver.choose_source(sources)
stream_url = source.resolve() if source else ''
xbmc.Player().play(stream_url)
@pw_dispatcher.register(MODES.SEARCH_QUERY, ['section', 'next_mode'])
@pw_dispatcher.register(MODES.DESC_QUERY, ['section', 'next_mode'])
def GetSearchQuery(section, next_mode):
paginate=(_1CH.get_setting('paginate-search')=='true' and _1CH.get_setting('paginate')=='true')
keyboard = xbmc.Keyboard()
if section == 'tv':
keyboard.setHeading('Search TV Shows')
else:
keyboard.setHeading('Search Movies')
while True:
keyboard.doModal()
if keyboard.isConfirmed():
search_text = keyboard.getText()
if not paginate and not search_text:
_1CH.show_ok_dialog(['Blank searches are not allowed unless [B]Paginate Search Results[/B] is enabled.'], title='PrimeWire')
return
else:
break
else:
break
if keyboard.isConfirmed():
if search_text.startswith('!#'):
if search_text == '!#create metapacks': metapacks.create_meta_packs()
if search_text == '!#repair meta': repair_missing_images()
if search_text == '!#install all meta': metapacks.install_all_meta()
if search_text.startswith('!#sql:'):
utils.log('Running SQL: |%s|' % (search_text[6:]), xbmc.LOGDEBUG)
db_connection.execute_sql(search_text[6:])
else:
queries = {'mode': next_mode, 'section': section, 'query': keyboard.getText()}
pluginurl = _1CH.build_plugin_url(queries)
builtin = 'Container.Update(%s)' %(pluginurl)
xbmc.executebuiltin(builtin)
else:
BrowseListMenu(section)
@pw_dispatcher.register(MODES.ADV_QUERY, ['section'])
def GetSearchQueryAdvanced(section):
try:
query=gui_utils.get_adv_search_query(section)
js_query=json.dumps(query)
queries = {'mode': MODES.SEARCH_ADV, 'section': section, 'query': js_query}
pluginurl = _1CH.build_plugin_url(queries)
builtin = 'Container.Update(%s)' %(pluginurl)
xbmc.executebuiltin(builtin)
except:
BrowseListMenu(section)
@pw_dispatcher.register(MODES.SEARCH, ['mode', 'section'], ['query', 'page'])
@pw_dispatcher.register(MODES.SEARCH_DESC, ['mode', 'section'], ['query', 'page'])
@pw_dispatcher.register(MODES.SEARCH_ADV, ['mode', 'section'], ['query', 'page'])
@pw_dispatcher.register(MODES.REMOTE_SEARCH, ['section'], ['query'])
def Search(mode, section, query='', page=None):
section_params = get_section_params(section)
paginate=(_1CH.get_setting('paginate-search')=='true' and _1CH.get_setting('paginate')=='true')
try:
if mode==MODES.SEARCH:
results=pw_scraper.search(section,query, page, paginate)
elif mode==MODES.SEARCH_DESC:
results=pw_scraper.search_desc(section,query, page, paginate)
elif mode==MODES.SEARCH_ADV:
criteria = utils.unpack_query(query)
results=pw_scraper.search_advanced(section, criteria['title'], criteria['tag'], False, criteria['country'], criteria['genre'],
criteria['actor'], criteria['director'], criteria['year'], criteria['month'], criteria['decade'], page=page, paginate=paginate)
except PW_Error:
message='Site Blocked? Unexpected page received.'
xbmc.executebuiltin("XBMC.Notification(%s,%s,10000, %s)" % ('PrimeWire',message, ICON_PATH))
return
total_pages = pw_scraper.get_last_res_pages()
total=pw_scraper.get_last_res_total()
if paginate:
if page != total_pages:
total=PW_Scraper.ITEMS_PER_PAGE
else:
total=total % PW_Scraper.ITEMS_PER_PAGE
resurls = []
for result in results:
if result['url'] not in resurls:
resurls.append(result['url'])
create_item(section_params,result['title'],result['year'],result['img'],result['url'],totalItems=total)
if not page: page = 1
next_page = int(page) + 1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
command = _1CH.build_plugin_url(
{'mode': MODES.SEARCH_PAGE_SELECT, 'pages': total_pages, 'query': query, 'search': mode, 'section': section})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory(
{'mode': mode, 'query': query, 'page': next_page, 'section': section},
meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
_1CH.end_of_directory()
# temporary method to fix bad urls
def fix_urls():
tables = ['favorites', 'subscriptions', 'external_subs']
for table in tables:
# remove any possible dupes
while True:
rows = db_connection.execute_sql("SELECT url from %s GROUP BY REPLACE(url,'-online-free','') HAVING COUNT(*)>1" % (table))
if rows:
db_connection.execute_sql("DELETE FROM %s WHERE url in (SELECT * FROM (SELECT url from %s GROUP BY REPLACE(url,'-online-free','') HAVING COUNT(*)>1) as t)" % (table, table))
else:
break
# strip the -online-free part of the url off
db_connection.execute_sql("UPDATE %s SET url=REPLACE(url,'-online-free','') WHERE SUBSTR(url, -12)='-online-free'" % (table))
@pw_dispatcher.register(MODES.MAIN)
def AddonMenu(): # homescreen
utils.log('Main Menu')
db_connection.init_database()
fix_urls()
if utils.has_upgraded():
utils.log('Showing update popup', xbmc.LOGDEBUG)
if _1CH.get_setting('show_splash')=='true':
msg = ('The 1Channel/PrimeWire addon is developed and supported by the team at [COLOR white]www.tvaddons.ag[/COLOR]\n\n'
'If you are having issues with the addon, visit our Forums for help. We also invite you to visit us regularly to show your support.\n\n'
'[I](This message is [B]only[/B] shown when the 1Channel addon is first installed or updated.)[/I]')
gui_utils.do_My_TextSplash(msg, HowLong=20, TxtColor='0xFF00FF00', BorderWidth=45)
utils.TextBox()
adn = xbmcaddon.Addon('plugin.video.1channel')
adn.setSetting('domain', 'http://www.primewire.ag')
adn.setSetting('old_version', _1CH.get_version())
_1CH.add_directory({'mode': MODES.LIST_MENU, 'section': 'movie'}, {'title': 'Movies'}, img=art('movies.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.LIST_MENU, 'section': 'tv'}, {'title': 'TV shows'}, img=art('television.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.PLAYLISTS_MENU, 'section': 'playlist'}, {'title': 'Playlists'}, img=art('playlists.png'),
fanart=art('fanart.png'))
if _1CH.get_setting('h99_hidden')=='true':
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': 'tv', 'sort': 'date'}, {'title': 'TV - Date added'},img=art('date_added.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.MANAGE_SUBS}, {'title': 'TV - Subscriptions'}, img=art('subscriptions.png'),fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': 'movie', 'sort': 'date'}, {'title': 'Movies - Date added'},img=art('date_added.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': 'movie', 'sort': 'featured'}, {'title': 'Movies - Featured'},img=art('featured.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': 'movie', 'sort': 'views'}, {'title': 'Movies - Most Popular'},img=art('most_popular.png'), fanart=art('fanart.png'))
if not xbmc.getCondVisibility('System.HasAddon(script.1channel.themepak)') and xbmc.getCondVisibility('System.HasAddon(plugin.program.addoninstaller)'):
_1CH.add_directory({'mode': MODES.INSTALL_THEMES}, {'title': 'Install 1Channel Themes/Icons'}, img=art('settings.png'),fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.RES_SETTINGS}, {'title': 'Resolver Settings'}, img=art('settings.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.HELP}, {'title': 'Help'}, img=art('help.png'), fanart=art('fanart.png'))
# _1CH.add_directory({'mode': 'test'}, {'title': 'Test'}, img=art('settings.png'), fanart=art('fanart.png'))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
@pw_dispatcher.register(MODES.INSTALL_THEMES)
def install_themes():
addon='plugin://plugin.program.addoninstaller'
query={'mode': 'addoninstall', 'name': '1Channel.themepak', \
'url': 'https://offshoregit.com/tknorris/tknorris-release-repo/raw/master/zips/script.1channel.themepak/script.1channel.themepak-0.0.3.zip', \
'description': 'none', 'filetype': 'addon', 'repourl': 'none'}
run = 'RunPlugin(%s)' % (addon + '?' + urllib.urlencode(query))
xbmc.executebuiltin(run)
@pw_dispatcher.register(MODES.LIST_MENU, ['section'])
def BrowseListMenu(section):
utils.log('Browse Options')
_1CH.add_directory({'mode': MODES.AZ_MENU, 'section': section}, {'title': 'A-Z'}, img=art('atoz.png'),
fanart=art('fanart.png'))
add_search_item({'mode': MODES.SEARCH_QUERY, 'section': section, 'next_mode': MODES.SEARCH}, 'Search')
if utils.website_is_integrated():
_1CH.add_directory({'mode': MODES.BROWSE_FAVS_WEB, 'section': section}, {'title': 'Website Favourites'},
img=art('favourites.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_W_WEB, 'section': section}, {'title': 'Website Watched List'},
img=art('watched.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_TW_WEB, 'section': section}, {'title': 'Website To Watch List'},
img=art('towatch.png'), fanart=art('fanart.png'))
if section == 'tv':
_1CH.add_directory({'mode': MODES.SHOW_SCHEDULE}, {'title': 'My TV Schedule'}, img=art('schedule.png'),
fanart=art('fanart.png'))
else:
_1CH.add_directory({'mode': MODES.BROWSE_FAVS, 'section': section}, {'title': 'Favourites'},
img=art('favourites.png'), fanart=art('fanart.png'))
if section == 'tv':
_1CH.add_directory({'mode': MODES.MANAGE_SUBS}, {'title': 'Subscriptions'}, img=art('subscriptions.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.GENRE_MENU, 'section': section}, {'title': 'Genres'}, img=art('genres.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'featured'}, {'title': 'Featured'},
img=art('featured.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'views'}, {'title': 'Most Popular'},
img=art('most_popular.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'ratings'}, {'title': 'Highly rated'},
img=art('highly_rated.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'release'},
{'title': 'Date released'}, img=art('date_released.png'), fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'date'}, {'title': 'Date added'},
img=art('date_added.png'), fanart=art('fanart.png'))
add_search_item({'mode': MODES.DESC_QUERY, 'section': section, 'next_mode': MODES.SEARCH_DESC}, 'Search (+Description)')
add_search_item({'mode': MODES.ADV_QUERY, 'section': section}, 'Search (Advanced Search)')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@pw_dispatcher.register(MODES.PLAYLISTS_MENU)
def playlist_menu():
utils.log('Playlist Menu')
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': True, 'sort': 'date'}, {'title': 'Public Playlists (sorted by date)'}, img=art('public_playlists_date.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': True, 'sort': 'rating'}, {'title': 'Public Playlists (sorted by rating)'}, img=art('public_playlists_rating.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': True, 'sort': 'hits'}, {'title': 'Public Playlists (sorted by views)'}, img=art('public_playlists_views.png'),
fanart=art('fanart.png'))
if utils.website_is_integrated():
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': False, 'sort': 'date'}, {'title': 'Personal Playlists (sorted by date)'}, img=art('personal_playlists_date.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': False, 'sort': 'rating'}, {'title': 'Personal Playlists (sorted by rating)'}, img=art('personal_playlists_rating.png'),
fanart=art('fanart.png'))
_1CH.add_directory({'mode': MODES.BROWSE_PLAYLISTS, 'public': False, 'sort': 'hits'}, {'title': 'Personal Playlists (sorted by views)'}, img=art('personal_playlists_views.png'),
fanart=art('fanart.png'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@pw_dispatcher.register(MODES.BROWSE_PLAYLISTS, ['public'], ['sort', 'page'])
def browse_playlists(public,sort=None, page=None, paginate=True):
utils.log('Browse Playlists: public: |%s| sort: |%s| page: |%s| paginate: |%s|' % (public, sort, page, paginate))
playlists=pw_scraper.get_playlists(public, sort, page, paginate)
total_pages = pw_scraper.get_last_res_pages()
for playlist in playlists:
title = '%s (%s items) (%s views) (rating %s)' % (playlist['title'].encode('ascii', 'ignore'), playlist['item_count'], playlist['views'], playlist['rating'])
_1CH.add_directory({'mode': MODES.SHOW_PLAYLIST, 'url': playlist['url'], 'public': public}, {'title': title}, img=playlist['img'],fanart=art('fanart.png'))
if not page: page = 1
next_page = int(page)+1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
command = _1CH.build_plugin_url(
{'mode': MODES.PL_PAGE_SELECT, 'section': 'playlist', 'pages': total_pages, 'public': public, 'sort': sort})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory(
{'mode': MODES.BROWSE_PLAYLISTS, 'public': public, 'sort': sort, 'page': next_page},
meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(None, 'default-view')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@pw_dispatcher.register(MODES.SHOW_PLAYLIST, ['url', 'public'])
def show_playlist(url, public):
sort = PL_SORT[int(_1CH.get_setting('playlist-sort'))]
items=pw_scraper.show_playlist(url, public, sort)
# one playlist can contain both movies and tvshows so can't set the params for the whole playlist/section
item_params={}
item_params['subs'] = [row[0] for row in get_subscriptions()]
if utils.website_is_integrated():
item_params['fav_urls']=[]
else:
item_params['fav_urls']=get_fav_urls()
item_params['xbmc_fav_urls']=utils.get_xbmc_fav_urls()
for item in items:
item_params.update(get_item_params(item))
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.RM_FROM_PL, 'playlist_url': url, 'item_url': item['url']})
menu_items = [('Remove from Playlist', runstring)]
create_item(item_params,item['title'],item['year'],item['img'],item['url'], menu_items=menu_items)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@pw_dispatcher.register(MODES.ADD2PL, ['item_url'])
def add_to_playlist(item_url):
playlists=pw_scraper.get_playlists(False)
sel_list=[]
url_list=[]
for playlist in playlists:
title = '%s (%s items) (%s views) (rating %s)' % (playlist['title'], playlist['item_count'], playlist['views'], playlist['rating'])
sel_list.append(title)
url_list.append(playlist['url'])
if sel_list:
dialog=xbmcgui.Dialog()
ret = dialog.select('Select Playlist', sel_list)
if ret>-1:
try:
pw_scraper.add_to_playlist(url_list[ret], item_url)
message = 'Item added to playlist.'
except:
message = 'Error adding item to playlist.'
builtin = 'XBMC.Notification(PrimeWire,%s,4000, %s)'
xbmc.executebuiltin(builtin % (message,ICON_PATH))
else:
builtin = 'XBMC.Notification(PrimeWire,%s,4000, %s)'
xbmc.executebuiltin(builtin % ('Create a playlist on the website first.',ICON_PATH))
@pw_dispatcher.register(MODES.RM_FROM_PL, ['playlist_url', 'item_url'])
def remove_from_playlist(playlist_url, item_url):
pw_scraper.remove_from_playlist(playlist_url, item_url)
xbmc.executebuiltin('Container.Refresh')
# add searches as items so they don't get added to the path history
# _1CH.add_item doesn't work because it insists on adding non-folder items as playable
def add_search_item(queries, label):
liz = xbmcgui.ListItem(label=label, iconImage=art('search.png'), thumbnailImage=art('search.png'))
liz.setProperty('IsPlayable', 'false')
liz.setProperty('fanart_image', art('fanart.png'))
liz.setInfo('video', {'title': label})
liz_url = _1CH.build_plugin_url(queries)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
@pw_dispatcher.register(MODES.AZ_MENU, ['section'])
def BrowseAlphabetMenu(section=None):
utils.log('Browse by alphabet screen')
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'alphabet', 'letter': '123'},
{'title': '#123'}, img=art('123.png'), fanart=art('fanart.png'))
for character in string.ascii_uppercase:
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'alphabet', 'letter': character},
{'title': character}, img=art(character.lower() + '.png'), fanart=art('fanart.png'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@pw_dispatcher.register(MODES.GENRE_MENU, ['section'])
def BrowseByGenreMenu(section=None): #2000
utils.log('Browse by genres screen')
for genre in pw_scraper.get_genres():
_1CH.add_directory({'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': 'date', 'genre': genre},
{'title': genre}, img=art(genre.lower() + '.png'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def add_contextsearchmenu(title, video_type):
contextmenuitems = []
nameonly=utils.filename_filter_out_year(title); #print 'nameonly: '+nameonly
if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so'):
if video_type == 'tv':
section = 'tv'
contextmenuitems.append(('Find AirDates', 'XBMC.Container.Update(%s?mode=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','SearchForAirDates',nameonly)))
else: section = 'movies'
contextmenuitems.append(('Search Solarmovie.so', 'XBMC.Container.Update(%s?mode=%s&section=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','ApiSearch',section,nameonly)))
if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.icefilms'):
contextmenuitems.append(('Search Icefilms',
'XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=%s)' % (
'plugin://plugin.video.icefilms/', 'http://www.icefilms.info/', nameonly, '1')))
if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.tubeplus'):
if video_type == 'tv':
section = 'tv-shows'
else:
section = 'movies'
contextmenuitems.append(('Search tubeplus', 'XBMC.Container.Update(%s?mode=Search&section=%s&query=%s)' % (
'plugin://plugin.video.tubeplus/', section, nameonly)))
if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.tvlinks'):
if video_type == 'tv':
contextmenuitems.append(('Search tvlinks', 'XBMC.Container.Update(%s?mode=Search&query=%s)' % (
'plugin://plugin.video.tvlinks/', nameonly)))
return contextmenuitems
def get_item_params(item):
item_params = {}
if item['video_type']=='movie':
item_params['section']='movies'
item_params['nextmode'] = MODES.GET_SOURCES
item_params['video_type'] = 'movie'
item_params['folder']=(_1CH.get_setting('source-win') == 'Directory' and _1CH.get_setting('auto-play') == 'false')
else:
item_params['section']='tv'
item_params['nextmode'] = MODES.SEASON_LIST
item_params['video_type'] = 'tvshow'
item_params['folder']=True
return item_params
def get_section_params(section):
section_params={}
section_params['section']=section
if section == 'tv':
section_params['content']='tvshows'
section_params['nextmode'] = MODES.SEASON_LIST
section_params['video_type'] = 'tvshow'
section_params['folder'] = True
subscriptions = get_subscriptions()
section_params['subs'] = [row[0] for row in subscriptions]
elif section=='episode':
section_params['nextmode'] = MODES.GET_SOURCES
section_params['video_type']='episode'
section_params['content']='episodes'
section_params['folder'] = (_1CH.get_setting('source-win') == 'Directory' and _1CH.get_setting('auto-play') == 'false')
section_params['subs'] = []
elif section == 'calendar':
section_params['nextmode'] = MODES.GET_SOURCES
section_params['video_type']='episode'
section_params['content']='calendar'
section_params['folder'] = (_1CH.get_setting('source-win') == 'Directory' and _1CH.get_setting('auto-play') == 'false')
section_params['subs'] = []
else:
section_params['content']='movies'
section_params['nextmode'] = MODES.GET_SOURCES
section_params['video_type'] = 'movie'
section_params['folder'] = (_1CH.get_setting('source-win') == 'Directory' and _1CH.get_setting('auto-play') == 'false')
section_params['subs'] = []
# only grab actual fav_urls if not using website favs (otherwise too much site load)
if utils.website_is_integrated():
section_params['fav_urls']=[]
else:
section_params['fav_urls']=get_fav_urls(section)
section_params['xbmc_fav_urls']=utils.get_xbmc_fav_urls()
return section_params
def create_item(section_params,title,year,img,url, imdbnum='', season='', episode = '', day='', totalItems=0, menu_items=None):
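"""Add one directory entry for a video, wiring up the 'Select Source' and XBMC favourites context-menu items and delegating ListItem construction to build_listitem()."""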
#utils.log('Create Item: %s, %s, %s, %s, %s, %s, %s, %s, %s' % (section_params, title, year, img, url, imdbnum, season, episode, totalItems))
if menu_items is None: menu_items=[]
if section_params['nextmode']==MODES.GET_SOURCES and _1CH.get_setting('auto-play')=='true':
queries = {'mode': MODES.SELECT_SOURCES, 'title': title, 'url': url, 'img': img, 'imdbnum': imdbnum, 'video_type': section_params['video_type'], 'year': year}
if _1CH.get_setting('source-win')=='Dialog':
runstring = 'PlayMedia(%s)' % _1CH.build_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % _1CH.build_plugin_url(queries)
menu_items.insert(0,('Select Source', runstring),)
# fix episode url being added to subs
if section_params['video_type']=='episode':
temp_url=re.match('(/.*/).*',url).groups()[0]
else:
temp_url=url
liz,menu_items = build_listitem(section_params, title, year, img, temp_url, imdbnum, season, episode, day=day, extra_cms=menu_items)
img = liz.getProperty('img')
imdbnum = liz.getProperty('imdb')
if not section_params['folder']: # only when it's a movie, the source dialog is off and autoplay is off
liz.setProperty('isPlayable','true')
queries = {'mode': section_params['nextmode'], 'title': title, 'url': url, 'img': img, 'imdbnum': imdbnum, 'video_type': section_params['video_type'], 'year': year}
liz_url = _1CH.build_plugin_url(queries)
if utils.in_xbmc_favs(liz_url, section_params['xbmc_fav_urls']):
action=FAV_ACTIONS.REMOVE
label='Remove from XBMC Favourites'
else:
action=FAV_ACTIONS.ADD
label='Add to XBMC Favourites'
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.TOGGLE_X_FAVS, 'title': liz.getLabel(), 'url': liz_url, 'img': img, 'is_playable': liz.getProperty('isPlayable')=='true', 'action': action})
menu_items.insert(0,(label, runstring),)
liz.addContextMenuItems(menu_items, replaceItems=True)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz,section_params['folder'],totalItems)
def build_listitem(section_params, title, year, img, resurl, imdbnum='', season='', episode='', day='', extra_cms=None):
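"""Build the xbmcgui.ListItem and its full context menu for a single title.

Handles favourites, ToWatch/Watched lists, library/playlist adds,
subscriptions, CouchPotato and the metadata-driven items.
Returns (listitem, menu_items).
"""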
if not extra_cms: extra_cms = []
menu_items = add_contextsearchmenu(title, section_params['section'])
menu_items = menu_items + extra_cms
# fav_urls is only populated when local favs are used
if resurl in section_params['fav_urls']:
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.DEL_FAV, 'section': section_params['section'], 'title': title, 'url': resurl, 'year': year})
menu_items.append((REMOVE_FAV_MENU, runstring),)
# 'Add to Favorites' always shows when using website favs (except on the favorites view);
# with local favs it only shows when the item isn't already a favorite
elif REMOVE_FAV_MENU not in [menu[0] for menu in menu_items]:
queries = {'mode': MODES.SAVE_FAV, 'fav_type': section_params['section'], 'title': title, 'url': resurl, 'year': year}
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(queries)
menu_items.append(('Add to Favorites', runstring), )
if resurl and utils.website_is_integrated():
if REMOVE_TW_MENU not in (item[0] for item in menu_items):
watchstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.CH_TOWATCH_WEB, 'primewire_url': resurl, 'action':'add', 'refresh':True})
menu_items.append(('Add to ToWatch list', watchstring),)
if REMOVE_W_MENU not in (item[0] for item in menu_items):
watchedstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.CH_WATCH_WEB, 'primewire_url': resurl,'action':'add', 'refresh':True})
menu_items.append(('Add to Watched List', watchedstring),)
queries = {'mode': MODES.ADD2LIB, 'video_type': section_params['video_type'], 'title': title, 'img': img, 'year': year,
'url': resurl}
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(queries)
menu_items.append(('Add to Library', runstring), )
if utils.website_is_integrated():
queries = {'mode': MODES.ADD2PL, 'item_url': resurl}
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(queries)
menu_items.append(('Add to Playlist', runstring), )
if section_params['video_type'] in ('tv', 'tvshow', 'episode'):
if resurl not in section_params['subs']:
queries = {'mode': MODES.ADD_SUB, 'video_type': section_params['video_type'], 'url': resurl, 'title': title,
'img': img, 'year': year}
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(queries)
menu_items.append(('Subscribe', runstring), )
else:
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.CANCEL_SUB, 'url': resurl})
menu_items.append(('Cancel subscription', runstring,))
else:
plugin_str = 'plugin://plugin.video.couchpotato_manager'
plugin_str += '/movies/add?title=%s' % title
runstring = 'XBMC.RunPlugin(%s)' % plugin_str
menu_items.append(('Add to CouchPotato', runstring), )
if META_ON:
if section_params['video_type'] == 'episode':
meta = __metaget__.get_episode_meta(title, imdbnum, season, episode)
meta['TVShowTitle'] = title
else:
meta = create_meta(section_params['video_type'], title, year)
menu_items.append(('Show Information', 'XBMC.Action(Info)'), )
queries = {'mode': MODES.REFRESH_META, 'video_type': section_params['video_type'], 'title': meta['title'], 'imdbnum': meta['imdb_id'],
'alt_id': 'imdbnum', 'year': year}
runstring = _1CH.build_plugin_url(queries)
runstring = 'RunPlugin(%s)' % runstring
menu_items.append(('Refresh Metadata', runstring,))
if 'trailer_url' in meta and meta['trailer_url']:
try:
url = meta['trailer_url']
url = url.encode('base-64').strip()
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.PLAY_TRAILER, 'url': url})
menu_items.append(('Watch Trailer', runstring,))
except: pass
if meta['overlay'] == 6:
label = 'Mark as watched'
watched=True
else:
label = 'Mark as unwatched'
watched=False
queries = {'mode': MODES.CH_WATCH, 'title': title, 'imdbnum': meta['imdb_id'], 'video_type': section_params['video_type'], 'year': year, 'primewire_url': resurl, 'watched': watched}
if section_params['video_type'] in ('tv', 'tvshow', 'episode'):
queries['season'] = season
queries['episode'] = episode
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(queries)
menu_items.append((label, runstring,))
if section_params['video_type'] == 'tvshow':
if resurl in section_params['subs']:
meta['title'] = utils.format_label_sub(meta)
else:
meta['title'] = utils.format_label_tvshow(meta)
# save the playcount for ep counts; delete it to prevent tvshow being marked as watched
# (default to 0 so the episode-count block below never sees an unbound name)
playcount = meta.pop('playcount', 0)
elif section_params['video_type'] == 'episode':
if section_params['content'] == 'calendar':
meta['title'] = '[[COLOR deeppink]%s[/COLOR]] %s - S%02dE%02d - %s' % (day, title, int(season), int(episode), meta['title'])
else:
meta['title'] = utils.format_tvshow_episode(meta)
else:
meta['title'] = utils.format_label_movie(meta)
art=make_art(section_params['video_type'], meta, img)
listitem=xbmcgui.ListItem(meta['title'], iconImage=art['thumb'], thumbnailImage=art['thumb'])
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass # method doesn't exist in Frodo
listitem.setInfo('video', meta)
listitem.setProperty('imdb', meta['imdb_id'])
listitem.setProperty('img', img)
# set tvshow episode counts
if section_params['video_type']== 'tvshow' and 'episode' in meta:
total_episodes=meta['episode']
unwatched_episodes=total_episodes - playcount
watched_episodes = total_episodes - unwatched_episodes
listitem.setProperty('TotalEpisodes', str(total_episodes))
listitem.setProperty('WatchedEpisodes', str(watched_episodes))
listitem.setProperty('UnWatchedEpisodes', str(unwatched_episodes))
else: # Metadata off
if section_params['video_type'] == 'episode':
if section_params['content'] == 'calendar':
disp_title = '[[COLOR deeppink]%s[/COLOR]] %s - S%02dE%02d' % (day, title, int(season), int(episode))
else:
disp_title = '%sx%s' % (season, episode)
listitem = xbmcgui.ListItem(disp_title, iconImage=img,
thumbnailImage=img)
else:
if year:
disp_title = '%s (%s)' % (title, year)
else:
disp_title = title
listitem = xbmcgui.ListItem(disp_title, iconImage=img,
thumbnailImage=img)
# Hack resumetime & totaltime to prevent XBMC from popping up a resume dialog if a native bookmark is set. UGH!
listitem.setProperty('resumetime',str(0))
listitem.setProperty('totaltime',str(1))
return (listitem,menu_items)
@pw_dispatcher.register(MODES.FILTER_RESULTS, ['section'], ['genre', 'letter', 'sort', 'page'])
def GetFilteredResults(section, genre='', letter='', sort='alphabet', page=None, paginate=None):
utils.log('Filtered results for Section: %s Genre: %s Letter: %s Sort: %s Page: %s Paginate: %s' % (section, genre, letter, sort, page, paginate))
if paginate is None: paginate=(_1CH.get_setting('paginate-lists')=='true' and _1CH.get_setting('paginate')=='true')
section_params = get_section_params(section)
results = pw_scraper.get_filtered_results(section, genre, letter, sort, page, paginate)
total_pages = pw_scraper.get_last_res_pages()
resurls = []
count = 0
win = xbmcgui.Window(10000)
for result in results:
if result['url'] not in resurls:
resurls.append(result['url'])
create_item(section_params,result['title'],result['year'],result['img'],result['url'])
# expose to skin
if sort == update_movie_cat():
win.setProperty('1ch.movie.%d.title' % count, result['title'])
win.setProperty('1ch.movie.%d.thumb' % count, result['img'])
# Needs dialog=1 to show dialog instead of going to window
queries = {'mode': section_params['nextmode'], 'url': result['url'], 'title': result['title'],
'img': result['img'], 'dialog': 1, 'video_type': section_params['video_type']}
win.setProperty('1ch.movie.%d.path' % count, _1CH.build_plugin_url(queries))
count = count + 1
# expose a 'More' entry to the skin
if sort == update_movie_cat():
# go to page 1 since it may take some time to download page 2
# and users may get impatient because xbmc does not show progress
command = _1CH.build_plugin_url( {'mode': MODES.FILTER_RESULTS, 'section': section, 'sort': sort, 'title': _1CH.get_setting('auto-update-movies-cat'), 'page':'1'})
win.setProperty('1ch.movie.more.title', "More")
win.setProperty('1ch.movie.more.path', command)
if not page: page = 1
next_page = int(page)+1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
command = _1CH.build_plugin_url(
{'mode': MODES.PAGE_SELECT, 'pages': total_pages, 'section': section, 'genre': genre, 'letter': letter,'sort': sort})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory(
{'mode': MODES.FILTER_RESULTS, 'section': section, 'genre': genre, 'letter': letter, 'sort': sort,
'page': next_page},
meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
@pw_dispatcher.register(MODES.SEASON_LIST, ['url', 'title'], ['year', 'tvdbnum'])
def TVShowSeasonList(url, title, year='', old_imdb='', tvdbnum=''):
utils.log('Seasons for TV Show %s' % url)
season_gen=pw_scraper.get_season_list(url)
seasons = list(season_gen) # copy the generator into a list so that we can iterate over it multiple times
new_imdbnum = pw_scraper.get_last_imdbnum()
imdbnum = old_imdb
if META_ON:
if not old_imdb and new_imdbnum:
utils.log('Imdb ID not received from title search, updating with new id of %s' % new_imdbnum)
try:
utils.log('Title: %s Old IMDB: %s Old TVDB: %s New IMDB %s Year: %s' % (title, old_imdb, tvdbnum, new_imdbnum, year), xbmc.LOGDEBUG)
__metaget__.update_meta('tvshow', title, old_imdb, tvdbnum, new_imdbnum)
except:
utils.log('Error while trying to update metadata with: %s, %s, %s, %s, %s' % (title, old_imdb, tvdbnum, new_imdbnum, year), xbmc.LOGERROR)
imdbnum = new_imdbnum
season_nums = [season[0] for season in seasons]
season_meta = __metaget__.get_seasons(title, imdbnum, season_nums)
num = 0
seasons_found=False
for season in seasons:
seasons_found=True
season_num,season_html = season
if META_ON:
meta = season_meta[num]
else:
meta={}
label = 'Season %s' % season_num
db_connection.cache_season(season_num, season_html)
art=make_art('tvshow', meta)
listitem = xbmcgui.ListItem(label, iconImage=art['thumb'],thumbnailImage=art['thumb'])
listitem.setInfo('video', meta)
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass # method doesn't exist in Frodo
listitem.addContextMenuItems([], replaceItems=True)
queries = {'mode': MODES.EPISODE_LIST, 'season': season_num, 'year': year,
'imdbnum': imdbnum, 'title': title}
li_url = _1CH.build_plugin_url(queries)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), li_url, listitem,
isFolder=True,
totalItems=len(seasons))
num += 1
if not seasons_found:
utils.log("No Seasons Found for %s at %s" % (title, url), xbmc.LOGERROR)
_1CH.show_small_popup('PrimeWire','No Seasons Found for %s' % (title), 3000, ICON_PATH)
return
xbmcplugin.endOfDirectory(int(sys.argv[1]))
utils.set_view('seasons', 'seasons-view')
@pw_dispatcher.register(MODES.EPISODE_LIST, ['title', 'season'], ['imdbnum', 'year']) # TVShowEpisodeList(title, season, imdbnum, year)
def TVShowEpisodeList(title, season, imdbnum='', year=''):
season_html = db_connection.get_cached_season(season)
r = '"tv_episode_item".+?href="(.+?)">(.*?)</a>'
episodes = re.finditer(r, season_html, re.DOTALL)
section_params = get_section_params('episode')
for ep in episodes:
epurl, eptitle = ep.groups()
eptitle = re.sub(r'<[^<]+?>', '', eptitle.strip())
eptitle = re.sub(r'\s\s+', ' ', eptitle)
season = int(re.search('/season-([0-9]{1,4})-', epurl).group(1))
epnum = int(re.search('-episode-([0-9]{1,3})', epurl).group(1))
create_item(section_params, title, year, '', epurl, imdbnum, season, epnum)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
def get_fav_urls(fav_type=None):
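"""Return the list of favourite URLs, from the website when integration is on, otherwise from the local database."""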
if utils.website_is_integrated():
if fav_type is None:
favs=pw_scraper.get_favorites('movies')
fav_urls=[fav['url'] for fav in favs]
favs=pw_scraper.get_favorites('tv')
fav_urls += [fav['url'] for fav in favs]
else:
favs=pw_scraper.get_favorites(fav_type)
fav_urls=[fav['url'] for fav in favs]
else:
favs=db_connection.get_favorites(fav_type)
fav_urls=[fav[2] for fav in favs]
return fav_urls
@pw_dispatcher.register(MODES.BROWSE_FAVS, ['section'])
def browse_favorites(section):
if not section: section='movie'
favs=db_connection.get_favorites(section)
section_params = get_section_params(section)
if section=='tv':
label='Add Favorite TV Shows to Library'
else:
label='Add Favorite Movies to Library'
liz = xbmcgui.ListItem(label=label)
liz_url = _1CH.build_plugin_url({'mode': MODES.FAV2LIB, 'section': section})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
for row in favs:
_, title,favurl,year = row
create_item(section_params,title,year,'',favurl)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
@pw_dispatcher.register(MODES.BROWSE_FAVS_WEB, ['section'], ['page'])
def browse_favorites_website(section, page=None):
if section=='movie': section='movies'
local_favs=db_connection.get_favorites_count()
if local_favs:
liz = xbmcgui.ListItem(label='Upload Local Favorites')
liz_url = _1CH.build_plugin_url({'mode': MODES.MIG_FAVS})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
if section=='tv':
label='Add Favorite TV Shows to Library'
else:
label='Add Favorite Movies to Library'
liz = xbmcgui.ListItem(label=label)
liz_url = _1CH.build_plugin_url({'mode': MODES.FAV2LIB, 'section': section})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
section_params = get_section_params(section)
paginate=(_1CH.get_setting('paginate-favs')=='true' and _1CH.get_setting('paginate')=='true')
for fav in pw_scraper.get_favorites(section, page, paginate):
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.DEL_FAV, 'section': section_params['section'], 'title': fav['title'], 'url': fav['url'], 'year': fav['year']})
menu_items=[(REMOVE_FAV_MENU, runstring),]
create_item(section_params,fav['title'],fav['year'],fav['img'],fav['url'], menu_items=menu_items)
total_pages=pw_scraper.get_last_res_pages()
if not page: page = 1
next_page = int(page)+1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
command = _1CH.build_plugin_url({'mode': MODES.FAV_PAGE_SELECT, 'section': section, 'pages': total_pages})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory({'mode': MODES.BROWSE_FAVS_WEB, 'section': section, 'page': next_page}, meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
@pw_dispatcher.register(MODES.MIG_FAVS)
def migrate_favs_to_web():
progress = xbmcgui.DialogProgress()
ln1 = 'Uploading your favorites to www.primewire.ag...'
progress.create('Uploading Favorites', ln1)
successes = []
all_favs= db_connection.get_favorites()
fav_len = len(all_favs)
count=0
for fav in all_favs:
if progress.iscanceled(): return
title = fav[1]
favurl = fav[2]
try:
pw_scraper.add_favorite(favurl)
ln3 = "Success"
utils.log('%s added successfully' % title, xbmc.LOGDEBUG)
successes.append((title, favurl))
except Exception as e:
ln3= "Already Exists"
utils.log(e, xbmc.LOGDEBUG)
count += 1
progress.update(count*100/fav_len, ln1, 'Processed %s' % title, ln3)
progress.close()
dialog = xbmcgui.Dialog()
ln1 = 'Do you want to remove the successful'
ln2 = 'uploads from local favorites?'
ln3 = 'THIS CANNOT BE UNDONE'
# yesno() takes nolabel before yeslabel: 'Delete' is the confirming button
nolabel = 'Keep'
yeslabel = 'Delete'
ret = dialog.yesno('Migration Complete', ln1, ln2, ln3, nolabel, yeslabel)
if ret:
db_connection.delete_favorites([fav[1] for fav in successes])
xbmc.executebuiltin("XBMC.Container.Refresh")
@pw_dispatcher.register(MODES.FAV2LIB, ['section'])
def add_favs_to_library(section):
if not section: section='movie'
section_params=get_section_params(section)
if utils.website_is_integrated():
for fav in pw_scraper.get_favorites(section, paginate=False):
add_to_library(section_params['video_type'], fav['url'], fav['title'], fav['img'], fav['year'], '')
else:
favs=db_connection.get_favorites(section)
for fav in favs:
_, title, url, year = fav
add_to_library(section_params['video_type'], url, title, '', year, '')
if section=='tv':
message='Favorite TV Shows Added to Library'
else:
message='Favorite Movies Added to Library'
builtin = 'XBMC.Notification(PrimeWire,%s,4000, %s)'
xbmc.executebuiltin(builtin % (message,ICON_PATH))
@pw_dispatcher.register(MODES.BROWSE_W_WEB, ['section'], ['page'])
def browse_watched_website(section, page=None):
if section=='movie': section='movies'
# TODO: Extend fav2Library
# if section=='tv':
# label='Add Watched TV Shows to Library'
# else:
# label='Add Watched Movies to Library'
# liz = xbmcgui.ListItem(label=label)
# liz_url = _1CH.build_plugin_url({'mode': MODES.FAV2LIB, 'section': section})
# xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
section_params = get_section_params(section)
paginate=(_1CH.get_setting('paginate-watched')=='true' and _1CH.get_setting('paginate')=='true')
for video in pw_scraper.get_watched(section, page, paginate):
watchedstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.CH_WATCH_WEB, 'primewire_url': video['url'],'action':'delete', 'refresh':True})
menu_items=[(REMOVE_W_MENU, watchedstring),]
create_item(section_params,video['title'],video['year'],video['img'],video['url'], menu_items=menu_items)
total_pages=pw_scraper.get_last_res_pages()
if not page: page = 1
next_page = int(page)+1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
command = _1CH.build_plugin_url({'mode': MODES.WATCH_PAGE_SELECT, 'section': section, 'pages': total_pages})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory({'mode': MODES.BROWSE_W_WEB, 'section': section, 'page': next_page}, meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
@pw_dispatcher.register(MODES.BROWSE_TW_WEB, ['section'], ['page'])
def browse_towatch_website(section, page=None):
if section=='movie': section='movies'
if section=='movies':
label='Add To Watch List Movies to Library'
liz = xbmcgui.ListItem(label=label)
liz_url = _1CH.build_plugin_url({'mode': MODES.MAN_UPD_TOWATCH})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
section_params = get_section_params(section)
paginate=(_1CH.get_setting('paginate-towatched')=='true' and _1CH.get_setting('paginate')=='true')
for video in pw_scraper.get_towatch(section, page, paginate):
watchstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.CH_TOWATCH_WEB, 'primewire_url': video['url'], 'action':'delete', 'refresh':True})
menu_items = [(REMOVE_TW_MENU, watchstring),]
create_item(section_params,video['title'],video['year'],video['img'],video['url'], menu_items=menu_items)
total_pages=pw_scraper.get_last_res_pages()
if not page: page = 1
next_page = int(page)+1
if int(page) < int(total_pages) and paginate:
label = 'Skip to Page...'
# use the MODES constant like the other call sites (reuses the watched-list page select)
command = _1CH.build_plugin_url({'mode': MODES.WATCH_PAGE_SELECT, 'section': section, 'pages': total_pages})
command = 'RunPlugin(%s)' % command
menu_items = [(label, command)]
meta = {'title': 'Next Page >>'}
_1CH.add_directory({'mode': MODES.BROWSE_TW_WEB, 'section': section, 'page': next_page}, meta, contextmenu_items=menu_items, context_replace=True, img=art('nextpage.png'), fanart=art('fanart.png'), is_folder=True)
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
@pw_dispatcher.register(MODES.SHOW_SCHEDULE)
def show_schedule():
utils.log('Calling Show Schedule', xbmc.LOGDEBUG)
section_params = get_section_params('calendar')
for episode in pw_scraper.get_schedule():
create_item(section_params, episode['show_title'], '', episode['img'], episode['url'], '', episode['season_num'], episode['episode_num'], day=episode['day'])
utils.set_view(section_params['content'], '%s-view' % (section_params['content']))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=_1CH.get_setting('dir-cache')=='true')
def create_meta(video_type, title, year):
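"""Fetch metadata for a title via metahandlers, falling back to a bare dict on errors; an unmatched TV show triggers update_meta() to flush any bad cache entry."""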
utils.log('Calling Create Meta: %s, %s, %s' % (video_type, title, year), xbmc.LOGDEBUG)
meta = {'title': title, 'year': year, 'imdb_id': '', 'overlay': ''}
if META_ON:
try:
if video_type == 'tvshow':
meta = __metaget__.get_meta(video_type, title, year=str(year))
if not meta['imdb_id'] and not meta['tvdb_id']:
utils.log('No Meta Match for %s on title & year: |%s|%s|' % (video_type, title, year), xbmc.LOGDEBUG)
# call update_meta to force metahandlers to delete data it might have cached from get_meta
meta = __metaget__.update_meta(video_type, title, '')
else: # movie
meta = __metaget__.get_meta(video_type, title, year=str(year))
except Exception as e:
utils.log('Error (%s) assigning meta data for %s %s %s' % (str(e),video_type, title, year), xbmc.LOGERROR)
return meta
def make_art(video_type, meta, pw_img=''):
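"""Assemble the art dict (thumb/poster/fanart/banner) from metadata, falling back to the PrimeWire image and the theme fanart where needed."""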
utils.log('Making Art: %s, %s, %s' % (video_type, meta, pw_img), xbmc.LOGDEBUG)
# default fanart to theme fanart
art_dict={'thumb': '', 'poster': '', 'fanart': art('fanart.png'), 'banner': ''}
# set the thumb & cover to the poster if it exists
if 'cover_url' in meta:
art_dict['thumb']=meta['cover_url']
art_dict['poster']=meta['cover_url']
# set the thumb to the PW image if fallback is on and there is no cover art
if POSTERS_FALLBACK and art_dict['thumb'] in ('/images/noposter.jpg', ''):
art_dict['thumb']=pw_img
art_dict['poster']=pw_img
# override the fanart with metadata if fanart is on and it exists and isn't blank
if FANART_ON and 'backdrop_url' in meta and meta['backdrop_url']: art_dict['fanart']=meta['backdrop_url']
if 'banner_url' in meta: art_dict['banner']=meta['banner_url']
return art_dict
def repair_missing_images():
utils.log("Repairing Metadata Images")
db_connection.repair_meta_images()
@pw_dispatcher.register(MODES.ADD2LIB, ['video_type', 'url', 'title'], ['year', 'img', 'imdbnum'])
def manual_add_to_library(video_type, url, title, year='', img='', imdbnum=''):
add_to_library(video_type, url, title, img, year, imdbnum)
builtin = "XBMC.Notification(PrimeWire, Added '%s' to library,2000, %s)" % (title, ICON_PATH)
xbmc.executebuiltin(builtin)
def add_to_library(video_type, url, title, img, year, imdbnum):
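"""Write .strm files into the configured library folder: one per episode for a tvshow (scraping every season page), or a single file for a movie."""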
utils.log('Creating .strm for %s %s %s %s %s %s' % (video_type, title, imdbnum, url, img, year))
if video_type == 'tvshow':
save_path = _1CH.get_setting('tvshow-folder')
save_path = xbmc.translatePath(save_path)
show_title = title.strip()
seasons = pw_scraper.get_season_list(url, cached=False)
found_seasons=False
for season in seasons:
found_seasons=True
season_num= season[0]
season_html = season[1]
r = '"tv_episode_item".+?href="(.+?)">(.*?)</a>'
episodes = re.finditer(r, season_html, re.DOTALL)
for ep_line in episodes:
epurl, eptitle = ep_line.groups()
eptitle = re.sub('<[^<]+?>', '', eptitle.strip())
eptitle = re.sub(r'\s\s+', ' ', eptitle)
pattern = r'tv-\d{1,10}-.*/season-\d+-episode-(\d+)'
match = re.search(pattern, epurl, re.I | re.DOTALL)
if not match: continue # skip URLs without an episode number
epnum = match.group(1)
filename = utils.filename_from_title(show_title, video_type)
filename = filename % (season_num, epnum)
show_title = re.sub(r'([^\w\-_\.\(\)\' ]|\.$)', '_', show_title)
final_path = os.path.join(save_path, show_title, 'Season '+season_num, filename)
queries = {'mode': MODES.GET_SOURCES, 'url': epurl, 'imdbnum': '', 'title': show_title, 'img': '',
'dialog': 1, 'video_type': 'episode'}
strm_string = _1CH.build_plugin_url(queries)
write_strm(strm_string, final_path)
if not found_seasons:
utils.log('No Seasons found for %s at %s' % (show_title, url), xbmc.LOGERROR)
elif video_type == 'movie':
save_path = _1CH.get_setting('movie-folder')
save_path = xbmc.translatePath(save_path)
strm_string = _1CH.build_plugin_url(
{'mode': MODES.GET_SOURCES, 'url': url, 'imdbnum': imdbnum, 'title': title, 'img': img, 'year': year,
'dialog': 1, 'video_type': 'movie'})
if year: title = '%s (%s)' % (title, year)
filename = utils.filename_from_title(title, 'movie')
title = re.sub(r'[^\w\-_\.\(\)\' ]', '_', title)
final_path = os.path.join(save_path, title, filename)
write_strm(strm_string, final_path)
def write_strm(stream, path):
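"""Write a .strm file at path, creating directories as needed; the write is skipped when the existing file already holds the same plugin URL."""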
path = xbmc.makeLegalFilename(path)
if not xbmcvfs.exists(os.path.dirname(path)):
try:
try: xbmcvfs.mkdirs(os.path.dirname(path))
except: os.mkdir(os.path.dirname(path))
except:
utils.log('Failed to create directory %s' % path, xbmc.LOGERROR)
old_strm_string=''
try:
f = xbmcvfs.File(path, 'r')
old_strm_string = f.read()
f.close()
except: pass
#print "Old String: %s; New String %s" %(old_strm_string,strm_string)
# string will be blank if file doesn't exist or is blank
if stream != old_strm_string:
try:
utils.log('Writing strm: %s' % stream)
file_desc = xbmcvfs.File(path, 'w')
file_desc.write(stream)
file_desc.close()
except Exception as e:
utils.log('Failed to create .strm file: %s\n%s' % (path, e), xbmc.LOGERROR)
@pw_dispatcher.register(MODES.ADD_SUB, ['url', 'title', 'year'], ['img', 'imdbnum'])
def add_subscription(url, title, year, img='', imdbnum=''):
try:
days=utils.get_default_days()
if utils.using_pl_subs():
pw_scraper.add_to_playlist(utils.get_subs_pl_url(), url)
db_connection.add_ext_sub(SUB_TYPES.PW_PL, url, imdbnum, days)
else:
db_connection.add_subscription(url, title, img, year, imdbnum, days)
add_to_library('tvshow', url, title, img, year, imdbnum)
builtin = "XBMC.Notification(PrimeWire, Subscribed to '%s',2000, %s)" % (title, ICON_PATH)
xbmc.executebuiltin(builtin)
except:
builtin = "XBMC.Notification(PrimeWire, Already subscribed to '%s',2000, %s)" % (title, ICON_PATH)
xbmc.executebuiltin(builtin)
xbmc.executebuiltin('Container.Refresh')
@pw_dispatcher.register(MODES.CANCEL_SUB, ['url'])
def cancel_subscription(url):
if utils.using_pl_subs():
pw_scraper.remove_from_playlist(utils.get_subs_pl_url(), url)
db_connection.delete_ext_sub(SUB_TYPES.PW_PL, url)
else:
db_connection.delete_subscription(url)
xbmc.executebuiltin('Container.Refresh')
@pw_dispatcher.register(MODES.MAN_UPD_SUBS)
def manual_update_subscriptions():
update_subscriptions()
builtin = "XBMC.Notification(PrimeWire, Subscriptions Updated, 2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
now=datetime.datetime.now()
_1CH.set_setting('%s-last_run' % MODES.UPD_SUBS, now.strftime("%Y-%m-%d %H:%M:%S.%f"))
xbmc.executebuiltin('Container.Refresh')
@pw_dispatcher.register(MODES.UPD_SUBS)
def update_subscriptions():
day=datetime.datetime.now().weekday()
subs=get_subscriptions(day)
for sub in subs:
add_to_library('tvshow', sub[0], sub[1], sub[2], sub[3], sub[4])
if _1CH.get_setting('auto-update_towatch') == 'true':
update_towatch()
if _1CH.get_setting('library-update') == 'true':
xbmc.executebuiltin('UpdateLibrary(video)')
if _1CH.get_setting('cleanup-subscriptions') == 'true':
clean_up_subscriptions()
if _1CH.get_setting(MODES.UPD_SUBS+'-notify')=='true':
builtin = "XBMC.Notification(PrimeWire,Subscription Updated, 2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
builtin = "XBMC.Notification(PrimeWire,Next Update in %s hours,5000, %s)" % (_1CH.get_setting(MODES.UPD_SUBS+'-interval'), ICON_PATH)
xbmc.executebuiltin(builtin)
@pw_dispatcher.register(MODES.MAN_CLEAN_SUBS)
def manual_clean_up_subscriptions():
clean_up_subscriptions()
builtin = "XBMC.Notification(PrimeWire, Subscriptions Cleaned Up, 2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
@pw_dispatcher.register(MODES.CLEAN_SUBS)
def clean_up_subscriptions():
utils.log('Cleaning up dead subscriptions')
subs=get_subscriptions()
for sub in subs:
meta = __metaget__.get_meta('tvshow', sub[1], year=sub[3])
if meta['status'] == 'Ended':
utils.log('Selecting %s for removal' % sub[1], xbmc.LOGDEBUG)
cancel_subscription(sub[0])
@pw_dispatcher.register(MODES.MAN_UPD_TOWATCH)
def manual_update_towatch():
update_towatch()
if _1CH.get_setting('library-update') == 'true':
xbmc.executebuiltin('UpdateLibrary(video)')
builtin = "XBMC.Notification(PrimeWire,ToWatch LIst added to library, 2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
def update_towatch():
if not utils.website_is_integrated(): return
movies=pw_scraper.get_towatch('movies')
for movie in movies:
add_to_library('movie', movie['url'], movie['title'], movie['img'], movie['year'], None)
@pw_dispatcher.register(MODES.MANAGE_SUBS)
def manage_subscriptions():
utils.set_view('tvshows', 'tvshows-view')
next_run = utils.get_next_run(MODES.UPD_SUBS)
liz = xbmcgui.ListItem(label='Update Subscriptions ([B]Next Scheduled Run: %s[/B])' % (next_run.strftime('%Y-%m-%d %H:%M:%S')))
liz_url = _1CH.build_plugin_url({'mode': MODES.MAN_UPD_SUBS})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
liz = xbmcgui.ListItem(label='Clean Up Subscriptions')
liz_url = _1CH.build_plugin_url({'mode': MODES.MAN_CLEAN_SUBS})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
fav_urls=get_fav_urls('tv')
subs=get_subscriptions(order_matters=True)
subs_len=len(subs)
for sub in subs:
url, title, img, year, _, days = sub
days_string = utils.get_days_string_from_days(days)
if days_string=='': days_string='DISABLED'
days_format = _1CH.get_setting('format-sub-days')
if '%s' in days_format:
days_string = days_format % (days_string)
else:
utils.log("Ignoring subscription days format because the '%s' placeholder is missing", xbmc.LOGDEBUG)
meta = create_meta('tvshow', title, year)
meta['title'] = utils.format_label_sub(meta)
menu_items = add_contextsearchmenu(meta['title'], 'tv')
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(
{'mode': MODES.EDIT_DAYS, 'url': url, 'days': days})
menu_items.append(('Edit days', runstring,))
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(
{'mode': MODES.CANCEL_SUB, 'url': url})
menu_items.append(('Cancel subscription', runstring,))
if url in fav_urls:
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url({'mode': MODES.DEL_FAV, 'url': url})
menu_items.append(('Remove from Favorites', runstring,))
else:
runstring = 'RunPlugin(%s)' % _1CH.build_plugin_url(
{'mode': MODES.SAVE_FAV, 'fav_type': 'tv', 'title': title, 'url': url, 'year': year})
menu_items.append(('Add to Favorites', runstring,))
menu_items.append(('Show Information', 'XBMC.Action(Info)',))
art=make_art('tvshow', meta, img)
label = '[%s] %s' % (days_string, meta['title'])
listitem = xbmcgui.ListItem(label, iconImage=art['thumb'], thumbnailImage=art['thumb'])
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass
listitem.setInfo('video', meta)
listitem.addContextMenuItems(menu_items, replaceItems=True)
queries = {'mode': MODES.SEASON_LIST, 'title': title, 'url': url, 'img': img, 'imdbnum': meta['imdb_id'], 'video_type': 'tvshow', 'year': year}
li_url = _1CH.build_plugin_url(queries)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), li_url, listitem, isFolder=True, totalItems=subs_len)
_1CH.end_of_directory()
def get_subscriptions(day=None, order_matters=False):
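"""Return subscription tuples (url, title, img, year, imdbnum, days).

With playlist-based subs the PW playlist is merged with the external-subs
table (items missing there are added with the default days); otherwise the
local DB is queried. day, when given, filters to shows updating that weekday.
"""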
if utils.using_pl_subs():
def_days=utils.get_default_days()
items=pw_scraper.show_playlist(utils.get_subs_pl_url(), False)
ext_subs = db_connection.get_external_subs(SUB_TYPES.PW_PL)
subs=[]
for item in items:
if item['video_type']=='tvshow':
for i, sub in enumerate(ext_subs):
if item['url']==sub[1]:
item['days']=sub[3]
del ext_subs[i]
break
else:
# add the item to ext_subs with default days
db_connection.add_ext_sub(SUB_TYPES.PW_PL, item['url'], '', def_days)
item['days']=def_days
# only add this item to the list if we are pulling all days or a day that this item runs on
if day is None or str(day) in item['days']:
subs.append((item['url'], item['title'], item['img'], item['year'], '', item['days']))
if order_matters:
subs.sort(cmp=days_cmp, key=lambda k:k[5].ljust(7)+k[1])
else:
subs=db_connection.get_subscriptions(day, order_matters)
return subs
# "all days" goes to the top, "no days" goes to the bottom, everything else is sorted lexicographically
def days_cmp(x,y):
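"""cmp-style comparator for keys of the form days.ljust(7) + title.

E.g. (hypothetical keys): '0123456Show A' (all days) sorts first,
'       Show C' (no days) sorts last, everything else is lexicographic.
"""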
xdays, xtitle=x[:7], x[7:]
ydays, ytitle=y[:7], y[7:]
if xdays==ydays:
return cmp(xtitle,ytitle)
elif xdays =='0123456':
return -1
elif ydays =='0123456':
return 1
elif xdays==' '*7:
return 1
elif ydays==' '*7:
return -1
else:
return cmp(x,y)
def compose(inner_func, *outer_funcs):
"""Compose multiple unary functions together into a single unary function"""
if not outer_funcs:
return inner_func
outer_func = compose(*outer_funcs)
return lambda *args, **kwargs: outer_func(inner_func(*args, **kwargs))
def update_movie_cat():
# map the movie-category setting label to the scraper's sort key
cats = {
"Featured": "featured",
"Most Popular": "views",
"Highly Rated": "ratings",
"Date Released": "release",
"Date Added": "date",
}
return cats.get(_1CH.get_setting('auto-update-movies-cat'), "featured") # default to featured
@pw_dispatcher.register(MODES.PAGE_SELECT, ['mode', 'section'], ['genre', 'letter', 'sort'])
@pw_dispatcher.register(MODES.FAV_PAGE_SELECT, ['mode', 'section'])
@pw_dispatcher.register(MODES.WATCH_PAGE_SELECT, ['mode', 'section'])
@pw_dispatcher.register(MODES.SEARCH_PAGE_SELECT, ['mode', 'section'], ['search', 'query'])
@pw_dispatcher.register(MODES.PL_PAGE_SELECT, ['mode', 'section'], ['public', 'sort'])
def jump_to_page(mode, section, genre='', letter='', sort='', search='', query='', public=''):
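"""Show a 'Skip to page' dialog for one of the paged listings (the page count is read from _1CH.queries['pages']) and update the container to the chosen page."""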
if mode == MODES.PAGE_SELECT:
queries={'mode': MODES.FILTER_RESULTS, 'section': section, 'genre': genre, 'letter': letter, 'sort': sort}
elif mode==MODES.FAV_PAGE_SELECT:
queries={'mode': MODES.BROWSE_FAVS_WEB, 'section': section}
elif mode==MODES.WATCH_PAGE_SELECT:
queries={'mode': MODES.BROWSE_W_WEB, 'section': section}
elif mode==MODES.SEARCH_PAGE_SELECT:
queries={'mode': search, 'query': query, 'section': section}
elif mode==MODES.PL_PAGE_SELECT:
queries={'mode': MODES.BROWSE_PLAYLISTS, 'section': section, 'public': public, 'sort': sort}
pages = int(_1CH.queries['pages'])
dialog = xbmcgui.Dialog()
options = []
for page in range(pages):
label = 'Page %s' % str(page + 1)
options.append(label)
index = dialog.select('Skip to page', options)
if index>-1:
queries['page']=index+1
url = _1CH.build_plugin_url(queries)
builtin = 'Container.Update(%s)' % url
xbmc.executebuiltin(builtin)
@pw_dispatcher.register(MODES.RESET_DB)
def reset_db():
if db_connection.reset_db():
message='DB Reset Successful'
else:
message='Reset only allowed on sqlite DBs'
builtin = "XBMC.Notification(PrimeWire,%s,2000, %s)" % (message, ICON_PATH)
xbmc.executebuiltin(builtin)
@pw_dispatcher.register(MODES.EXPORT_DB)
def export_db():
try:
dialog = xbmcgui.Dialog()
export_path = dialog.browse(0, 'Select Export Directory', 'files')
if export_path:
export_path = xbmc.translatePath(export_path)
keyboard = xbmc.Keyboard('export.csv', 'Enter Export Filename')
keyboard.doModal()
if keyboard.isConfirmed():
export_filename = keyboard.getText()
export_file = export_path + export_filename
db_connection.export_from_db(export_file)
builtin = "XBMC.Notification(Export Successful,Exported to %s,2000, %s)" % (export_file, ICON_PATH)
xbmc.executebuiltin(builtin)
except Exception as e:
utils.log('Export Failed: %s' % (e), xbmc.LOGERROR)
builtin = "XBMC.Notification(Export,Export Failed,2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
@pw_dispatcher.register(MODES.IMPORT_DB)
def import_db():
try:
dialog = xbmcgui.Dialog()
import_file = dialog.browse(1, 'Select Import File', 'files')
if import_file:
import_file = xbmc.translatePath(import_file)
db_connection.import_into_db(import_file)
builtin = "XBMC.Notification(Import Success,Imported from %s,5000, %s)" % (import_file, ICON_PATH)
xbmc.executebuiltin(builtin)
except Exception as e:
utils.log('Import Failed: %s' % (e), xbmc.LOGERROR)
builtin = "XBMC.Notification(Import,Import Failed,2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
raise
@pw_dispatcher.register(MODES.BACKUP_DB)
def backup_db():
path = xbmc.translatePath("special://database")
now_str = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
full_path = path + 'db_backup_' + now_str +'.csv'
db_connection.export_from_db(full_path)
@pw_dispatcher.register(MODES.EDIT_DAYS, ['url'], ['days'])
def edit_days(url, days=''):
try:
# use a keyboard if the hidden setting is true
if _1CH.get_setting('use-days-keyboard')=='true':
keyboard = xbmc.Keyboard(utils.get_days_string_from_days(days), 'Days to update Subscription (e.g. MTWHFSaSu)')
keyboard.doModal()
if keyboard.isConfirmed():
days_string=keyboard.getText()
new_days=utils.get_days_from_days_string(days_string)
else:
raise Exception('cancelled') # bail out to the outer except: user cancelled
else:
new_days=gui_utils.days_select(days)
if utils.using_pl_subs():
db_connection.edit_external_days(SUB_TYPES.PW_PL, url, new_days)
else:
db_connection.edit_days(url, new_days)
xbmc.executebuiltin('Container.Refresh')
except: pass # if clicked cancel just abort
@pw_dispatcher.register(MODES.HELP)
def show_help():
utils.log('Showing help popup')
try: utils.TextBox()
except: pass
@pw_dispatcher.register(MODES.FLUSH_CACHE)
def flush_cache():
dlg = xbmcgui.Dialog()
ln1 = 'Are you sure you want to '
ln2 = 'delete the url cache?'
ln3 = 'This will slow things down until rebuilt'
# yesno() takes nolabel before yeslabel: 'Delete' is the confirming button
nolabel = 'Keep'
yeslabel = 'Delete'
if dlg.yesno('Flush web cache', ln1, ln2, ln3, nolabel, yeslabel):
db_connection.flush_cache()
@pw_dispatcher.register(MODES.INSTALL_META, ['title'])
def install_metapack(title):
metapacks.install_metapack(title)
@pw_dispatcher.register(MODES.INSTALL_LOCAL_META)
def install_local_metapack():
dialog = xbmcgui.Dialog()
source = dialog.browse(1, 'Metapack', 'files', '.zip', False, False)
metapacks.install_local_zip(source)
@pw_dispatcher.register(MODES.MOVIE_UPDATE)
def movie_update():
builtin = "XBMC.Notification(PrimeWire,Updating. Please wait...,5000,%s)" % xbmcaddon.Addon().getAddonInfo('icon')
xbmc.executebuiltin(builtin)
GetFilteredResults(section='movies', sort=update_movie_cat(), paginate=True)
@pw_dispatcher.register(MODES.SELECT_SOURCES, ['url', 'title'], ['year', 'imdbnum', 'img'])
def select_sources(url, title, year='', img='', imdbnum=''):
get_sources(url, title, year=year, img=img, imdbnum=imdbnum, respect_auto=False)
@pw_dispatcher.register(MODES.REFRESH_META, ['video_type', 'title', 'alt_id'], ['imdbnum', 'year'])
def refresh_meta(video_type, title, alt_id, imdbnum='', year=''):
utils.refresh_meta(video_type, title, imdbnum, alt_id, year)
@pw_dispatcher.register(MODES.META_SETTINGS)
def metahandler_settings():
import metahandler
metahandler.display_settings()
@pw_dispatcher.register(MODES.RES_SETTINGS)
def resolver_settings():
urlresolver.display_settings()
@pw_dispatcher.register(MODES.TOGGLE_X_FAVS, ['title', 'url', 'img', 'action'], ['is_playable'])
def toggle_xbmc_fav(title, url, img, action, is_playable=False):
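"""Add or remove an XBMC favourite via JSON-RPC.

Playable URLs are stored as 'media' favourites and folders as 'window'
favourites; on removal, the variant written under the opposite source-win
setting is cleaned up too.
"""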
# playable urls have to be added as media; folders as window
fav_types = ['media', 'window']
url_types = ['path', 'windowparameter']
dialogs = ['&dialog=True', '&dialog=False']
xbmc_fav_urls=utils.get_xbmc_fav_urls()
if is_playable:
fav_index=0
else:
fav_index=1
opp_index = (fav_index+1)%2
# annoyingly, Favourites.AddFavourite actually toggles despite its name (if it exists, it removes it and vice versa)
fav_url = url + dialogs[fav_index]
opp_url = url + dialogs[opp_index]
cmd = '{"jsonrpc": "2.0", "method": "Favourites.AddFavourite", "params": {"title": "%s", "type": "%s", "window": "10025", "%s": "%s", "thumbnail": "%s"}, "id": 1}'
fav_cmd = cmd % (title, fav_types[fav_index], url_types[fav_index], fav_url, img)
opp_cmd = cmd % (title, fav_types[opp_index], url_types[opp_index], opp_url, img)
fav_exists=utils.in_xbmc_favs(fav_url, xbmc_fav_urls, False)
opp_exists=utils.in_xbmc_favs(opp_url, xbmc_fav_urls, False)
if action==FAV_ACTIONS.ADD:
if not fav_exists:
xbmc.executeJSONRPC(fav_cmd)
if action==FAV_ACTIONS.REMOVE:
if fav_exists:
xbmc.executeJSONRPC(fav_cmd)
# we should only need to remove this if it was added while source-win=<opposite current setting>
if opp_exists:
xbmc.executeJSONRPC(opp_cmd)
xbmc.executebuiltin('Container.Refresh')
def main(argv=None):
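"""Entry point: ignore non-matching plugin URLs, lazily import urlresolver for the source-related modes, then dispatch on the 'mode' query."""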
if argv is None: argv = sys.argv
utils.log('Version: |%s| Queries: |%s|' % (_1CH.get_version(),_1CH.queries))
utils.log('Args: |%s|' % (argv))
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (_1CH.get_id())
if argv[0] != plugin_url:
return
mode = _1CH.queries.get('mode', None)
if mode in [MODES.GET_SOURCES, MODES.PLAY_SOURCE, MODES.PLAY_TRAILER, MODES.RES_SETTINGS, MODES.SELECT_SOURCES]:
global urlresolver
import urlresolver
pw_dispatcher.dispatch(mode, _1CH.queries)
if __name__ == '__main__':
sys.exit(main())
|
smalls257/VRvisu | refs/heads/master | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/test/test_types.py | 113 | # Python test set -- part 6, built-in types
from test.test_support import run_unittest, have_unicode, run_with_locale, \
check_py3k_warnings
import unittest
import sys
import locale
class TypesTests(unittest.TestCase):
def test_truth_values(self):
if None: self.fail('None is true instead of false')
if 0: self.fail('0 is true instead of false')
if 0L: self.fail('0L is true instead of false')
if 0.0: self.fail('0.0 is true instead of false')
if '': self.fail('\'\' is true instead of false')
if not 1: self.fail('1 is false instead of true')
if not 1L: self.fail('1L is false instead of true')
if not 1.0: self.fail('1.0 is false instead of true')
if not 'x': self.fail('\'x\' is false instead of true')
if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
def f(): pass
class C: pass
x = C()
if not f: self.fail('f is false instead of true')
if not C: self.fail('C is false instead of true')
if not sys: self.fail('sys is false instead of true')
if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
def test_comparisons(self):
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: self.fail('int comparisons failed')
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: self.fail('long int comparisons failed')
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: self.fail('float comparisons failed')
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: self.fail('string comparisons failed')
if None is None: pass
else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
def test_zero_division(self):
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
try: 5 / 0L
except ZeroDivisionError: pass
else: self.fail("5 / 0L didn't raise ZeroDivisionError")
try: 5 // 0L
except ZeroDivisionError: pass
else: self.fail("5 // 0L didn't raise ZeroDivisionError")
try: 5 % 0L
except ZeroDivisionError: pass
else: self.fail("5 % 0L didn't raise ZeroDivisionError")
def test_numeric_types(self):
if 0 != 0L or 0 != 0.0 or 0L != 0.0: self.fail('mixed comparisons')
if 1 != 1L or 1 != 1.0 or 1L != 1.0: self.fail('mixed comparisons')
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
self.fail('int/long/float value not equal')
# calling built-in types without argument must return 0
if int() != 0: self.fail('int() does not return 0')
if long() != 0L: self.fail('long() does not return 0L')
if float() != 0.0: self.fail('float() does not return 0.0')
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: self.fail('int() does not round properly')
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: self.fail('long() does not round properly')
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
self.assertEqual('%g' % 1.0, '1')
self.assertEqual('%#g' % 1.0, '1.00000')
def test_normal_integers(self):
# Ensure the first 256 integers are shared
a = 256
b = 128*2
if a is not b: self.fail('256 is not shared')
if 12 + 24 != 36: self.fail('int op')
if 12 + (-24) != -12: self.fail('int op')
if (-12) + 24 != 12: self.fail('int op')
if (-12) + (-24) != -36: self.fail('int op')
if not 12 < 24: self.fail('int op')
if not -24 < -12: self.fail('int op')
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
self.fail('int mul commutativity')
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
if type(prod) is not int:
self.fail("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
def test_long_integers(self):
if 12L + 24L != 36L: self.fail('long op')
if 12L + (-24L) != -12L: self.fail('long op')
if (-12L) + 24L != 12L: self.fail('long op')
if (-12L) + (-24L) != -36L: self.fail('long op')
if not 12L < 24L: self.fail('long op')
if not -24L < -12L: self.fail('long op')
x = sys.maxint
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)+1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
x = -x
if int(long(x)) != x: self.fail('long op')
x = x-1
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)-1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5L << -5L
except ValueError: pass
else: self.fail('long negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
try: 5L >> -5L
except ValueError: pass
else: self.fail('long negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
def test_strings(self):
if len('') != 0: self.fail('len(\'\')')
if len('a') != 1: self.fail('len(\'a\')')
if len('abcdef') != 6: self.fail('len(\'abcdef\')')
if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
if 0*'abcde' != '': self.fail('string repetition 0*')
if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: self.fail('in/not in string')
x = 'x'*103
if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
#extended slices for strings
a = '0123456789'
self.assertEqual(a[::], a)
self.assertEqual(a[::2], '02468')
self.assertEqual(a[1::2], '13579')
self.assertEqual(a[::-1],'9876543210')
self.assertEqual(a[::-2], '97531')
self.assertEqual(a[3::-2], '31')
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], '02468')
if have_unicode:
a = unicode('0123456789', 'ascii')
self.assertEqual(a[::], a)
self.assertEqual(a[::2], unicode('02468', 'ascii'))
self.assertEqual(a[1::2], unicode('13579', 'ascii'))
self.assertEqual(a[::-1], unicode('9876543210', 'ascii'))
self.assertEqual(a[::-2], unicode('97531', 'ascii'))
self.assertEqual(a[3::-2], unicode('31', 'ascii'))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], unicode('02468', 'ascii'))
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
def test_buffers(self):
self.assertRaises(ValueError, buffer, 'asdf', -1)
cmp(buffer("abc"), buffer("def")) # used to raise a warning: tp_compare didn't return -1, 0, or 1
self.assertRaises(TypeError, buffer, None)
a = buffer('asdf')
hash(a)
b = a * 5
if a == b:
self.fail('buffers should not be equal')
if str(b) != ('asdf' * 5):
self.fail('repeated buffer has wrong content')
if str(a * 0) != '':
self.fail('repeated buffer zero times has wrong content')
if str(a + buffer('def')) != 'asdfdef':
self.fail('concatenation of buffers yields wrong content')
if str(buffer(a)) != 'asdf':
self.fail('composing buffers failed')
if str(buffer(a, 2)) != 'df':
self.fail('specifying buffer offset failed')
if str(buffer(a, 0, 2)) != 'as':
self.fail('specifying buffer size failed')
if str(buffer(a, 1, 2)) != 'sd':
self.fail('specifying buffer offset and size failed')
self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
self.fail('composing length-specified buffer failed')
try: a[1] = 'g'
except TypeError: pass
else: self.fail("buffer assignment should raise TypeError")
try: a[0:1] = 'g'
except TypeError: pass
else: self.fail("buffer slice assignment should raise TypeError")
# array.array() returns an object that does not implement a char buffer,
# something which int() uses for conversion.
import array
try: int(buffer(array.array('c')))
except TypeError: pass
else: self.fail("char buffer (at C level) not working")
def test_int__format__(self):
def test(i, format_spec, result):
# just make sure I'm not accidentally checking longs
assert type(i) == int
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(123456789, 'd', '123456789')
test(123456789, 'd', '123456789')
test(1, 'c', '\01')
# sign and aligning are interdependent
test(1, "-", '1')
test(-1, "-", '-1')
test(1, "-3", ' 1')
test(-1, "-3", ' -1')
test(1, "+3", ' +1')
test(-1, "+3", ' -1')
test(1, " 3", ' 1')
test(-1, " 3", ' -1')
test(1, " ", ' 1')
test(-1, " ", '-1')
# hex
test(3, "x", "3")
test(3, "X", "3")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(1234, "8x", " 4d2")
test(-1234, "8x", " -4d2")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(-3, "x", "-3")
test(-3, "X", "-3")
test(int('be', 16), "x", "be")
test(int('be', 16), "X", "BE")
test(-int('be', 16), "x", "-be")
test(-int('be', 16), "X", "-BE")
# octal
test(3, "o", "3")
test(-3, "o", "-3")
test(65, "o", "101")
test(-65, "o", "-101")
test(1234, "o", "2322")
test(-1234, "o", "-2322")
test(1234, "-o", "2322")
test(-1234, "-o", "-2322")
test(1234, " o", " 2322")
test(-1234, " o", "-2322")
test(1234, "+o", "+2322")
test(-1234, "+o", "-2322")
# binary
test(3, "b", "11")
test(-3, "b", "-11")
test(1234, "b", "10011010010")
test(-1234, "b", "-10011010010")
test(1234, "-b", "10011010010")
test(-1234, "-b", "-10011010010")
test(1234, " b", " 10011010010")
test(-1234, " b", "-10011010010")
test(1234, "+b", "+10011010010")
test(-1234, "+b", "-10011010010")
# alternate (#) formatting
test(0, "#b", '0b0')
test(0, "-#b", '0b0')
test(1, "-#b", '0b1')
test(-1, "-#b", '-0b1')
test(-1, "-#5b", ' -0b1')
test(1, "+#5b", ' +0b1')
test(100, "+#b", '+0b1100100')
test(100, "#012b", '0b0001100100')
test(-100, "#012b", '-0b001100100')
test(0, "#o", '0o0')
test(0, "-#o", '0o0')
test(1, "-#o", '0o1')
test(-1, "-#o", '-0o1')
test(-1, "-#5o", ' -0o1')
test(1, "+#5o", ' +0o1')
test(100, "+#o", '+0o144')
test(100, "#012o", '0o0000000144')
test(-100, "#012o", '-0o000000144')
test(0, "#x", '0x0')
test(0, "-#x", '0x0')
test(1, "-#x", '0x1')
test(-1, "-#x", '-0x1')
test(-1, "-#5x", ' -0x1')
test(1, "+#5x", ' +0x1')
test(100, "+#x", '+0x64')
test(100, "#012x", '0x0000000064')
test(-100, "#012x", '-0x000000064')
test(123456, "#012x", '0x000001e240')
test(-123456, "#012x", '-0x00001e240')
test(0, "#X", '0X0')
test(0, "-#X", '0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if format_spec not in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
def test_long__format__(self):
def test(i, format_spec, result):
# make sure we're not accidentally checking ints
assert type(i) == long
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
test(123456789L, 'd', '123456789')
test(123456789L, 'd', '123456789')
# sign and aligning are interdependent
test(1L, "-", '1')
test(-1L, "-", '-1')
test(1L, "-3", ' 1')
test(-1L, "-3", ' -1')
test(1L, "+3", ' +1')
test(-1L, "+3", ' -1')
test(1L, " 3", ' 1')
test(-1L, " 3", ' -1')
test(1L, " ", ' 1')
test(-1L, " ", '-1')
test(1L, 'c', '\01')
# hex
test(3L, "x", "3")
test(3L, "X", "3")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(1234L, "8x", " 4d2")
test(-1234L, "8x", " -4d2")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(-3L, "x", "-3")
test(-3L, "X", "-3")
test(long('be', 16), "x", "be")
test(long('be', 16), "X", "BE")
test(-long('be', 16), "x", "-be")
test(-long('be', 16), "X", "-BE")
# octal
test(3L, "o", "3")
test(-3L, "o", "-3")
test(65L, "o", "101")
test(-65L, "o", "-101")
test(1234L, "o", "2322")
test(-1234L, "o", "-2322")
test(1234L, "-o", "2322")
test(-1234L, "-o", "-2322")
test(1234L, " o", " 2322")
test(-1234L, " o", "-2322")
test(1234L, "+o", "+2322")
test(-1234L, "+o", "-2322")
# binary
test(3L, "b", "11")
test(-3L, "b", "-11")
test(1234L, "b", "10011010010")
test(-1234L, "b", "-10011010010")
test(1234L, "-b", "10011010010")
test(-1234L, "-b", "-10011010010")
test(1234L, " b", " 10011010010")
test(-1234L, " b", "-10011010010")
test(1234L, "+b", "+10011010010")
test(-1234L, "+b", "-10011010010")
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3L .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3L .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3L .__format__, None)
self.assertRaises(TypeError, 3L .__format__, 0)
# alternate specifier in wrong place
self.assertRaises(ValueError, 1L .__format__, "#+5x")
self.assertRaises(ValueError, 1L .__format__, "+5#x")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if format_spec not in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0L .__format__, format_spec)
self.assertRaises(ValueError, 1L .__format__, format_spec)
self.assertRaises(ValueError, (-1L) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the long to a float
for format_spec in 'eEfFgG%':
for value in [0L, 1L, -1L, 100L, -100L, 1234567890L, -1234567890L]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456L, "0<20", '12345600000000000000')
test(123456L, "1<20", '12345611111111111111')
test(123456L, "*<20", '123456**************')
test(123456L, "0>20", '00000000000000123456')
test(123456L, "1>20", '11111111111111123456')
test(123456L, "*>20", '**************123456')
test(123456L, "0=20", '00000000000000123456')
test(123456L, "1=20", '11111111111111123456')
test(123456L, "*=20", '**************123456')
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
def test_float__format__(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
def test(f, format_spec, result):
assert type(f) == float
assert type(format_spec) == str
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(f.__format__(unicode(format_spec)), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 2.6 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
        # a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if format_spec not in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, 0.0, '#')
self.assertRaises(ValueError, format, 0.0, '#20f')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_main():
with check_py3k_warnings(
("buffer.. not supported", DeprecationWarning),
("classic long division", DeprecationWarning)):
run_unittest(TypesTests)
if __name__ == '__main__':
test_main()
|
marctc/django | refs/heads/master | django/views/decorators/gzip.py | 720 | from django.middleware.gzip import GZipMiddleware
from django.utils.decorators import decorator_from_middleware
gzip_page = decorator_from_middleware(GZipMiddleware)
gzip_page.__doc__ = "Decorator for views that gzips pages if the client supports it."
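
# Usage sketch (not part of the original module; the view name is
# hypothetical): gzip_page is applied like any other view decorator.
#
#     from django.views.decorators.gzip import gzip_page
#
#     @gzip_page
#     def my_view(request):
#         ...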
|
jtsymon/livestreamer | refs/heads/develop | src/livestreamer/plugins/stream.py | 37 | from livestreamer.compat import urlparse
from livestreamer.exceptions import PluginError
from livestreamer.plugin import Plugin
from livestreamer.stream import (AkamaiHDStream, HDSStream, HLSStream,
HTTPStream, RTMPStream)
import ast
import re
PROTOCOL_MAP = {
"akamaihd": AkamaiHDStream,
"hds": HDSStream.parse_manifest,
"hls": HLSStream,
"hlsvariant": HLSStream.parse_variant_playlist,
"httpstream": HTTPStream,
"rtmp": RTMPStream,
"rtmpe": RTMPStream,
"rtmps": RTMPStream,
"rtmpt": RTMPStream,
"rtmpte": RTMPStream
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
class StreamURL(Plugin):
@classmethod
    def can_handle_url(cls, url):
parsed = urlparse(url)
return parsed.scheme in PROTOCOL_MAP
def _parse_params(self, params):
rval = {}
matches = re.findall(PARAMS_REGEX, params)
for key, value in matches:
try:
value = ast.literal_eval(value)
except Exception:
pass
rval[key] = value
return rval
def _get_streams(self):
parsed = urlparse(self.url)
cls = PROTOCOL_MAP.get(parsed.scheme)
if not cls:
return
split = self.url.split(" ")
url = split[0]
        urlnoproto = re.match(r"^\w+://(.+)", url).group(1)
        # Prepend http:// if needed.
        if cls != RTMPStream and not re.match(r"^http(s)?://", urlnoproto):
            urlnoproto = "http://{0}".format(urlnoproto)
        params = " ".join(split[1:])
params = self._parse_params(params)
if cls == RTMPStream:
params["rtmp"] = url
for boolkey in ("live", "realtime", "quiet", "verbose", "debug"):
if boolkey in params:
params[boolkey] = bool(params[boolkey])
stream = cls(self.session, params)
elif cls == HLSStream.parse_variant_playlist or cls == HDSStream.parse_manifest:
try:
streams = cls(self.session, urlnoproto, **params)
except IOError as err:
raise PluginError(err)
return streams
else:
stream = cls(self.session, urlnoproto, **params)
return dict(live=stream)
__plugin__ = StreamURL
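
# Illustrative sketch (an assumption, not part of the plugin): _parse_params
# turns trailing "key=value" pairs into a dict, literal-evaluating values when
# possible. For example, the URL string
#     "rtmp://example.com/app/stream live=1 swfVfy='http://example.com/p.swf'"
# yields params == {'live': 1, 'swfVfy': 'http://example.com/p.swf'}, and
# 'live' is then coerced to bool before the RTMPStream is constructed.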
|
agusc/scrapy | refs/heads/master | scrapy/commands/__init__.py | 129 | """
Base class for Scrapy commands
"""
import os
from optparse import OptionGroup
from twisted.python import failure
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class ScrapyCommand(object):
requires_project = False
crawler_process = None
# default settings to be used for this command instead of global defaults
default_settings = {}
exitcode = 0
def __init__(self):
self.settings = None # set in scrapy.cmdline
def set_crawler(self, crawler):
assert not hasattr(self, '_crawler'), "crawler already set"
self._crawler = crawler
def syntax(self):
"""
Command syntax (preferably one-line). Do not include command name.
"""
return ""
def short_desc(self):
"""
A short description of the command
"""
return ""
def long_desc(self):
"""A long description of the command. Return short description when not
available. It cannot contain newlines, since contents will be formatted
by optparser which removes newlines and wraps text.
"""
return self.short_desc()
def help(self):
"""An extensive help for the command. It will be shown when using the
"help" command. It can contain newlines, since not post-formatting will
be applied to its contents.
"""
return self.long_desc()
def add_options(self, parser):
"""
        Populate the option parser with the options available for this command.
"""
group = OptionGroup(parser, "Global Options")
group.add_option("--logfile", metavar="FILE",
help="log file. if omitted stderr will be used")
group.add_option("-L", "--loglevel", metavar="LEVEL", default=None,
help="log level (default: %s)" % self.settings['LOG_LEVEL'])
group.add_option("--nolog", action="store_true",
help="disable logging completely")
group.add_option("--profile", metavar="FILE", default=None,
help="write python cProfile stats to FILE")
group.add_option("--lsprof", metavar="FILE", default=None,
help="write lsprof profiling stats to FILE")
group.add_option("--pidfile", metavar="FILE",
help="write process ID to FILE")
group.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE",
help="set/override setting (may be repeated)")
group.add_option("--pdb", action="store_true", help="enable pdb on failure")
parser.add_option_group(group)
def process_options(self, args, opts):
try:
self.settings.setdict(arglist_to_dict(opts.set),
priority='cmdline')
except ValueError:
raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)
if opts.logfile:
self.settings.set('LOG_ENABLED', True, priority='cmdline')
self.settings.set('LOG_FILE', opts.logfile, priority='cmdline')
if opts.loglevel:
self.settings.set('LOG_ENABLED', True, priority='cmdline')
self.settings.set('LOG_LEVEL', opts.loglevel, priority='cmdline')
if opts.nolog:
self.settings.set('LOG_ENABLED', False, priority='cmdline')
if opts.pidfile:
with open(opts.pidfile, "w") as f:
f.write(str(os.getpid()) + os.linesep)
if opts.pdb:
failure.startDebugMode()
def run(self, args, opts):
"""
Entry point for running commands
"""
raise NotImplementedError
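
# Minimal subclass sketch (an assumption; the command name and behaviour are
# hypothetical, not part of the original module):
#
#     class HelloCommand(ScrapyCommand):
#         def short_desc(self):
#             return "Print a greeting"
#
#         def run(self, args, opts):
#             print("hello")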
|
xyzz/vcmi-build | refs/heads/master | project/jni/python/src/Lib/plat-mac/Carbon/Scrap.py | 82 | from _Scrap import *
|
bsipocz/scikit-image | refs/heads/master | skimage/_shared/_tempfile.py | 41 | from tempfile import NamedTemporaryFile
from contextlib import contextmanager
import os
@contextmanager
def temporary_file(suffix=''):
"""Yield a writeable temporary filename that is deleted on context exit.
Parameters
----------
suffix : string, optional
The suffix for the file.
Examples
--------
>>> import numpy as np
>>> from skimage import io
>>> with temporary_file('.tif') as tempfile:
... im = np.zeros((5, 5), np.uint8)
... io.imsave(tempfile, im)
... assert np.all(io.imread(tempfile) == im)
"""
tempfile_stream = NamedTemporaryFile(suffix=suffix, delete=False)
tempfile = tempfile_stream.name
tempfile_stream.close()
yield tempfile
os.remove(tempfile)
|
veridiam/Madcow-Waaltz | refs/heads/master | madcow/include/twisted/_version.py | 10 | # This is an auto-generated file. Do not edit it.
from twisted.python import versions
version = versions.Version('twisted', 8, 2, 0)
|
fretsonfire/fof-python | refs/heads/master | src/TestAll.py | 1 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*- #
#####################################################################
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
"""Run all unit tests."""
import sys
import os
import unittest
import Config
tests = []
for root, dirs, files in os.walk("."):
for f in files:
f = os.path.join(root, f)
if f.endswith("Test.py"):
m = os.path.basename(f).replace(".py", "")
d = os.path.dirname(f)
sys.path.append(d)
tests.append(__import__(m))
suite = unittest.TestSuite()
if "-i" in sys.argv:
suffix = "TestInteractive"
else:
suffix = "Test"
for test in tests:
for item in dir(test):
if item.endswith(suffix):
suite.addTest(unittest.makeSuite(test.__dict__[item]))
Config.load(setAsDefault = True)
unittest.TextTestRunner(verbosity = 2).run(suite)
|
eliasdorneles/scrapy | refs/heads/master | scrapy/spiders/__init__.py | 134 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
class Spider(object_ref):
"""Base class for scrapy spiders. All spiders must inherit from this
class.
"""
name = None
custom_settings = None
def __init__(self, name=None, **kwargs):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
self.__dict__.update(kwargs)
if not hasattr(self, 'start_urls'):
self.start_urls = []
@property
def logger(self):
logger = logging.getLogger(self.name)
return logging.LoggerAdapter(logger, {'spider': self})
def log(self, message, level=logging.DEBUG, **kw):
"""Log the given message at the given log level
This helper wraps a log call to the logger within the spider, but you
can use it directly (e.g. Spider.logger.info('msg')) or use any other
Python logger too.
"""
self.logger.log(level, message, **kw)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
return spider
def set_crawler(self, crawler):
warnings.warn("set_crawler is deprecated, instantiate and bound the "
"spider to this crawler with from_crawler method "
"instead.",
category=ScrapyDeprecationWarning, stacklevel=2)
assert not hasattr(self, 'crawler'), "Spider already bounded to a " \
"crawler"
self._set_crawler(crawler)
def _set_crawler(self, crawler):
self.crawler = crawler
self.settings = crawler.settings
crawler.signals.connect(self.close, signals.spider_closed)
def start_requests(self):
for url in self.start_urls:
yield self.make_requests_from_url(url)
def make_requests_from_url(self, url):
return Request(url, dont_filter=True)
def parse(self, response):
raise NotImplementedError
@classmethod
def update_settings(cls, settings):
settings.setdict(cls.custom_settings or {}, priority='spider')
@classmethod
def handles_request(cls, request):
return url_is_from_spider(request.url, cls)
@staticmethod
def close(spider, reason):
closed = getattr(spider, 'closed', None)
if callable(closed):
return closed(reason)
def __str__(self):
return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
__repr__ = __str__
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
def __init__(self, message):
self.message = message
def __getattr__(self, name):
raise AttributeError(self.message)
spiders = ObsoleteClass(
'"from scrapy.spider import spiders" no longer works - use '
'"from scrapy.spiderloader import SpiderLoader" and instantiate '
'it with your project settings"'
)
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
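
# Minimal usage sketch (an assumption; the spider name and URL below are
# hypothetical, not part of the original module):
#
#     class ExampleSpider(Spider):
#         name = 'example'
#         start_urls = ['http://example.com/']
#
#         def parse(self, response):
#             self.logger.info('Got %s', response.url)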
|
carlomt/dicom_tools | refs/heads/master | dicom_tools/pyqtgraph/graphicsItems/ErrorBarItem.py | 45 | from ..Qt import QtGui, QtCore
from .GraphicsObject import GraphicsObject
from .. import getConfigOption
from .. import functions as fn
__all__ = ['ErrorBarItem']
class ErrorBarItem(GraphicsObject):
def __init__(self, **opts):
"""
All keyword arguments are passed to setData().
"""
GraphicsObject.__init__(self)
self.opts = dict(
x=None,
y=None,
height=None,
width=None,
top=None,
bottom=None,
left=None,
right=None,
beam=None,
pen=None
)
self.setData(**opts)
def setData(self, **opts):
"""
Update the data in the item. All arguments are optional.
Valid keyword options are:
x, y, height, width, top, bottom, left, right, beam, pen
* x and y must be numpy arrays specifying the coordinates of data points.
* height, width, top, bottom, left, right, and beam may be numpy arrays,
single values, or None to disable. All values should be positive.
* top, bottom, left, and right specify the lengths of bars extending
in each direction.
* If height is specified, it overrides top and bottom.
* If width is specified, it overrides left and right.
* beam specifies the width of the beam at the end of each bar.
* pen may be any single argument accepted by pg.mkPen().
This method was added in version 0.9.9. For prior versions, use setOpts.
"""
self.opts.update(opts)
self.path = None
self.update()
self.prepareGeometryChange()
self.informViewBoundsChanged()
def setOpts(self, **opts):
# for backward compatibility
self.setData(**opts)
def drawPath(self):
p = QtGui.QPainterPath()
x, y = self.opts['x'], self.opts['y']
if x is None or y is None:
return
beam = self.opts['beam']
height, top, bottom = self.opts['height'], self.opts['top'], self.opts['bottom']
if height is not None or top is not None or bottom is not None:
## draw vertical error bars
if height is not None:
y1 = y - height/2.
y2 = y + height/2.
else:
if bottom is None:
y1 = y
else:
y1 = y - bottom
if top is None:
y2 = y
else:
y2 = y + top
for i in range(len(x)):
p.moveTo(x[i], y1[i])
p.lineTo(x[i], y2[i])
if beam is not None and beam > 0:
x1 = x - beam/2.
x2 = x + beam/2.
if height is not None or top is not None:
for i in range(len(x)):
p.moveTo(x1[i], y2[i])
p.lineTo(x2[i], y2[i])
if height is not None or bottom is not None:
for i in range(len(x)):
p.moveTo(x1[i], y1[i])
p.lineTo(x2[i], y1[i])
width, right, left = self.opts['width'], self.opts['right'], self.opts['left']
if width is not None or right is not None or left is not None:
            ## draw horizontal error bars
if width is not None:
x1 = x - width/2.
x2 = x + width/2.
else:
if left is None:
x1 = x
else:
x1 = x - left
if right is None:
x2 = x
else:
x2 = x + right
for i in range(len(x)):
p.moveTo(x1[i], y[i])
p.lineTo(x2[i], y[i])
if beam is not None and beam > 0:
y1 = y - beam/2.
y2 = y + beam/2.
if width is not None or right is not None:
for i in range(len(x)):
p.moveTo(x2[i], y1[i])
p.lineTo(x2[i], y2[i])
if width is not None or left is not None:
for i in range(len(x)):
p.moveTo(x1[i], y1[i])
p.lineTo(x1[i], y2[i])
self.path = p
self.prepareGeometryChange()
def paint(self, p, *args):
if self.path is None:
self.drawPath()
pen = self.opts['pen']
if pen is None:
pen = getConfigOption('foreground')
p.setPen(fn.mkPen(pen))
p.drawPath(self.path)
def boundingRect(self):
if self.path is None:
self.drawPath()
return self.path.boundingRect()
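
# Usage sketch (an assumption, not part of the module): error bars for four
# points, with total vertical extent given by `height` and end caps 0.5 wide.
#
#     import numpy as np
#     import pyqtgraph as pg
#
#     x = np.arange(4)
#     y = x ** 2
#     err = pg.ErrorBarItem(x=x, y=y, height=np.full(4, 0.5), beam=0.5)
#     plot = pg.plot()
#     plot.addItem(err)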
|
iamkingmaker/zipline | refs/heads/master | tests/test_algorithm_gen.py | 18 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
def __init__(self, spread):
super(RecordDateSlippage, self).__init__(spread=spread)
self.latest_date = None
def simulate(self, event, open_orders):
self.latest_date = event.dt
result = super(RecordDateSlippage, self).simulate(event, open_orders)
return result
class TestAlgo(TradingAlgorithm):
def __init__(self, asserter, *args, **kwargs):
super(TestAlgo, self).__init__(*args, **kwargs)
self.asserter = asserter
def initialize(self, window_length=100):
self.latest_date = None
self.set_slippage(RecordDateSlippage(spread=0.05))
self.stocks = [self.sid(8229)]
self.ordered = False
self.num_bars = 0
def handle_data(self, data):
self.num_bars += 1
self.latest_date = self.get_datetime()
if not self.ordered:
for stock in self.stocks:
self.order(stock, 100)
self.ordered = True
else:
self.asserter.assertGreaterEqual(
self.latest_date,
self.slippage.latest_date
)
class AlgorithmGeneratorTestCase(TestCase):
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@nottest
def test_lse_algorithm(self):
lse = trading.TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
with lse:
sim_params = factory.create_simulation_parameters(
start=datetime(2012, 5, 1, tzinfo=pytz.utc),
end=datetime(2012, 6, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
200,
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(len(results), 42)
# May 7, 2012 was an LSE holiday, confirm the 4th trading
# day was May 8.
self.assertEqual(results[4]['daily_perf']['period_open'],
datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
@timed(DEFAULT_TIMEOUT)
def test_generator_dates(self):
"""
        Ensure the pipeline of generators is in sync, at least as far as
        their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2011, 7, 30, tzinfo=pytz.utc),
end=datetime(2012, 7, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
self.assertTrue(list(gen))
self.assertTrue(algo.slippage.latest_date)
self.assertTrue(algo.latest_date)
@timed(DEFAULT_TIMEOUT)
def test_handle_data_on_market(self):
"""
Ensure that handle_data is only called on market minutes.
i.e. events that come in at midnight should be processed at market
open.
"""
from zipline.finance.trading import SimulationParameters
sim_params = SimulationParameters(
period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
data_frequency='minute'
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
midnight_custom_source = [Event({
'custom_field': 42.0,
'sid': 'custom_data',
'source_id': 'TestMidnightSource',
'dt': pd.Timestamp('2012-07-30', tz='UTC'),
'type': DATASOURCE_TYPE.CUSTOM
})]
minute_event_source = [Event({
'volume': 100,
'price': 200.0,
'high': 210.0,
'open_price': 190.0,
'low': 180.0,
'sid': 8229,
'source_id': 'TestMinuteEventSource',
'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').
tz_convert('UTC'),
'type': DATASOURCE_TYPE.TRADE
})]
algo.set_sources([midnight_custom_source, minute_event_source])
gen = algo.get_generator()
# Consume the generator
list(gen)
# Though the events had different time stamps, handle data should
# have only been called once, at the market open.
self.assertEqual(algo.num_bars, 1)
@timed(DEFAULT_TIMEOUT)
def test_progress(self):
"""
        Ensure that the simulation reports full progress (1.0) by the end
        of the run.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2008, 1, 1, tzinfo=pytz.utc),
end=datetime(2008, 1, 5, tzinfo=pytz.utc)
)
algo = TestAlgo(self, sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(results[-2]['progress'], 1.0)
def test_benchmark_times_match_market_close_for_minutely_data(self):
"""
Benchmark dates should be adjusted so that benchmark events are
emitted at the end of each trading day when working with minutely
data.
Verification relies on the fact that there are no trades so
algo.datetime should be equal to the last benchmark time.
See https://github.com/quantopian/zipline/issues/241
"""
sim_params = create_simulation_parameters(num_days=1,
data_frequency='minute')
algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
algo.run(source=[], overwrite_sim_params=False)
self.assertEqual(algo.datetime, sim_params.last_close)
|
luzhijun/Optimization | refs/heads/gh-pages | cma-es/test_ud/worker_package/config.py | 2 |
# Your access_id and secret_key pair
ID = 'P4c9wtscfsH4rxeT'
KEY = 'i1D0CKk4kVXS0xI1YfN2fJzFVHdW8Y'
assert ID and KEY, 'You must supply your access_id and secret_key.'
REGION = 'batchcompute.cn-shenzhen.aliyuncs.com'
OSS_HOST = 'oss-cn-shenzhen.aliyuncs.com'
#OSS_HOST='oss-cn-shenzhen-internal.aliyuncs.com'
OSS_BUCKET = 'vp02'
assert OSS_HOST and OSS_BUCKET, 'You must also supply a bucket \
created with the access_id above.'
IMAGE_ID = 'img-0000000055DBF0650000821C00002121'
assert IMAGE_ID, "You'd better specify a valid image id."
# TASK_NUM is the task count; INSTANCE_NUM is the instance count per task
TASK_NUM = 1
INSTANCE_NUM = 3
CPU_NUM = 16
SYNC = 0
M = 100
# NFS
LOCK = True
LOCALE = 'UTF-8'
LOCAL_DATA = '/home/admin/nfs/'
# PATH
PATH_TMPL = 'oss://%s/%s'
REQUEST_NAME = 'test/data/testString.txt'
RESPONSE_NAME = 'test/data/testString.txt'
OUTPUTLOG = 'test/data/testlog%s.txt'
TMPFILE = 'test/data/tmp.txt'
DATA_PATH = 'test/data/'
PACKAGE_PATH = 'test/package/udtest.tar.gz'
LOG_PATH = PATH_TMPL % (OSS_BUCKET, 'test/logs')
FULL_PACKAGE = PATH_TMPL % (OSS_BUCKET, PACKAGE_PATH)
FULL_DATAPATH = PATH_TMPL % (OSS_BUCKET, DATA_PATH)
|
Cuuuurzel/KiPyCalc | refs/heads/master | sympy/functions/special/tests/test_bsplines.py | 27 | from sympy.functions import bspline_basis, bspline_basis_set
from sympy import Piecewise, Interval
from sympy import symbols, Rational
x, y = symbols('x,y')
def test_basic_degree_0():
d = 0
knots = range(5)
splines = bspline_basis_set(d, knots, x)
for i in range(len(splines)):
assert splines[i] == Piecewise((1, Interval(i, i + 1)
.contains(x)), (0, True))
def test_basic_degree_1():
d = 1
knots = range(5)
splines = bspline_basis_set(d, knots, x)
assert splines[0] == Piecewise(
(x, Interval(0, 1, False, True).contains(x)),
(2 - x, Interval(1, 2).contains(x)), (0, True))
assert splines[1] == Piecewise(
(-1 + x, Interval(1, 2, False, True).contains(x)),
(3 - x, Interval(2, 3).contains(x)), (0, True))
assert splines[2] == Piecewise(
(-2 + x, Interval(2, 3, False, True).contains(x)),
(4 - x, Interval(3, 4).contains(x)), (0, True))
def test_basic_degree_2():
d = 2
knots = range(5)
splines = bspline_basis_set(d, knots, x)
b0 = Piecewise((x**2/2, Interval(0, 1, False, True).contains(x)),
(Rational(
-3, 2) + 3*x - x**2, Interval(1, 2, False, True).contains(x)),
(Rational(9, 2) - 3*x + x**2/2, Interval(2, 3).contains(x)), (0, True))
b1 = Piecewise(
(Rational(1, 2) - x + x**2/2, Interval(1, 2, False, True).contains(x)),
(Rational(
-11, 2) + 5*x - x**2, Interval(2, 3, False, True).contains(x)),
(8 - 4*x + x**2/2, Interval(3, 4).contains(x)), (0, True))
assert splines[0] == b0
assert splines[1] == b1
def test_basic_degree_3():
d = 3
knots = range(5)
splines = bspline_basis_set(d, knots, x)
b0 = Piecewise(
(x**3/6, Interval(0, 1, False, True).contains(x)),
(Rational(2, 3) - 2*x + 2*x**2 - x**3/2, Interval(1, 2,
False, True).contains(x)),
(Rational(-22, 3) + 10*x - 4*x**2 + x**3/2, Interval(2, 3,
False, True).contains(x)),
(Rational(32, 3) - 8*x + 2*x**2 - x**3/6, Interval(3, 4).contains(x)),
(0, True)
)
assert splines[0] == b0
def test_repeated_degree_1():
d = 1
knots = [0, 0, 1, 2, 2, 3, 4, 4]
splines = bspline_basis_set(d, knots, x)
assert splines[0] == Piecewise((1 - x, Interval(0, 1).contains(x)),
(0, True))
assert splines[1] == Piecewise(
(x, Interval(0, 1, False, True).contains(x)),
(2 - x, Interval(1, 2).contains(x)), (0, True))
assert splines[2] == Piecewise((-1 + x, Interval(1, 2).contains(x)
), (0, True))
assert splines[3] == Piecewise((3 - x, Interval(2, 3).contains(x)),
(0, True))
assert splines[4] == Piecewise(
(-2 + x, Interval(2, 3, False, True).contains(x)),
(4 - x, Interval(3, 4).contains(x)), (0, True))
assert splines[5] == Piecewise((-3 + x, Interval(3, 4).contains(x)
), (0, True))
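
# Worked example (a sketch, not part of the original test module): a degree-1
# basis spline over knots [0, 1, 2] is the usual "hat" function, so the first
# basis element evaluates to 1/2 at x = 1/2:
#
#     >>> b = bspline_basis(1, range(3), 0, x)
#     >>> b.subs(x, Rational(1, 2))
#     1/2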
|
brchiu/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/in_topk_op_test.py | 13 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PrecisionOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class InTopKTest(test.TestCase):
def _validateInTopK(self, predictions, target, k, expected):
np_ans = np.array(expected)
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
def testInTop1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [3, 1]
self._validateInTopK(predictions, target, 1, [True, False])
def testInTop2(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 2]
self._validateInTopK(predictions, target, 2, [False, True])
def testInTop2Tie(self):
# Class 2 and 3 tie for 2nd, so both are considered in top 2.
predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
target = [2, 3]
self._validateInTopK(predictions, target, 2, [True, True])
def testInTop2_int64Target(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = np.asarray([0, 2]).astype(np.int64)
self._validateInTopK(predictions, target, 2, [False, True])
def testInTopNan(self):
predictions = [[0.1, float("nan"), 0.2, 0.4], [0.1, 0.2, 0.3, float("inf")]]
target = [0, 2]
self._validateInTopK(predictions, target, 2, [False, False])
def testBadTarget(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 80000]
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"target.*out of range"):
nn_ops.in_top_k(predictions, target, 2).eval()
def testTensorK(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 2]
k = constant_op.constant(3)
np_ans = np.array([False, True])
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
if __name__ == "__main__":
test.main()
|
langpavel/skolajs.cz | refs/heads/master | tools/closure_linter/closure_linter/tokenutil.py | 13 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token utility functions."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
from closure_linter.common import tokens
from closure_linter import javascripttokens
import copy
# Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
Args:
token: Any token in the line.
Returns:
The first token in the same line as token.
"""
while not token.IsFirstInLine():
token = token.previous
return token
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
Args:
start_token: The token to start searching from
func: The function to call to test a token for applicability
end_func: The function to call to test a token to determine whether to abort
the search.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token matching func within distance of this token, or None if no
such token is found.
"""
token = start_token
if reverse:
while token and (distance is None or distance > 0):
previous = token.previous
if previous:
if func(previous):
return previous
if end_func and end_func(previous):
return None
token = previous
if distance is not None:
distance -= 1
else:
while token and (distance is None or distance > 0):
next = token.next
if next:
if func(next):
return next
if end_func and end_func(next):
return None
token = next
if distance is not None:
distance -= 1
return None
def Search(start_token, token_types, distance=None, reverse=False):
"""Returns the first token of type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The allowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
None, distance, reverse)
def SearchExcept(start_token, token_types, distance=None, reverse=False):
"""Returns the first token not of any type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The unallowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
    The first token not of any type in token_types within distance of this
    token, or None if no such token is found.
"""
return CustomSearch(start_token,
lambda token: not token.IsAnyType(token_types),
None, distance, reverse)
def SearchUntil(start_token, token_types, end_types, distance=None,
reverse=False):
"""Returns the first token of type in token_types before a token of end_type.
Args:
start_token: The token to start searching from.
token_types: The allowable types of the token being searched for.
end_types: Types of tokens to abort search if we find.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token
before any tokens of type in end_type, or None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
lambda token: token.IsAnyType(end_types),
distance, reverse)
def DeleteToken(token):
"""Deletes the given token from the linked list.
Args:
token: The token to delete
"""
if token.previous:
token.previous.next = token.next
if token.next:
token.next.previous = token.previous
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
def DeleteTokens(token, tokenCount):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
tokenCount: The total number of tokens to delete.
"""
for i in xrange(1, tokenCount):
DeleteToken(token.next)
DeleteToken(token)
def InsertTokenAfter(new_token, token):
"""Insert new_token after token
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.previous = token
new_token.next = token.next
new_token.metadata = copy.copy(token.metadata)
if token.IsCode():
new_token.metadata.last_code = token
if new_token.IsCode():
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = new_token
following_token = following_token.next
token.next = new_token
if new_token.next:
new_token.next.previous = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index + len(token.string)
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
Args:
token: The token to insert a space token after
Returns:
A single space token"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
  InsertTokenAfter(space_token, token)
  return space_token
def InsertLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
  """
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
InsertTokenAfter(blank_token, token)
  # Update all subsequent line numbers.
blank_token = blank_token.next
while blank_token:
blank_token.line_number += 1
blank_token = blank_token.next
def SplitToken(token, position):
"""Splits the token into two tokens at position.
Args:
token: The token to split
position: The position to split at. Will be the beginning of second token.
Returns:
The new second token.
"""
new_string = token.string[position:]
token.string = token.string[:position]
new_token = JavaScriptToken(new_string, token.type, token.line,
token.line_number)
InsertTokenAfter(new_token, token)
return new_token
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
"""
if token2.line_number != token1.line_number:
return token1.line_number - token2.line_number
else:
return token1.start_index - token2.start_index
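
# Usage sketch (an assumption; `start` stands for a hypothetical token already
# in a stream): find the next whitespace token within five tokens, forward.
#
#     match = Search(start, [Type.WHITESPACE], distance=5)
#     if match is not None:
#         ...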
|
UCRoboticsLab/BaxterTictactoe | refs/heads/master | src/baxter_interface/src/baxter_interface/digital_io.py | 3 | # Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import errno
import rospy
import baxter_dataflow
from baxter_core_msgs.msg import (
DigitalIOState,
DigitalOutputCommand,
)
class DigitalIO(object):
"""
Interface class for a simple Digital Input and/or Output on the
Baxter robot
Input
- read input state
Output
- turn output On/Off
- read current output state
"""
def __init__(self, component_id):
"""
Constructor.
@param component_id: unique id of the digital component
"""
self._id = component_id
self._component_type = 'digital_io'
self._is_output = False
self._state = None
self.state_changed = baxter_dataflow.Signal()
type_ns = '/robot/' + self._component_type
topic_base = type_ns + '/' + self._id
self._sub_state = rospy.Subscriber(
topic_base + '/state',
DigitalIOState,
self._on_io_state)
baxter_dataflow.wait_for(
            lambda: self._state is not None,
timeout=2.0,
timeout_msg="Failed to get current digital_io state from %s" \
% (topic_base,),
)
# check if output-capable before creating publisher
if self._is_output:
self._pub_output = rospy.Publisher(
type_ns + '/command',
DigitalOutputCommand,
queue_size=10)
def _on_io_state(self, msg):
"""
Updates the internally stored state of the Digital Input/Output.
"""
new_state = (msg.state == DigitalIOState.PRESSED)
if self._state is None:
self._is_output = not msg.isInputOnly
old_state = self._state
self._state = new_state
# trigger signal if changed
if old_state is not None and old_state != new_state:
self.state_changed(new_state)
@property
def is_output(self):
"""
Accessor to check if IO is capable of output.
"""
return self._is_output
@property
def state(self):
"""
Current state of the Digital Input/Output.
"""
return self._state
@state.setter
def state(self, value):
"""
Control the state of the Digital Output. (is_output must be True)
@type value: bool
@param value: new state to output {True, False}
"""
self.set_output(value)
def set_output(self, value, timeout=2.0):
"""
Control the state of the Digital Output.
Use this function for finer control over the wait_for timeout.
@type value: bool
@param value: new state {True, False} of the Output.
@type timeout: float
@param timeout: Seconds to wait for the io to reflect command.
If 0, just command once and return. [0]
"""
if not self._is_output:
raise IOError(errno.EACCES, "Component is not an output [%s: %s]" %
(self._component_type, self._id))
cmd = DigitalOutputCommand()
cmd.name = self._id
cmd.value = value
self._pub_output.publish(cmd)
if not timeout == 0:
baxter_dataflow.wait_for(
test=lambda: self.state == value,
timeout=timeout,
rate=100,
timeout_msg=("Failed to command digital io to: %r" % (value,)),
body=lambda: self._pub_output.publish(cmd)
)
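
# Usage sketch (an assumption; the component id is illustrative and a running
# ROS node is required): toggle an output-capable IO and read its state back.
#
#     import rospy
#     rospy.init_node('digital_io_example')
#     io = DigitalIO('right_itb_light_outer')
#     if io.is_output:
#         io.set_output(True)
#     print(io.state)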
|
florentx/OpenUpgrade | refs/heads/8.0 | addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_timesheet_current_open(osv.osv_memory):
_name = 'hr.timesheet.current.open'
_description = 'hr.timesheet.current.open'
def open_timesheet(self, cr, uid, ids, context=None):
ts = self.pool.get('hr_timesheet_sheet.sheet')
if context is None:
context = {}
view_type = 'form,tree'
user_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
if not len(user_ids):
raise osv.except_osv(_('Error!'), _('Please create an employee and associate it with this user.'))
ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
if len(ids) > 1:
view_type = 'tree,form'
domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
elif len(ids)==1:
domain = "[('user_id', '=', uid)]"
else:
domain = "[('user_id', '=', uid)]"
value = {
'domain': domain,
'name': _('Open Timesheet'),
'view_type': 'form',
'view_mode': view_type,
'res_model': 'hr_timesheet_sheet.sheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
if len(ids) == 1:
value['res_id'] = ids[0]
return value
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
darkfeline/sphinxcontrib-napoleon | refs/heads/master | build/lib/sphinxcontrib/napoleon/pavertasks.py | 2 | # -*- coding: utf-8 -*-
# Copyright 2013 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Sphinx related paver tasks.
Methods
-------
apidoc
Derive reStructuredText API doc files from python source code.
This task is essentially a wrapper around the `sphinx-apidoc`_ script.
The following settings can be set on the options object:
* ``apidoc_excludes`` -- (*str or list of str*) A directory or list of
directories to exclude from doc generation. These should either be
absolute paths, or relative to `apidoc_moduledir`
* ``apidoc_moduledir`` -- (*str*) The root directory to search for Python
modules. Defaults to "."
* ``apidoc_outputdir`` -- (*str*) The output directory. Defaults to
`options.docroot/options.sourcedir`
* ``apidoc_overwrite`` -- (*bool*) True to overwrite existing files.
Defaults to True
.. _sphinx-apidoc: http://sphinx-doc.org/man/sphinx-apidoc.html
Example
-------
Creating API documentation is easy with a `pavement.py` file like this::
# pavement.py
from sphinxcontrib.napoleon.pavertasks import apidoc, html
from paver.easy import *
options(
sphinx=Bunch(
apidoc_excludes=['tests'],
apidoc_moduledir='sphinxcontrib/napoleon',
apidoc_outputdir='docs/source',
apidoc_overwrite=True,
builddir='build',
docroot='docs',
sourcedir='source',
),
)
And call::
$ paver apidoc
---> sphinxcontrib.napoleon.pavertasks.apidoc
sphinx-apidoc -f -o docs/source sphinxcontrib tests
Creating file docs/source/sphinxcontrib.rst.
Creating file docs/source/sphinxcontrib.napoleon.rst.
Creating file docs/source/modules.rst.
html
Build HTML documentation, including API documentation.
This task is a convenience method for calling `apidoc` followed by
`paver.docutils.html`. To use it, simply import it in your `pavement.py`
file::
from sphinxcontrib.napoleon.pavertasks import html
And call::
$ paver html
"""
import os
from paver.easy import BuildFailure, needs, task
try:
import sphinx
    assert sphinx
has_sphinx = True
except ImportError:
has_sphinx = False
@task
def apidoc(options):
if not has_sphinx:
raise BuildFailure('Install sphinx to build html docs')
outputdir = options.get('apidoc_outputdir', '')
if not outputdir:
docroot = options.get('docroot', 'docs')
if not os.path.exists(docroot):
raise BuildFailure('Doc root dir (%s) does not exist' % docroot)
outputdir = os.path.join(docroot, options.get('sourcedir', ''))
if not os.path.exists(outputdir):
raise BuildFailure('Doc source dir (%s) does not exist' % outputdir)
moduledir = options.get('apidoc_moduledir', '.')
if not os.path.exists(moduledir):
raise BuildFailure('Module dir (%s) does not exist' % moduledir)
excludes = options.get('apidoc_excludes', [])
if isinstance(excludes, str):
excludes = [excludes]
if options.get('apidoc_overwrite', True):
args = ['sphinx-apidoc', '-f']
else:
args = ['sphinx-apidoc']
from sphinx.apidoc import main
args.extend(['-o', outputdir, moduledir] + excludes)
    print(' '.join(args))
main(args)
@task
@needs('sphinxcontrib.napoleon.pavertasks.apidoc', 'paver.doctools.html')
def html(options):
pass
|
googleapis/googleapis-gen | refs/heads/master | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_campaign_service.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.common.types import bidding
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import frequency_cap
from google.ads.googleads.v8.common.types import real_time_bidding_setting
from google.ads.googleads.v8.common.types import targeting_setting
from google.ads.googleads.v8.enums.types import ad_serving_optimization_status
from google.ads.googleads.v8.enums.types import advertising_channel_sub_type
from google.ads.googleads.v8.enums.types import advertising_channel_type
from google.ads.googleads.v8.enums.types import app_campaign_app_store
from google.ads.googleads.v8.enums.types import app_campaign_bidding_strategy_goal_type
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import bidding_strategy_type
from google.ads.googleads.v8.enums.types import brand_safety_suitability
from google.ads.googleads.v8.enums.types import campaign_experiment_type
from google.ads.googleads.v8.enums.types import campaign_serving_status
from google.ads.googleads.v8.enums.types import campaign_status
from google.ads.googleads.v8.enums.types import frequency_cap_event_type
from google.ads.googleads.v8.enums.types import frequency_cap_level
from google.ads.googleads.v8.enums.types import frequency_cap_time_unit
from google.ads.googleads.v8.enums.types import location_source_type
from google.ads.googleads.v8.enums.types import negative_geo_target_type
from google.ads.googleads.v8.enums.types import optimization_goal_type
from google.ads.googleads.v8.enums.types import payment_mode
from google.ads.googleads.v8.enums.types import positive_geo_target_type
from google.ads.googleads.v8.enums.types import response_content_type
from google.ads.googleads.v8.enums.types import target_impression_share_location
from google.ads.googleads.v8.enums.types import targeting_dimension
from google.ads.googleads.v8.enums.types import vanity_pharma_display_url_mode
from google.ads.googleads.v8.enums.types import vanity_pharma_text
from google.ads.googleads.v8.resources.types import campaign
from google.ads.googleads.v8.services.services.campaign_service import CampaignServiceClient
from google.ads.googleads.v8.services.services.campaign_service import transports
from google.ads.googleads.v8.services.types import campaign_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CampaignServiceClient._get_default_mtls_endpoint(None) is None
assert CampaignServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CampaignServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CampaignServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CampaignServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CampaignServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_campaign_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = CampaignServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = CampaignServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = CampaignServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_service_client_get_transport_class():
transport = CampaignServiceClient.get_transport_class()
assert transport == transports.CampaignServiceGrpcTransport
transport = CampaignServiceClient.get_transport_class("grpc")
assert transport == transports.CampaignServiceGrpcTransport
@mock.patch.object(CampaignServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CampaignServiceClient))
def test_campaign_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.CampaignServiceClient.get_transport_class') as gtc:
transport = transports.CampaignServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = CampaignServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.CampaignServiceClient.get_transport_class') as gtc:
client = CampaignServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = CampaignServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = CampaignServiceClient()
@mock.patch.object(CampaignServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CampaignServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_campaign_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = CampaignServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = CampaignServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = CampaignServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_campaign_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CampaignServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_campaign(transport: str = 'grpc', request_type=campaign_service.GetCampaignRequest):
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign.Campaign(
resource_name='resource_name_value',
id=205,
name='name_value',
status=campaign_status.CampaignStatusEnum.CampaignStatus.UNKNOWN,
serving_status=campaign_serving_status.CampaignServingStatusEnum.CampaignServingStatus.UNKNOWN,
ad_serving_optimization_status=ad_serving_optimization_status.AdServingOptimizationStatusEnum.AdServingOptimizationStatus.UNKNOWN,
advertising_channel_type=advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType.UNKNOWN,
advertising_channel_sub_type=advertising_channel_sub_type.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType.UNKNOWN,
tracking_url_template='tracking_url_template_value',
labels=['labels_value'],
experiment_type=campaign_experiment_type.CampaignExperimentTypeEnum.CampaignExperimentType.UNKNOWN,
base_campaign='base_campaign_value',
campaign_budget='campaign_budget_value',
bidding_strategy_type=bidding_strategy_type.BiddingStrategyTypeEnum.BiddingStrategyType.UNKNOWN,
accessible_bidding_strategy='accessible_bidding_strategy_value',
start_date='start_date_value',
end_date='end_date_value',
final_url_suffix='final_url_suffix_value',
video_brand_safety_suitability=brand_safety_suitability.BrandSafetySuitabilityEnum.BrandSafetySuitability.UNKNOWN,
payment_mode=payment_mode.PaymentModeEnum.PaymentMode.UNKNOWN,
optimization_score=0.1954,
excluded_parent_asset_field_types=[asset_field_type.AssetFieldTypeEnum.AssetFieldType.UNKNOWN],
bidding_strategy='bidding_strategy_value',
)
response = client.get_campaign(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == campaign_service.GetCampaignRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, campaign.Campaign)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.name == 'name_value'
assert response.status == campaign_status.CampaignStatusEnum.CampaignStatus.UNKNOWN
assert response.serving_status == campaign_serving_status.CampaignServingStatusEnum.CampaignServingStatus.UNKNOWN
assert response.ad_serving_optimization_status == ad_serving_optimization_status.AdServingOptimizationStatusEnum.AdServingOptimizationStatus.UNKNOWN
assert response.advertising_channel_type == advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType.UNKNOWN
assert response.advertising_channel_sub_type == advertising_channel_sub_type.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType.UNKNOWN
assert response.tracking_url_template == 'tracking_url_template_value'
assert response.labels == ['labels_value']
assert response.experiment_type == campaign_experiment_type.CampaignExperimentTypeEnum.CampaignExperimentType.UNKNOWN
assert response.base_campaign == 'base_campaign_value'
assert response.campaign_budget == 'campaign_budget_value'
assert response.bidding_strategy_type == bidding_strategy_type.BiddingStrategyTypeEnum.BiddingStrategyType.UNKNOWN
assert response.accessible_bidding_strategy == 'accessible_bidding_strategy_value'
assert response.start_date == 'start_date_value'
assert response.end_date == 'end_date_value'
assert response.final_url_suffix == 'final_url_suffix_value'
assert response.video_brand_safety_suitability == brand_safety_suitability.BrandSafetySuitabilityEnum.BrandSafetySuitability.UNKNOWN
assert response.payment_mode == payment_mode.PaymentModeEnum.PaymentMode.UNKNOWN
assert math.isclose(response.optimization_score, 0.1954, rel_tol=1e-6)
assert response.excluded_parent_asset_field_types == [asset_field_type.AssetFieldTypeEnum.AssetFieldType.UNKNOWN]
def test_get_campaign_from_dict():
test_get_campaign(request_type=dict)
def test_get_campaign_field_headers():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = campaign_service.GetCampaignRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign),
'__call__') as call:
call.return_value = campaign.Campaign()
client.get_campaign(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_campaign_flattened():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_campaign),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign.Campaign()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_campaign(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_campaign_flattened_error():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_campaign(
campaign_service.GetCampaignRequest(),
resource_name='resource_name_value',
)
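# For orientation (not itself a test): the un-mocked call pattern these tests
# exercise looks roughly like the sketch below. The resource name is a
# placeholder, and real use requires valid Google Ads credentials:
#
#   client = CampaignServiceClient(credentials=ga_credentials.AnonymousCredentials())
#   campaign = client.get_campaign(resource_name="customers/123/campaigns/456")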
def test_mutate_campaigns(transport: str = 'grpc', request_type=campaign_service.MutateCampaignsRequest):
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_campaigns),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign_service.MutateCampaignsResponse(
)
response = client.mutate_campaigns(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == campaign_service.MutateCampaignsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, campaign_service.MutateCampaignsResponse)
def test_mutate_campaigns_from_dict():
test_mutate_campaigns(request_type=dict)
def test_mutate_campaigns_field_headers():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = campaign_service.MutateCampaignsRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_campaigns),
'__call__') as call:
call.return_value = campaign_service.MutateCampaignsResponse()
client.mutate_campaigns(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_campaigns_flattened():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_campaigns),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = campaign_service.MutateCampaignsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_campaigns(
customer_id='customer_id_value',
operations=[campaign_service.CampaignOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [campaign_service.CampaignOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_campaigns_flattened_error():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_campaigns(
campaign_service.MutateCampaignsRequest(),
customer_id='customer_id_value',
operations=[campaign_service.CampaignOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CampaignServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CampaignServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CampaignServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CampaignServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CampaignServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.CampaignServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_campaign_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CampaignServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_campaign',
'mutate_campaigns',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_campaign_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.campaign_service.transports.CampaignServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CampaignServiceTransport()
adc.assert_called_once()
def test_campaign_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CampaignServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_campaign_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.CampaignServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_campaign_service_host_no_port():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_campaign_service_host_with_port():
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_campaign_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.CampaignServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.CampaignServiceGrpcTransport])
def test_campaign_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CampaignServiceGrpcTransport,])
def test_campaign_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_accessible_bidding_strategy_path():
customer_id = "squid"
bidding_strategy_id = "clam"
expected = "customers/{customer_id}/accessibleBiddingStrategies/{bidding_strategy_id}".format(customer_id=customer_id, bidding_strategy_id=bidding_strategy_id, )
actual = CampaignServiceClient.accessible_bidding_strategy_path(customer_id, bidding_strategy_id)
assert expected == actual
def test_parse_accessible_bidding_strategy_path():
expected = {
"customer_id": "whelk",
"bidding_strategy_id": "octopus",
}
path = CampaignServiceClient.accessible_bidding_strategy_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_accessible_bidding_strategy_path(path)
assert expected == actual
def test_bidding_strategy_path():
customer_id = "oyster"
bidding_strategy_id = "nudibranch"
expected = "customers/{customer_id}/biddingStrategies/{bidding_strategy_id}".format(customer_id=customer_id, bidding_strategy_id=bidding_strategy_id, )
actual = CampaignServiceClient.bidding_strategy_path(customer_id, bidding_strategy_id)
assert expected == actual
def test_parse_bidding_strategy_path():
expected = {
"customer_id": "cuttlefish",
"bidding_strategy_id": "mussel",
}
path = CampaignServiceClient.bidding_strategy_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_bidding_strategy_path(path)
assert expected == actual
def test_campaign_path():
customer_id = "winkle"
campaign_id = "nautilus"
expected = "customers/{customer_id}/campaigns/{campaign_id}".format(customer_id=customer_id, campaign_id=campaign_id, )
actual = CampaignServiceClient.campaign_path(customer_id, campaign_id)
assert expected == actual
def test_parse_campaign_path():
expected = {
"customer_id": "scallop",
"campaign_id": "abalone",
}
path = CampaignServiceClient.campaign_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_campaign_path(path)
assert expected == actual
def test_campaign_budget_path():
customer_id = "squid"
campaign_budget_id = "clam"
expected = "customers/{customer_id}/campaignBudgets/{campaign_budget_id}".format(customer_id=customer_id, campaign_budget_id=campaign_budget_id, )
actual = CampaignServiceClient.campaign_budget_path(customer_id, campaign_budget_id)
assert expected == actual
def test_parse_campaign_budget_path():
expected = {
"customer_id": "whelk",
"campaign_budget_id": "octopus",
}
path = CampaignServiceClient.campaign_budget_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_campaign_budget_path(path)
assert expected == actual
def test_campaign_label_path():
customer_id = "oyster"
campaign_id = "nudibranch"
label_id = "cuttlefish"
expected = "customers/{customer_id}/campaignLabels/{campaign_id}~{label_id}".format(customer_id=customer_id, campaign_id=campaign_id, label_id=label_id, )
actual = CampaignServiceClient.campaign_label_path(customer_id, campaign_id, label_id)
assert expected == actual
def test_parse_campaign_label_path():
expected = {
"customer_id": "mussel",
"campaign_id": "winkle",
"label_id": "nautilus",
}
path = CampaignServiceClient.campaign_label_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_campaign_label_path(path)
assert expected == actual
def test_conversion_action_path():
customer_id = "scallop"
conversion_action_id = "abalone"
expected = "customers/{customer_id}/conversionActions/{conversion_action_id}".format(customer_id=customer_id, conversion_action_id=conversion_action_id, )
actual = CampaignServiceClient.conversion_action_path(customer_id, conversion_action_id)
assert expected == actual
def test_parse_conversion_action_path():
expected = {
"customer_id": "squid",
"conversion_action_id": "clam",
}
path = CampaignServiceClient.conversion_action_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_conversion_action_path(path)
assert expected == actual
def test_feed_path():
customer_id = "whelk"
feed_id = "octopus"
expected = "customers/{customer_id}/feeds/{feed_id}".format(customer_id=customer_id, feed_id=feed_id, )
actual = CampaignServiceClient.feed_path(customer_id, feed_id)
assert expected == actual
def test_parse_feed_path():
expected = {
"customer_id": "oyster",
"feed_id": "nudibranch",
}
path = CampaignServiceClient.feed_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_feed_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CampaignServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = CampaignServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder, )
actual = CampaignServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = CampaignServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization, )
actual = CampaignServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = CampaignServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project, )
actual = CampaignServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = CampaignServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CampaignServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = CampaignServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CampaignServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CampaignServiceTransport, '_prep_wrapped_messages') as prep:
client = CampaignServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CampaignServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = CampaignServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
zeekay/decorum | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python packaging."""
import os
import sys
from setuptools import setup
#: Absolute path to directory containing setup.py file.
here = os.path.abspath(os.path.dirname(__file__))
#: Boolean, ``True`` if environment is running Python version 2.
IS_PYTHON2 = sys.version_info[0] == 2
# Data for use in setup.
NAME = 'Decorum'
DESCRIPTION = 'Tool for writing simple decorators.'
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()
AUTHOR = u'Zach Kelling'
EMAIL = '[email protected]'
LICENSE = 'MIT'
URL = 'https://pypi.python.org/pypi/{name}'.format(name=NAME)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
]
KEYWORDS = [
'decorator',
'decorators',
]
PACKAGES = ['decorum']
REQUIREMENTS = []
SETUP_REQUIREMENTS = [
'setuptools',
]
ENTRY_POINTS = {}
if __name__ == '__main__': # Don't run setup() when we import this module.
setup(
author=AUTHOR,
author_email=EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
entry_points=ENTRY_POINTS,
include_package_data=True,
install_requires=REQUIREMENTS,
keywords=' '.join(KEYWORDS),
license=LICENSE,
long_description=README,
name=NAME,
packages=PACKAGES,
setup_requires=SETUP_REQUIREMENTS,
url=URL,
version='1.0.4.dev0',
zip_safe=False,
)
|
casacore/python-casacore | refs/heads/master | casacore/images/__init__.py | 1 | # __init__.py: Python image functions
# Copyright (C) 2008
# Associated Universities, Inc. Washington DC, USA.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning AIPS++ should be addressed as follows:
# Internet email: [email protected].
# Postal address: AIPS++ Project Office
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#
# $Id$
"""Python interface to the Casacore images module.
A `casacore image <../../casacore/doc/html/group__Images__module.html>`_
represents an astronomical image of arbitrary dimensionality.
Several image formats are recognized:
`casacore paged image <../../casacore/doc/html/classcasa_1_1PagedImage.html>`_
is the native casacore image format stored in a casacore table.
`HDF5 <http://www.hdfgroup.org/HDF5>`_
is the HDF5 format often used in the earth science community.
`FITS <http://heasarc.gsfc.nasa.gov/docs/software/fitsio/fitsio.html>`_
is the well-known astronomical FITS format.
`miriad <http://www.atnf.csiro.au/computing/software/miriad>`_
is the format used by the radio-astronomical MIRIAD package.
The following functionality exists:
- get and put data (slices)
- get or put a mask
- get meta data like coordinates and history
- get, put, or search optional image attributes (as used for LOFAR)
- get statistics
- form a subimage
- form an image expression which is treated as an ordinary image
- regrid the image
- write the image to a FITS file
"""
# Make image interface available.
from .image import image
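# A minimal usage sketch (illustrative only): the file names below are
# hypothetical, and the method names are assumed from the casacore image
# interface described above:
#
#   from casacore.images import image
#   im = image('3c343.img')      # open an existing casacore paged image
#   print(im.shape())            # axis lengths
#   pixels = im.getdata()        # read the pixel data
#   im.tofits('3c343.fits')      # write the image to a FITS file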
|
KAMI911/loec | refs/heads/master | examples/Sharpen/binaries-windows-python26/ArgImagePlugin.py | 3 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id: ArgImagePlugin.py 2309 2005-03-02 15:06:34Z fredrik $
#
# ARG animation support code
#
# history:
# 1996-12-30 fl Created
# 1997-01-06 fl Added safe scripting environment
# 1997-01-10 fl Added JHDR, UHDR and sYNC support
# 2005-03-02 fl Removed AAPP and ARUN support
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996-97.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
import Image, ImageFile, ImagePalette
from PngImagePlugin import i16, i32, ChunkStream, _MODES
MAGIC = "\212ARG\r\n\032\n"
# --------------------------------------------------------------------
# ARG parser
class ArgStream(ChunkStream):
"Parser callbacks for ARG data"
def __init__(self, fp):
ChunkStream.__init__(self, fp)
self.eof = 0
self.im = None
self.palette = None
self.__reset()
def __reset(self):
# reset decoder state (called on init and sync)
self.count = 0
self.id = None
self.action = ("NONE",)
self.images = {}
self.names = {}
def chunk_AHDR(self, offset, bytes):
"AHDR -- animation header"
# assertions
if self.count != 0:
raise SyntaxError, "misplaced AHDR chunk"
s = self.fp.read(bytes)
self.size = i32(s), i32(s[4:])
try:
self.mode, self.rawmode = _MODES[(ord(s[8]), ord(s[9]))]
except:
raise SyntaxError, "unknown ARG mode"
if Image.DEBUG:
print "AHDR size", self.size
print "AHDR mode", self.mode, self.rawmode
return s
def chunk_AFRM(self, offset, bytes):
"AFRM -- next frame follows"
# assertions
if self.count != 0:
raise SyntaxError, "misplaced AFRM chunk"
self.show = 1
self.id = 0
self.count = 1
self.repair = None
s = self.fp.read(bytes)
if len(s) >= 2:
self.id = i16(s)
if len(s) >= 4:
self.count = i16(s[2:4])
if len(s) >= 6:
self.repair = i16(s[4:6])
else:
self.repair = None
if Image.DEBUG:
print "AFRM", self.id, self.count
return s
def chunk_ADEF(self, offset, bytes):
"ADEF -- store image"
# assertions
if self.count != 0:
raise SyntaxError, "misplaced ADEF chunk"
self.show = 0
self.id = 0
self.count = 1
self.repair = None
s = self.fp.read(bytes)
if len(s) >= 2:
self.id = i16(s)
if len(s) >= 4:
self.count = i16(s[2:4])
if Image.DEBUG:
print "ADEF", self.id, self.count
return s
def chunk_NAME(self, offset, bytes):
"NAME -- name the current image"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced NAME chunk"
name = self.fp.read(bytes)
self.names[self.id] = name
return name
def chunk_AEND(self, offset, bytes):
"AEND -- end of animation"
if Image.DEBUG:
print "AEND"
self.eof = 1
raise EOFError, "end of ARG file"
def __getmodesize(self, s, full=1):
size = i32(s), i32(s[4:])
try:
mode, rawmode = _MODES[(ord(s[8]), ord(s[9]))]
except:
raise SyntaxError, "unknown image mode"
if full:
if ord(s[12]):
pass # interlace not yet supported
if ord(s[11]):
raise SyntaxError, "unknown filter category"
return size, mode, rawmode
def chunk_PAST(self, offset, bytes):
"PAST -- paste one image into another"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced PAST chunk"
if self.repair is not None:
# we must repair the target image before we
# start pasting
# brute force; a better solution would be to
# update only the dirty rectangles in images[id].
# note that if images[id] doesn't exist, it must
# be created
self.images[self.id] = self.images[self.repair].copy()
self.repair = None
s = self.fp.read(bytes)
im = self.images[i16(s)]
x, y = i32(s[2:6]), i32(s[6:10])
bbox = x, y, im.size[0]+x, im.size[1]+y
if im.mode in ["RGBA"]:
# paste with transparency
# FIXME: should handle P+transparency as well
self.images[self.id].paste(im, bbox, im)
else:
# paste without transparency
self.images[self.id].paste(im, bbox)
self.action = ("PAST",)
self.__store()
return s
def chunk_BLNK(self, offset, bytes):
"BLNK -- create blank image"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced BLNK chunk"
s = self.fp.read(bytes)
size, mode, rawmode = self.__getmodesize(s, 0)
# store image (FIXME: handle colour)
self.action = ("BLNK",)
self.im = Image.core.fill(mode, size, 0)
self.__store()
return s
def chunk_IHDR(self, offset, bytes):
"IHDR -- full image follows"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced IHDR chunk"
# image header
s = self.fp.read(bytes)
size, mode, rawmode = self.__getmodesize(s)
# decode and store image
self.action = ("IHDR",)
self.im = Image.core.new(mode, size)
self.decoder = Image.core.zip_decoder(rawmode)
self.decoder.setimage(self.im, (0,0) + size)
self.data = ""
return s
def chunk_DHDR(self, offset, bytes):
"DHDR -- delta image follows"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced DHDR chunk"
s = self.fp.read(bytes)
size, mode, rawmode = self.__getmodesize(s)
# delta header
diff = ord(s[13])
offs = i32(s[14:18]), i32(s[18:22])
bbox = offs + (offs[0]+size[0], offs[1]+size[1])
if Image.DEBUG:
print "DHDR", diff, bbox
# FIXME: decode and apply image
self.action = ("DHDR", diff, bbox)
# setup decoder
self.im = Image.core.new(mode, size)
self.decoder = Image.core.zip_decoder(rawmode)
self.decoder.setimage(self.im, (0,0) + size)
self.data = ""
return s
def chunk_JHDR(self, offset, bytes):
"JHDR -- JPEG image follows"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced JHDR chunk"
# image header
s = self.fp.read(bytes)
size, mode, rawmode = self.__getmodesize(s, 0)
# decode and store image
self.action = ("JHDR",)
self.im = Image.core.new(mode, size)
self.decoder = Image.core.jpeg_decoder(rawmode)
self.decoder.setimage(self.im, (0,0) + size)
self.data = ""
return s
def chunk_UHDR(self, offset, bytes):
"UHDR -- uncompressed image data follows (EXPERIMENTAL)"
# assertions
if self.count == 0:
raise SyntaxError, "misplaced UHDR chunk"
# image header
s = self.fp.read(bytes)
size, mode, rawmode = self.__getmodesize(s, 0)
# decode and store image
self.action = ("UHDR",)
self.im = Image.core.new(mode, size)
self.decoder = Image.core.raw_decoder(rawmode)
self.decoder.setimage(self.im, (0,0) + size)
self.data = ""
return s
def chunk_IDAT(self, offset, bytes):
"IDAT -- image data block"
# pass compressed chunks through the decoder
s = self.fp.read(bytes)
self.data = self.data + s
n, e = self.decoder.decode(self.data)
if n < 0:
# end of image
if e < 0:
raise IOError, "decoder error %d" % e
else:
self.data = self.data[n:]
return s
def chunk_DEND(self, offset, bytes):
return self.chunk_IEND(offset, bytes)
def chunk_JEND(self, offset, bytes):
return self.chunk_IEND(offset, bytes)
def chunk_UEND(self, offset, bytes):
return self.chunk_IEND(offset, bytes)
def chunk_IEND(self, offset, bytes):
"IEND -- end of image"
# we now have a new image. carry out the operation
# defined by the image header.
# won't need these anymore
del self.decoder
del self.data
self.__store()
return self.fp.read(bytes)
def __store(self):
# apply operation
cid = self.action[0]
if cid in ["BLNK", "IHDR", "JHDR", "UHDR"]:
# store
self.images[self.id] = self.im
elif cid == "DHDR":
# paste
cid, mode, bbox = self.action
im0 = self.images[self.id]
im1 = self.im
if mode == 0:
im1 = im1.chop_add_modulo(im0.crop(bbox))
im0.paste(im1, bbox)
self.count = self.count - 1
if self.count == 0 and self.show:
self.im = self.images[self.id]
raise EOFError # end of this frame
def chunk_PLTE(self, offset, bytes):
"PLTE -- palette data"
s = self.fp.read(bytes)
if self.mode == "P":
self.palette = ImagePalette.raw("RGB", s)
return s
def chunk_sYNC(self, offset, bytes):
"SYNC -- reset decoder"
if self.count != 0:
raise SyntaxError, "misplaced sYNC chunk"
s = self.fp.read(bytes)
self.__reset()
return s
# --------------------------------------------------------------------
# ARG reader
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for the experimental Animated Raster Graphics format.
class ArgImageFile(ImageFile.ImageFile):
format = "ARG"
format_description = "Animated raster graphics"
def _open(self):
if self.fp.read(8) != MAGIC:
raise SyntaxError, "not an ARG file"
self.arg = ArgStream(self.fp)
# read and process the first chunk (AHDR)
cid, offset, bytes = self.arg.read()
if cid != "AHDR":
raise SyntaxError, "expected an AHDR chunk"
s = self.arg.call(cid, offset, bytes)
self.arg.crc(cid, s)
# image characteristics
self.mode = self.arg.mode
self.size = self.arg.size
def load(self):
if self.arg.im is None:
self.seek(0)
# image data
self.im = self.arg.im
self.palette = self.arg.palette
# set things up for further processing
Image.Image.load(self)
def seek(self, frame):
if self.arg.eof:
raise EOFError, "end of animation"
self.fp = self.arg.fp
while 1:
#
# process chunks
cid, offset, bytes = self.arg.read()
if self.arg.eof:
raise EOFError, "end of animation"
try:
s = self.arg.call(cid, offset, bytes)
except EOFError:
break
except "glurk": # AttributeError
if Image.DEBUG:
print cid, bytes, "(unknown)"
s = self.fp.read(bytes)
self.arg.crc(cid, s)
            self.fp.read(4) # skip extra CRC
def tell(self):
return 0
def verify(self):
"Verify ARG file"
# back up to first chunk
self.fp.seek(8)
self.arg.verify(self)
self.arg.close()
self.fp = None
#
# --------------------------------------------------------------------
Image.register_open("ARG", ArgImageFile, _accept)
Image.register_extension("ARG", ".arg")
Image.register_mime("ARG", "video/x-arg")
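# Usage sketch ("anim.arg" is a hypothetical file name): once this plugin
# module has been imported, Image.open dispatches to ArgImageFile via the
# magic-byte check registered above.
#
#   import Image
#   import ArgImagePlugin
#   im = Image.open("anim.arg")  # recognized through the _accept hook
#   im.seek(1)                   # decode the next animation frame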
|
MOA-2011/enigma2 | refs/heads/master | lib/python/Tools/FuzzyDate.py | 17 | from time import localtime, time
def FuzzyTime(t, inPast = False):
d = localtime(t)
nt = time()
n = localtime()
dayOfWeek = (_("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun"))
if d[:3] == n[:3]:
# same day
date = _("Today")
elif d[0] == n[0] and d[7] == n[7] - 1 and inPast:
# won't work on New Year's day
date = _("Yesterday")
elif ((t - nt) < 7*86400) and (nt < t) and not inPast:
# same week (must be future)
date = dayOfWeek[d[6]]
elif d[0] == n[0]:
# same year
if inPast:
# I want the day in the movielist
date = "%s %d.%d" % (dayOfWeek[d[6]], d[2], d[1])
else:
date = "%d.%d" % (d[2], d[1])
else:
date = "%d.%d.%d" % (d[2], d[1], d[0])
timeres = "%d:%02d" % (d[3], d[4])
return (date, timeres)
if __name__ == "__main__":
def _(x): return x
print "now: %s %s" % FuzzyTime(time())
for i in range(1, 14):
print "+%2s day(s): %s " % (i, FuzzyTime(time() + 86400 * i))
for i in range(1, 14):
print "-%2s day(s): %s " % (i, FuzzyTime(time() - 86400 * i, True))
|
unseenlaser/python-for-android | refs/heads/master | python-modules/twisted/twisted/internet/test/inlinecb_tests.py | 59 | # -*- test-case-name: twisted.internet.test.test_inlinecb -*-
# Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.defer.inlineCallbacks}.
These tests are defined in a non-C{test_*} module because they are
syntactically invalid on python < 2.5. test_inlinecb will conditionally import
these tests on python 2.5 and greater.
Some tests for inlineCallbacks are defined in L{twisted.test.test_defgen} as
well: see U{http://twistedmatrix.com/trac/ticket/4182}.
"""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred, returnValue, inlineCallbacks
class NonLocalExitTests(TestCase):
"""
It's possible for L{returnValue} to be (accidentally) invoked at a stack
level below the L{inlineCallbacks}-decorated function which it is exiting.
If this happens, L{returnValue} should report useful errors.
If L{returnValue} is invoked from a function not decorated by
L{inlineCallbacks}, it will emit a warning if it causes an
L{inlineCallbacks} function further up the stack to exit.
"""
def mistakenMethod(self):
"""
This method mistakenly invokes L{returnValue}, despite the fact that it
is not decorated with L{inlineCallbacks}.
"""
returnValue(1)
def assertMistakenMethodWarning(self, resultList):
"""
Flush the current warnings and assert that we have been told that
C{mistakenMethod} was invoked, and that the result from the Deferred
that was fired (appended to the given list) is C{mistakenMethod}'s
result. The warning should indicate that an inlineCallbacks function
called 'inline' was made to exit.
"""
self.assertEqual(resultList, [1])
warnings = self.flushWarnings(offendingFunctions=[self.mistakenMethod])
self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
warnings[0]['message'],
"returnValue() in 'mistakenMethod' causing 'inline' to exit: "
"returnValue should only be invoked by functions decorated with "
"inlineCallbacks")
def test_returnValueNonLocalWarning(self):
"""
L{returnValue} will emit a non-local exit warning in the simplest case,
where the offending function is invoked immediately.
"""
@inlineCallbacks
def inline():
self.mistakenMethod()
returnValue(2)
yield 0
d = inline()
results = []
d.addCallback(results.append)
self.assertMistakenMethodWarning(results)
def test_returnValueNonLocalDeferred(self):
"""
L{returnValue} will emit a non-local warning in the case where the
L{inlineCallbacks}-decorated function has already yielded a Deferred
and therefore moved its generator function along.
"""
cause = Deferred()
@inlineCallbacks
def inline():
yield cause
self.mistakenMethod()
returnValue(2)
effect = inline()
results = []
effect.addCallback(results.append)
        self.assertEqual(results, [])
cause.callback(1)
self.assertMistakenMethodWarning(results)
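# For contrast, a sketch (not part of the test suite) of the supported
# pattern: returnValue invoked directly inside the inlineCallbacks-decorated
# generator, which exits cleanly and without a warning.
#
#   from twisted.internet.defer import succeed
#
#   @inlineCallbacks
#   def correct():
#       value = yield succeed(1)  # wait on an already-fired Deferred
#       returnValue(value * 2)    # exits 'correct' itself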
|
miipl-naveen/optibizz | refs/heads/master | addons/hr_contract/__init__.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
deadRaccoons/MameAirlines | refs/heads/master | tabo/cherrypy/cherrypy/process/win32.py | 68 | """Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(
events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
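# Minimal usage sketch (not part of the original module): blocking until
# the bus reaches STARTED, backed by a native win32 event rather than a
# sleep loop. Assumes the standard wspbus state names.
#
#     bus = Win32Bus()
#     bus.start()
#     bus.wait(wspbus.states.STARTED)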
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
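# Minimal usage sketch (service name below is hypothetical): signalling an
# installed service by name. 'graceful' resolves through control_codes to
# win32serviceutil.ControlService(service, 138).
#
#     signal_child('Python Web Service', 'graceful')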
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
    # Only exists on Windows 2000 or later, ignored on Windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
    def SvcOther(self, control):
        from cherrypy import process
        process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
|
CloudServer/cinder | refs/heads/master | cinder/volume/drivers/hitachi/hbsd_iscsi.py | 16 | # Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Cinder volume driver for Hitachi storage.
"""
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
LOG = logging.getLogger(__name__)
CHAP_METHOD = ('None', 'CHAP None', 'CHAP')
volume_opts = [
cfg.BoolOpt('hitachi_add_chap_user',
default=False,
help='Add CHAP user'),
cfg.StrOpt('hitachi_auth_method',
default=None,
help='iSCSI authentication method'),
cfg.StrOpt('hitachi_auth_user',
default='%sCHAP-user' % basic_lib.NAME_PREFIX,
help='iSCSI authentication username'),
cfg.StrOpt('hitachi_auth_password',
default='%sCHAP-password' % basic_lib.NAME_PREFIX,
help='iSCSI authentication password', secret=True),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
VERSION = common.VERSION
def __init__(self, *args, **kwargs):
os.environ['LANG'] = 'C'
super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
self.db = kwargs.get('db')
self.common = None
self.configuration.append_config_values(common.volume_opts)
self._stats = {}
self.context = None
self.do_setup_status = threading.Event()
def _check_param(self):
self.configuration.append_config_values(volume_opts)
if (self.configuration.hitachi_auth_method and
self.configuration.hitachi_auth_method not in CHAP_METHOD):
raise exception.HBSDError(
message=basic_lib.output_err(601, param='hitachi_auth_method'))
if self.configuration.hitachi_auth_method == 'None':
self.configuration.hitachi_auth_method = None
for opt in volume_opts:
getattr(self.configuration, opt.name)
def check_param(self):
try:
self.common.check_param()
self._check_param()
except exception.HBSDError:
raise
except Exception as ex:
raise exception.HBSDError(
message=basic_lib.output_err(601, param=six.text_type(ex)))
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
with lock:
self.common.output_param_to_log('iSCSI')
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
def _delete_lun_iscsi(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun_iscsi(hostgroups, ldev)
except exception.HBSDNotFound:
LOG.warning(basic_lib.set_msg(301, ldev=ldev))
def _add_target(self, hostgroups, ldev):
self.common.add_lun('autargetmap', hostgroups, ldev)
def _add_initiator(self, hgs, port, gid, host_iqn):
self.common.command.comm_add_initiator(port, gid, host_iqn)
hgs.append({'port': port, 'gid': int(gid), 'detected': True})
LOG.debug("Create iSCSI target for %s", hgs)
def _get_unused_gid_iscsi(self, port):
group_range = self.configuration.hitachi_group_range
if not group_range:
group_range = basic_lib.DEFAULT_GROUP_RANGE
return self.common.command.get_unused_gid_iscsi(group_range, port)
def _delete_iscsi_target(self, port, target_no, target_alias):
ret, _stdout, _stderr = self.common.command.delete_iscsi_target(
port, target_no, target_alias)
if ret:
LOG.warning(basic_lib.set_msg(
307, port=port, tno=target_no, alias=target_alias))
def _delete_chap_user(self, port):
ret, _stdout, _stderr = self.common.command.delete_chap_user(port)
if ret:
LOG.warning(basic_lib.set_msg(
303, user=self.configuration.hitachi_auth_user))
def _get_hostgroup_info_iscsi(self, hgs, host_iqn):
return self.common.command.comm_get_hostgroup_info_iscsi(
hgs, host_iqn, self.configuration.hitachi_target_ports)
def _discovery_iscsi_target(self, hostgroups):
for hostgroup in hostgroups:
ip_addr, ip_port = self.common.command.comm_get_iscsi_ip(
hostgroup['port'])
target_iqn = self.common.command.comm_get_target_iqn(
hostgroup['port'], hostgroup['gid'])
hostgroup['ip_addr'] = ip_addr
hostgroup['ip_port'] = ip_port
hostgroup['target_iqn'] = target_iqn
LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s",
{'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn})
def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn):
for port in ports:
added_hostgroup = False
added_user = False
LOG.debug('Create target (hgs: %(hgs)s port: %(port)s '
'target_iqn: %(tiqn)s target_alias: %(alias)s '
'add_iqn: %(aiqn)s)',
{'hgs': hgs, 'port': port, 'tiqn': target_iqn,
'alias': target_alias, 'aiqn': add_iqn})
gid = self.common.command.get_gid_from_targetiqn(
target_iqn, target_alias, port)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
gid = None
try:
gid = self._get_unused_gid_iscsi(port)
self.common.command.comm_add_hostgrp_iscsi(
port, gid, target_alias, target_iqn)
added_hostgroup = True
except exception.HBSDNotFound:
LOG.warning(basic_lib.set_msg(312, resource='GID'))
continue
except Exception as ex:
LOG.warning(basic_lib.set_msg(
309, port=port, alias=target_alias,
reason=ex))
break
else:
LOG.debug('Completed to add target'
'(port: %(port)s gid: %(gid)d)',
{'port': port, 'gid': gid})
break
if gid is None:
LOG.error(_LE('Failed to add target(port: %s)'), port)
continue
try:
if added_hostgroup:
if self.configuration.hitachi_auth_method:
added_user = self.common.command.set_chap_authention(
port, gid)
self.common.command.comm_set_hostgrp_reportportal(
port, target_alias)
self._add_initiator(hgs, port, gid, add_iqn)
except Exception as ex:
LOG.warning(basic_lib.set_msg(
316, port=port, reason=ex))
if added_hostgroup:
if added_user:
self._delete_chap_user(port)
self._delete_iscsi_target(port, gid, target_alias)
def add_hostgroup_core(self, hgs, ports, target_iqn,
target_alias, add_iqn):
if ports:
self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn)
def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports):
target_ports = self.configuration.hitachi_target_ports
group_request = self.configuration.hitachi_group_request
target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
if target_ports and group_request:
target_iqn = '%s.target' % master_iqn
diff_ports = []
for port in security_ports:
for hostgroup in hgs:
if hostgroup['port'] == port:
break
else:
diff_ports.append(port)
self.add_hostgroup_core(hgs, diff_ports, target_iqn,
target_alias, master_iqn)
if not hgs:
raise exception.HBSDError(message=basic_lib.output_err(649))
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'initiator' not in properties:
raise exception.HBSDError(
message=basic_lib.output_err(650, resource='HBA'))
LOG.debug("initiator: %s", properties['initiator'])
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, properties['initiator'])
self.add_hostgroup_master(hostgroups, properties['initiator'],
properties['ip'], security_ports)
def _get_properties(self, volume, hostgroups):
conf = self.configuration
properties = {}
self._discovery_iscsi_target(hostgroups)
hostgroup = hostgroups[0]
properties['target_discovered'] = True
properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'],
hostgroup['ip_port'])
properties['target_iqn'] = hostgroup['target_iqn']
properties['target_lun'] = hostgroup['lun']
if conf.hitachi_auth_method:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = conf.hitachi_auth_user
properties['auth_password'] = conf.hitachi_auth_password
return properties
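    # The dict returned above becomes the 'data' payload of
    # initialize_connection(); a sketch of its shape with hypothetical
    # values (CHAP keys appear only when hitachi_auth_method is set):
    #
    #     {'target_discovered': True,
    #      'target_portal': '192.168.0.10:3260',
    #      'target_iqn': 'iqn.1994-04.jp.co.hitachi:example',
    #      'target_lun': 0,
    #      'auth_method': 'CHAP',
    #      'auth_username': '...-CHAP-user',
    #      'auth_password': '...'}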
def do_setup(self, context):
self.context = context
self.common = common.HBSDCommon(self.configuration, self,
context, self.db)
self.check_param()
self.common.create_lock_file()
self.common.command.connect_storage()
lock = basic_lib.get_process_lock(self.common.service_lock_file)
with lock:
self.add_hostgroup()
self.output_param_to_log()
self.do_setup_status.set()
def check_for_setup_error(self):
pass
def extend_volume(self, volume, new_size):
self.do_setup_status.wait()
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
if refresh:
if self.do_setup_status.isSet():
self.common.output_backend_available_once()
_stats = self.common.update_volume_stats("iSCSI")
if _stats:
self._stats = _stats
return self._stats
def create_volume(self, volume):
self.do_setup_status.wait()
metadata = self.common.create_volume(volume)
return metadata
def delete_volume(self, volume):
self.do_setup_status.wait()
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_snapshot(snapshot)
return metadata
def delete_snapshot(self, snapshot):
self.do_setup_status.wait()
self.common.delete_snapshot(snapshot)
def create_cloned_volume(self, volume, src_vref):
self.do_setup_status.wait()
metadata = self.common.create_cloned_volume(volume, src_vref)
return metadata
def create_volume_from_snapshot(self, volume, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
return metadata
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)",
{'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs:
hostgroups = src_hgs[:]
else:
hostgroups = []
security_ports = self._get_hostgroup_info_iscsi(
hostgroups, connector['initiator'])
self.add_hostgroup_master(hostgroups, connector['initiator'],
connector['ip'], security_ports)
self._add_target(hostgroups, ldev)
return hostgroups
def initialize_connection(self, volume, connector):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
raise exception.HBSDError(
message=basic_lib.output_err(619, volume_id=volume['id']))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s',
self.common.volume_info)
LOG.debug('HFCDrv: properties=%s', properties)
return {
'driver_volume_type': protocol,
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)",
self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun_iscsi(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
def terminate_connection(self, volume, connector, **kwargs):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
LOG.warning(basic_lib.set_msg(302, volume_id=volume['id']))
return
if 'initiator' not in connector:
raise exception.HBSDError(
message=basic_lib.output_err(650, resource='HBA'))
hostgroups = []
self._get_hostgroup_info_iscsi(hostgroups,
connector['initiator'])
if not hostgroups:
raise exception.HBSDError(message=basic_lib.output_err(649))
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def pair_initialize_connection(self, unused_ldev):
pass
def pair_terminate_connection(self, unused_ldev):
pass
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self.do_setup_status.wait()
if volume['volume_attachment']:
desc = 'volume %s' % volume['id']
raise exception.HBSDError(
message=basic_lib.output_err(660, desc=desc))
super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)
def manage_existing(self, volume, existing_ref):
return self.common.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
self.do_setup_status.wait()
return self.common.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
self.do_setup_status.wait()
self.common.unmanage(volume)
|
emanuelcovaci/TLT | refs/heads/master | blog/utility/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
ofir123/CouchPotatoServer | refs/heads/master | libs/suds/properties.py | 204 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Properties classes.
"""
from logging import getLogger
log = getLogger(__name__)
class AutoLinker(object):
"""
Base class, provides interface for I{automatic} link
management between a L{Properties} object and the L{Properties}
contained within I{values}.
"""
def updated(self, properties, prev, next):
"""
Notification that a values was updated and the linkage
between the I{properties} contained with I{prev} need to
be relinked to the L{Properties} contained within the
I{next} value.
"""
pass
class Link(object):
"""
Property link object.
@ivar endpoints: A tuple of the (2) endpoints of the link.
@type endpoints: tuple(2)
"""
def __init__(self, a, b):
"""
@param a: Property (A) to link.
@type a: L{Property}
@param b: Property (B) to link.
@type b: L{Property}
"""
pA = Endpoint(self, a)
pB = Endpoint(self, b)
self.endpoints = (pA, pB)
self.validate(a, b)
a.links.append(pB)
b.links.append(pA)
def validate(self, pA, pB):
"""
Validate that the two properties may be linked.
@param pA: Endpoint (A) to link.
@type pA: L{Endpoint}
@param pB: Endpoint (B) to link.
@type pB: L{Endpoint}
@return: self
@rtype: L{Link}
"""
if pA in pB.links or \
pB in pA.links:
            raise Exception('Already linked')
dA = pA.domains()
dB = pB.domains()
for d in dA:
if d in dB:
                raise Exception('Duplicate domain "%s" found' % d)
for d in dB:
if d in dA:
                raise Exception('Duplicate domain "%s" found' % d)
kA = pA.keys()
kB = pB.keys()
for k in kA:
if k in kB:
                raise Exception('Duplicate key %s found' % k)
for k in kB:
if k in kA:
                raise Exception('Duplicate key %s found' % k)
return self
def teardown(self):
"""
Teardown the link.
Removes endpoints from properties I{links} collection.
@return: self
@rtype: L{Link}
"""
pA, pB = self.endpoints
if pA in pB.links:
pB.links.remove(pA)
if pB in pA.links:
pA.links.remove(pB)
return self
class Endpoint(object):
"""
Link endpoint (wrapper).
@ivar link: The associated link.
@type link: L{Link}
@ivar target: The properties object.
@type target: L{Property}
"""
def __init__(self, link, target):
self.link = link
self.target = target
def teardown(self):
return self.link.teardown()
def __eq__(self, rhs):
return ( self.target == rhs )
def __hash__(self):
return hash(self.target)
def __getattr__(self, name):
return getattr(self.target, name)
class Definition:
"""
Property definition.
@ivar name: The property name.
@type name: str
@ivar classes: The (class) list of permitted values
@type classes: tuple
@ivar default: The default value.
    @type default: any
"""
def __init__(self, name, classes, default, linker=AutoLinker()):
"""
@param name: The property name.
@type name: str
@param classes: The (class) list of permitted values
@type classes: tuple
@param default: The default value.
@type default: any
"""
if not isinstance(classes, (list, tuple)):
classes = (classes,)
self.name = name
self.classes = classes
self.default = default
self.linker = linker
def nvl(self, value=None):
"""
Convert the I{value} into the default when I{None}.
@param value: The proposed value.
@type value: any
@return: The I{default} when I{value} is I{None}, else I{value}.
@rtype: any
"""
if value is None:
return self.default
else:
return value
def validate(self, value):
"""
Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid.
"""
if value is None:
return
if len(self.classes) and \
not isinstance(value, self.classes):
msg = '"%s" must be: %s' % (self.name, self.classes)
            raise AttributeError(msg)
def __repr__(self):
return '%s: %s' % (self.name, str(self))
def __str__(self):
s = []
if len(self.classes):
s.append('classes=%s' % str(self.classes))
else:
s.append('classes=*')
s.append("default=%s" % str(self.default))
return ', '.join(s)
class Properties:
"""
Represents basic application properties.
Provides basic type validation, default values and
link/synchronization behavior.
@ivar domain: The domain name.
@type domain: str
@ivar definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@ivar links: A list of linked property objects used to create
a network of properties.
@type links: [L{Property},..]
@ivar defined: A dict of property values.
@type defined: dict
"""
def __init__(self, domain, definitions, kwargs):
"""
@param domain: The property domain name.
@type domain: str
@param definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@param kwargs: A list of property name/values to set.
@type kwargs: dict
"""
self.definitions = {}
for d in definitions:
self.definitions[d.name] = d
self.domain = domain
self.links = []
self.defined = {}
self.modified = set()
self.prime()
self.update(kwargs)
def definition(self, name):
"""
Get the definition for the property I{name}.
@param name: The property I{name} to find the definition for.
@type name: str
@return: The property definition
@rtype: L{Definition}
@raise AttributeError: On not found.
"""
d = self.definitions.get(name)
if d is None:
raise AttributeError(name)
return d
def update(self, other):
"""
Update the property values as specified by keyword/value.
@param other: An object to update from.
@type other: (dict|L{Properties})
@return: self
@rtype: L{Properties}
"""
if isinstance(other, Properties):
other = other.defined
for n,v in other.items():
self.set(n, v)
return self
def notset(self, name):
"""
Get whether a property has never been set by I{name}.
@param name: A property name.
@type name: str
@return: True if never been set.
@rtype: bool
"""
        return self.provider(name).__notset(name)
def set(self, name, value):
"""
Set the I{value} of a property by I{name}.
The value is validated against the definition and set
to the default when I{value} is None.
@param name: The property name.
@type name: str
@param value: The new property value.
@type value: any
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, value)
return self
def unset(self, name):
"""
Unset a property by I{name}.
@param name: A property name.
@type name: str
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, None)
return self
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.provider(name).__get(name, *df)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
Link(self, other)
return self
def unlink(self, *others):
"""
Unlink (disassociate) the specified properties object.
@param others: The list object to unlink. Unspecified means unlink all.
@type others: [L{Properties},..]
@return: self
@rtype: L{Properties}
"""
if not len(others):
others = self.links[:]
for p in self.links[:]:
if p in others:
p.teardown()
return self
def provider(self, name, history=None):
"""
Find the provider of the property by I{name}.
@param name: The property name.
@type name: str
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: The provider when found. Otherwise, None (when nested)
and I{self} when not nested.
@rtype: L{Properties}
"""
if history is None:
history = []
history.append(self)
if name in self.definitions:
return self
for x in self.links:
if x in history:
continue
provider = x.provider(name, history)
if provider is not None:
return provider
history.remove(self)
if len(history):
return None
return self
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
def domains(self, history=None):
"""
Get the set of I{all} domain names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of domain names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
domains = set()
domains.add(self.domain)
for x in self.links:
if x in history:
continue
domains.update(x.domains(history))
history.remove(self)
return domains
def prime(self):
"""
Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties}
"""
for d in self.definitions.values():
self.defined[d.name] = d.default
return self
def __notset(self, name):
return not (name in self.modified)
def __set(self, name, value):
d = self.definition(name)
d.validate(value)
value = d.nvl(value)
prev = self.defined[name]
self.defined[name] = value
self.modified.add(name)
d.linker.updated(self, prev, value)
def __get(self, name, *df):
d = self.definition(name)
value = self.defined.get(name)
if value == d.default and len(df):
value = df[0]
return value
def str(self, history):
s = []
s.append('Definitions:')
for d in self.definitions.values():
s.append('\t%s' % repr(d))
s.append('Content:')
for d in self.defined.items():
s.append('\t%s' % str(d))
if self not in history:
history.append(self)
s.append('Linked:')
for x in self.links:
s.append(x.str(history))
history.remove(self)
return '\n'.join(s)
def __repr__(self):
return str(self)
def __str__(self):
return self.str([])
class Skin(object):
"""
The meta-programming I{skin} around the L{Properties} object.
@ivar __pts__: The wrapped object.
@type __pts__: L{Properties}.
"""
def __init__(self, domain, definitions, kwargs):
self.__pts__ = Properties(domain, definitions, kwargs)
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
self.__dict__[name] = value
return
self.__pts__.set(name, value)
def __getattr__(self, name):
return self.__pts__.get(name)
def __repr__(self):
return str(self)
def __str__(self):
return str(self.__pts__)
class Unskin(object):
def __new__(self, *args, **kwargs):
return args[0].__pts__
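# Minimal usage sketch (names below are hypothetical, not part of suds):
# a Skin subclass exposes validated, defaulted attributes backed by a
# Properties object, and Unskin recovers the raw Properties.
#
#     class Options(Skin):
#         def __init__(self, **kwargs):
#             definitions = [
#                 Definition('timeout', (int, float), 90),
#                 Definition('faults', bool, True),
#             ]
#             Skin.__init__(self, 'options', definitions, kwargs)
#
#     opts = Options(timeout=30)
#     opts.faults = False            # validated against Definition.classes
#     Unskin(opts).get('timeout')    # -> 30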
class Inspector:
"""
Wrapper inspector.
"""
def __init__(self, options):
self.properties = options.__pts__
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.properties.get(name, *df)
def update(self, **kwargs):
"""
Update the property values as specified by keyword/value.
@param kwargs: A list of property name/values to set.
@type kwargs: dict
@return: self
@rtype: L{Properties}
"""
        return self.properties.update(kwargs)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.link(p)
def unlink(self, other):
"""
Unlink (disassociate) the specified properties object.
@param other: The object to unlink.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.unlink(p)
|
henryiii/rootpy | refs/heads/master | rootpy/root2hdf5.py | 1 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
This module handles conversion of ROOT's TFile and
contained TTrees into HDF5 format with PyTables
"""
from __future__ import absolute_import
import os
import sys
import warnings
from pkg_resources import parse_version
import tables
TABLES_NEW_API = parse_version(tables.__version__) >= parse_version('3')
if TABLES_NEW_API:
tables_open = tables.open_file
else:
tables_open = tables.openFile
from root_numpy import tree2array, RootNumpyUnconvertibleWarning
from numpy.lib import recfunctions
from .io import root_open, TemporaryFile
from . import log; log = log[__name__]
from .extern.progressbar import ProgressBar, Bar, ETA, Percentage
from .extern.six import string_types
from .logger.utils import check_tty
from . import QROOT
__all__ = [
'tree2hdf5',
'root2hdf5',
]
def _drop_object_col(rec, warn=True):
# ignore columns of type `object` since PyTables does not support these
if rec.dtype.hasobject:
object_fields = []
fields = rec.dtype.fields
for name in rec.dtype.names:
if fields[name][0].kind == 'O':
object_fields.append(name)
if warn:
log.warning(
"ignoring unsupported object branch '{0}'".format(
name))
# NumPy 1.7.1: TypeError: Cannot change data-type for object array.
#return rec[non_object_fields]
if object_fields:
rec = recfunctions.rec_drop_fields(rec, object_fields)
return rec
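# Minimal sketch of the behaviour above (the dtype below is hypothetical):
# a structured array with an object-typed column keeps only the columns
# PyTables can store.
#
#     import numpy as np
#     rec = np.array([(1, None)], dtype=[('x', 'i4'), ('obj', 'O')])
#     _drop_object_col(rec).dtype.names   # -> ('x',)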
def tree2hdf5(tree, hfile, group=None,
entries=-1, show_progress=False, **kwargs):
"""
Convert a TTree into a HDF5 table.
Parameters
----------
tree : ROOT.TTree
A ROOT TTree.
hfile : string or PyTables HDF5 File
A PyTables HDF5 File handle or string path to an existing HDF5 file.
group : string or PyTables Group instance, optional (default=None)
Write the table at this location in the HDF5 file.
entries : int, optional (default=-1)
The number of entries to read at once while converting a ROOT TTree
into an HDF5 table. By default read the entire TTree into memory (this
may not be desired if your TTrees are large).
show_progress : bool, optional (default=False)
If True, then display and update a progress bar on stdout as the TTree
is converted.
kwargs : dict, optional
Additional keyword arguments for the tree2array function.
"""
show_progress = show_progress and check_tty(sys.stdout)
if show_progress:
widgets = [Percentage(), ' ', Bar(), ' ', ETA()]
own_h5file = False
if isinstance(hfile, string_types):
hfile = tables_open(filename=hfile, mode="w", title="Data")
own_h5file = True
log.info("Converting tree '{0}' with {1:d} entries ...".format(
tree.GetName(),
tree.GetEntries()))
if not group:
group = hfile.root
elif isinstance(group, string_types):
group_where = '/' + os.path.dirname(group)
group_name = os.path.basename(group)
if TABLES_NEW_API:
group = hfile.create_group(group_where, group_name,
createparents=True)
else:
group = hfile.createGroup(group_where, group_name)
if tree.GetName() in group:
log.warning(
"Tree '{0}' already exists "
"in the output file".format(tree.GetName()))
return
total_entries = tree.GetEntries()
pbar = None
if show_progress and total_entries > 0:
pbar = ProgressBar(widgets=widgets, maxval=total_entries)
if entries <= 0:
# read the entire tree
if pbar is not None:
pbar.start()
array = tree2array(tree, **kwargs)
array = _drop_object_col(array)
if TABLES_NEW_API:
table = hfile.create_table(
group, tree.GetName(),
array, tree.GetTitle())
else:
table = hfile.createTable(
group, tree.GetName(),
array, tree.GetTitle())
# flush data in the table
table.flush()
# flush all pending data
hfile.flush()
else:
# read the tree in chunks
start = 0
while start < total_entries or start == 0:
if start > 0:
with warnings.catch_warnings():
warnings.simplefilter(
"ignore",
RootNumpyUnconvertibleWarning)
warnings.simplefilter(
"ignore",
tables.NaturalNameWarning)
array = tree2array(
tree,
start=start,
stop=start + entries,
**kwargs)
array = _drop_object_col(array, warn=False)
table.append(array)
else:
array = tree2array(
tree,
start=start,
stop=start + entries,
**kwargs)
array = _drop_object_col(array)
if pbar is not None:
# start after any output from root_numpy
pbar.start()
if TABLES_NEW_API:
table = hfile.create_table(
group, tree.GetName(),
array, tree.GetTitle())
else:
table = hfile.createTable(
group, tree.GetName(),
array, tree.GetTitle())
start += entries
if start <= total_entries and pbar is not None:
pbar.update(start)
# flush data in the table
table.flush()
# flush all pending data
hfile.flush()
if pbar is not None:
pbar.finish()
if own_h5file:
hfile.close()
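# Minimal usage sketch: converting a single tree in chunks of 10000
# entries ('ntuple.root' and the 'events' tree are hypothetical).
#
#     with root_open('ntuple.root') as rfile:
#         tree2hdf5(rfile.events, 'ntuple.h5', entries=10000)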
def root2hdf5(rfile, hfile, rpath='',
entries=-1, userfunc=None,
show_progress=False,
ignore_exception=False,
**kwargs):
"""
Convert all trees in a ROOT file into tables in an HDF5 file.
Parameters
----------
rfile : string or asrootpy'd ROOT File
A ROOT File handle or string path to an existing ROOT file.
hfile : string or PyTables HDF5 File
A PyTables HDF5 File handle or string path to an existing HDF5 file.
rpath : string, optional (default='')
Top level path to begin traversal through the ROOT file. By default
convert everything in and below the root directory.
entries : int, optional (default=-1)
The number of entries to read at once while converting a ROOT TTree
into an HDF5 table. By default read the entire TTree into memory (this
may not be desired if your TTrees are large).
userfunc : callable, optional (default=None)
A function that will be called on every tree and that must return a
tree or list of trees that will be converted instead of the original
tree.
show_progress : bool, optional (default=False)
If True, then display and update a progress bar on stdout as each tree
is converted.
ignore_exception : bool, optional (default=False)
If True, then ignore exceptions raised in converting trees and instead
skip such trees.
kwargs : dict, optional
Additional keyword arguments for the tree2array function.
"""
own_rootfile = False
if isinstance(rfile, string_types):
rfile = root_open(rfile)
own_rootfile = True
own_h5file = False
if isinstance(hfile, string_types):
hfile = tables_open(filename=hfile, mode="w", title="Data")
own_h5file = True
for dirpath, dirnames, treenames in rfile.walk(
rpath, class_ref=QROOT.TTree):
# skip directories w/o trees
if not treenames:
continue
treenames.sort()
group_where = '/' + os.path.dirname(dirpath)
group_name = os.path.basename(dirpath)
if not group_name:
group = hfile.root
elif TABLES_NEW_API:
group = hfile.create_group(group_where, group_name,
createparents=True)
else:
group = hfile.createGroup(group_where, group_name)
ntrees = len(treenames)
log.info(
"Will convert {0:d} tree{1} in {2}".format(
ntrees, 's' if ntrees != 1 else '',
os.path.join(group_where, group_name)))
for treename in treenames:
input_tree = rfile.Get(os.path.join(dirpath, treename))
if userfunc is not None:
tmp_file = TemporaryFile()
# call user-defined function on tree and get output trees
log.info("Calling user function on tree '{0}'".format(
input_tree.GetName()))
trees = userfunc(input_tree)
if not isinstance(trees, list):
trees = [trees]
else:
trees = [input_tree]
tmp_file = None
for tree in trees:
try:
tree2hdf5(tree, hfile, group=group,
entries=entries,
show_progress=show_progress,
**kwargs)
except Exception as e:
if ignore_exception:
log.error("Failed to convert tree '{0}': {1}".format(
tree.GetName(), str(e)))
else:
raise
input_tree.Delete()
if userfunc is not None:
for tree in trees:
tree.Delete()
tmp_file.Close()
if own_h5file:
hfile.close()
if own_rootfile:
rfile.Close()
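# Minimal usage sketch: converting every tree in a ROOT file, skipping
# any tree that fails to convert (file names are hypothetical).
#
#     root2hdf5('ntuple.root', 'ntuple.h5', entries=100000,
#               ignore_exception=True)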
def main():
import rootpy
from rootpy.extern.argparse import (
ArgumentParser,
ArgumentDefaultsHelpFormatter, RawTextHelpFormatter)
class formatter_class(ArgumentDefaultsHelpFormatter,
RawTextHelpFormatter):
pass
parser = ArgumentParser(formatter_class=formatter_class,
description="Convert ROOT files containing TTrees into HDF5 files "
"containing HDF5 tables")
parser.add_argument('--version', action='version',
version=rootpy.__version__,
help="show the version number and exit")
parser.add_argument('-n', '--entries', type=int, default=100000,
help="number of entries to read at once")
parser.add_argument('-f', '--force', action='store_true', default=False,
help="overwrite existing output files")
parser.add_argument('-u', '--update', action='store_true', default=False,
help="update existing output files")
parser.add_argument('--ext', default='h5',
help="output file extension")
parser.add_argument('-c', '--complevel', type=int, default=5,
choices=range(0, 10),
help="compression level")
parser.add_argument('-l', '--complib', default='zlib',
choices=('zlib', 'lzo', 'bzip2', 'blosc'),
help="compression algorithm")
parser.add_argument('-s', '--selection', default=None,
help="apply a selection on each "
"tree with a cut expression")
parser.add_argument(
'--script', default=None,
help="Python script containing a function with the same name \n"
"that will be called on each tree and must return a tree or \n"
"list of trees that will be converted instead of the \n"
"original tree")
parser.add_argument('-q', '--quiet', action='store_true', default=False,
help="suppress all warnings")
parser.add_argument('-d', '--debug', action='store_true', default=False,
help="show stack trace in the event of "
"an uncaught exception")
parser.add_argument('--no-progress-bar', action='store_true', default=False,
help="do not show the progress bar")
parser.add_argument('--ignore-exception', action='store_true',
default=False,
help="ignore exceptions raised in converting trees "
"and instead skip such trees")
parser.add_argument('files', nargs='+')
args = parser.parse_args()
import logging
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(True)
def formatwarning(message, category, filename, lineno, line=None):
return "{0}: {1}".format(category.__name__, message)
warnings.formatwarning = formatwarning
args.ext = args.ext.strip('.')
if args.quiet:
warnings.simplefilter(
"ignore",
RootNumpyUnconvertibleWarning)
warnings.simplefilter(
"ignore",
tables.NaturalNameWarning)
userfunc = None
if args.script is not None:
# get user-defined function
try:
exec(compile(open(args.script).read(), args.script, 'exec'),
globals(), locals())
except IOError:
sys.exit('Could not open script {0}'.format(args.script))
funcname = os.path.splitext(os.path.basename(args.script))[0]
try:
userfunc = locals()[funcname]
except KeyError:
sys.exit(
"Could not find the function '{0}' in the script {1}".format(
funcname, args.script))
for inputname in args.files:
outputname = os.path.splitext(inputname)[0] + '.' + args.ext
output_exists = os.path.exists(outputname)
if output_exists and not (args.force or args.update):
sys.exit(
"Output {0} already exists. "
"Use the --force option to overwrite it".format(outputname))
try:
rootfile = root_open(inputname)
except IOError:
sys.exit("Could not open {0}".format(inputname))
try:
if args.complevel > 0:
filters = tables.Filters(complib=args.complib,
complevel=args.complevel)
else:
filters = None
hd5file = tables_open(filename=outputname,
mode='a' if args.update else 'w',
title='Data', filters=filters)
except IOError:
sys.exit("Could not create {0}".format(outputname))
try:
log.info("Converting {0} ...".format(inputname))
root2hdf5(rootfile, hd5file,
entries=args.entries,
userfunc=userfunc,
selection=args.selection,
show_progress=not args.no_progress_bar,
ignore_exception=args.ignore_exception)
log.info("{0} {1}".format(
"Updated" if output_exists and args.update else "Created",
outputname))
except KeyboardInterrupt:
log.info("Caught Ctrl-c ... cleaning up")
hd5file.close()
rootfile.Close()
if not output_exists:
log.info("Removing {0}".format(outputname))
os.unlink(outputname)
sys.exit(1)
except Exception as e:
if args.debug:
# If in debug mode show full stack trace
import traceback
traceback.print_exception(*sys.exc_info())
log.error(str(e))
sys.exit(1)
finally:
hd5file.close()
rootfile.Close()
|
MarcosCommunity/odoo | refs/heads/marcos-8.0 | addons/product/pricelist.py | 18 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import chain
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import except_orm
import openerp.addons.decimal_precision as dp
class price_type(osv.osv):
"""
    The price type is used to point out which field in the product form
    is a price and in which currency this price is expressed.
    When a field is a price, you can use it in pricelists to base
    sale and purchase prices on some fields of the product.
"""
def _price_field_get(self, cr, uid, context=None):
mf = self.pool.get('ir.model.fields')
ids = mf.search(cr, uid, [('model','in', (('product.product'),('product.template'))), ('ttype','=','float')], context=context)
res = []
for field in mf.browse(cr, uid, ids, context=context):
if not (field.name, field.field_description) in res:
res.append((field.name, field.field_description))
return res
def _get_field_currency(self, cr, uid, fname, ctx):
ids = self.search(cr, uid, [('field','=',fname)], context=ctx)
return self.browse(cr, uid, ids, context=ctx)[0].currency_id
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_name = "product.price.type"
_description = "Price Type"
_columns = {
"name" : fields.char("Price Name", required=True, translate=True, help="Name of this kind of price."),
"active" : fields.boolean("Active"),
"field" : fields.selection(_price_field_get, "Product Field", size=32, required=True, help="Associated field in the product form."),
"currency_id" : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
}
_defaults = {
"active": lambda *args: True,
"currency_id": _get_currency
}
#----------------------------------------------------------
# Price lists
#----------------------------------------------------------
class product_pricelist_type(osv.osv):
_name = "product.pricelist.type"
_description = "Pricelist Type"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'key': fields.char('Key', required=True, help="Used in the code to select specific prices based on the context. Keep unchanged."),
}
class product_pricelist(osv.osv):
def _pricelist_type_get(self, cr, uid, context=None):
pricelist_type_obj = self.pool.get('product.pricelist.type')
pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name')
pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context)
res = []
for type in pricelist_types:
res.append((type['key'],type['name']))
return res
_name = "product.pricelist"
_description = "Pricelist"
_order = 'name'
_columns = {
'name': fields.char('Pricelist Name', required=True, translate=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the pricelist without removing it."),
'type': fields.selection(_pricelist_type_get, 'Pricelist Type', required=True),
'version_id': fields.one2many('product.pricelist.version', 'pricelist_id', 'Pricelist Versions', copy=True),
'currency_id': fields.many2one('res.currency', 'Currency', required=True),
'company_id': fields.many2one('res.company', 'Company'),
}
def name_get(self, cr, uid, ids, context=None):
result= []
if not all(ids):
return result
for pl in self.browse(cr, uid, ids, context=context):
name = pl.name + ' ('+ pl.currency_id.name + ')'
result.append((pl.id,name))
return result
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if name and operator == '=' and not args:
# search on the name of the pricelist and its currency, opposite of name_get(),
# Used by the magic context filter in the product search view.
query_args = {'name': name, 'limit': limit, 'lang': (context or {}).get('lang') or 'en_US'}
query = """SELECT p.id
FROM ((
SELECT pr.id, pr.name
FROM product_pricelist pr JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE pr.name || ' (' || cur.name || ')' = %(name)s
)
UNION (
SELECT tr.res_id as id, tr.value as name
FROM ir_translation tr JOIN
product_pricelist pr ON (
pr.id = tr.res_id AND
tr.type = 'model' AND
tr.name = 'product.pricelist,name' AND
tr.lang = %(lang)s
) JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE tr.value || ' (' || cur.name || ')' = %(name)s
)
) p
ORDER BY p.name"""
if limit:
query += " LIMIT %(limit)s"
cr.execute(query, query_args)
ids = [r[0] for r in cr.fetchall()]
# regular search() to apply ACLs - may limit results below limit in some cases
ids = self.search(cr, uid, [('id', 'in', ids)], limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(product_pricelist, self).name_search(
cr, uid, name, args, operator=operator, context=context, limit=limit)
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr, uid, uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_defaults = {
'active': lambda *a: 1,
"currency_id": _get_currency
}
def price_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
return dict((key, dict((key, price[0]) for key, price in value.items())) for key, value in self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner, context=context).items())
def price_rule_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
"""multi products 'price_get'.
@param ids:
@param products_by_qty:
@param partner:
@param context: {
'date': Date of the pricelist (%Y-%m-%d),}
@return: a dict of dict with product_id as key and a dict 'price by pricelist' as value
"""
if not ids:
ids = self.pool.get('product.pricelist').search(cr, uid, [], context=context)
results = {}
for pricelist in self.browse(cr, uid, ids, context=context):
subres = self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context)
for product_id,price in subres.items():
results.setdefault(product_id, {})
results[product_id][pricelist.id] = price
return results
def _price_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
return dict((key, price[0]) for key, price in self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context).items())
def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
context = context or {}
date = context.get('date') or time.strftime('%Y-%m-%d')
date = date[0:10]
products = map(lambda x: x[0], products_by_qty_by_partner)
currency_obj = self.pool.get('res.currency')
product_obj = self.pool.get('product.template')
product_uom_obj = self.pool.get('product.uom')
price_type_obj = self.pool.get('product.price.type')
if not products:
return {}
version = False
for v in pricelist.version_id:
if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
version = v
break
if not version:
raise osv.except_osv(_('Warning!'), _("At least one pricelist has no active version !\nPlease create or activate one."))
categ_ids = {}
for p in products:
categ = p.categ_id
while categ:
categ_ids[categ.id] = True
categ = categ.parent_id
categ_ids = categ_ids.keys()
is_product_template = products[0]._name == "product.template"
if is_product_template:
prod_tmpl_ids = [tmpl.id for tmpl in products]
# all variants of all products
prod_ids = [p.id for p in
list(chain.from_iterable([t.product_variant_ids for t in products]))]
else:
prod_ids = [product.id for product in products]
prod_tmpl_ids = [product.product_tmpl_id.id for product in products]
# Load all rules
cr.execute(
'SELECT i.id '
'FROM product_pricelist_item AS i '
'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
'AND (product_id IS NULL OR (product_id = any(%s))) '
'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
'AND (price_version_id = %s) '
'ORDER BY sequence, min_quantity desc',
(prod_tmpl_ids, prod_ids, categ_ids, version.id))
item_ids = [x[0] for x in cr.fetchall()]
items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)
price_types = {}
results = {}
for product, qty, partner in products_by_qty_by_partner:
results[product.id] = 0.0
rule_id = False
price = False
# Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
# An intermediary unit price may be computed according to a different UoM, in
# which case the price_uom_id contains that UoM.
# The final price will be converted to match `qty_uom_id`.
qty_uom_id = context.get('uom') or product.uom_id.id
price_uom_id = product.uom_id.id
qty_in_product_uom = qty
if qty_uom_id != product.uom_id.id:
try:
qty_in_product_uom = product_uom_obj._compute_qty(
cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
except except_orm:
# Ignored - incompatible UoM in context, use default product UoM
pass
for rule in items:
if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
continue
if is_product_template:
if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
continue
if rule.product_id and not (product.product_variant_count == 1 and product.product_variant_ids[0].id == rule.product_id.id):
# product rule acceptable on template if has only one variant
continue
else:
if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
continue
if rule.product_id and product.id != rule.product_id.id:
continue
if rule.categ_id:
cat = product.categ_id
while cat:
if cat.id == rule.categ_id.id:
break
cat = cat.parent_id
if not cat:
continue
if rule.base == -1:
if rule.base_pricelist_id:
price_tmp = self._price_get_multi(cr, uid,
rule.base_pricelist_id, [(product,
qty, partner)], context=context)[product.id]
ptype_src = rule.base_pricelist_id.currency_id.id
price_uom_id = qty_uom_id
price = currency_obj.compute(cr, uid,
ptype_src, pricelist.currency_id.id,
price_tmp, round=False,
context=context)
elif rule.base == -2:
seller = False
for seller_id in product.seller_ids:
if (not partner) or (seller_id.name.id != partner):
continue
seller = seller_id
if not seller and product.seller_ids:
seller = product.seller_ids[0]
if seller:
qty_in_seller_uom = qty
seller_uom = seller.product_uom.id
if qty_uom_id != seller_uom:
qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
price_uom_id = seller_uom
for line in seller.pricelist_ids:
if line.min_quantity <= qty_in_seller_uom:
price = line.price
else:
if rule.base not in price_types:
price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
price_type = price_types[rule.base]
# price_get returns the price in the context UoM, i.e. qty_uom_id
price_uom_id = qty_uom_id
price = currency_obj.compute(
cr, uid,
price_type.currency_id.id, pricelist.currency_id.id,
product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
round=False, context=context)
if price is not False:
price_limit = price
price = price * (1.0+(rule.price_discount or 0.0))
if rule.price_round:
price = tools.float_round(price, precision_rounding=rule.price_round)
convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
cr, uid, product.uom_id.id,
price, price_uom_id))
if rule.price_surcharge:
price_surcharge = convert_to_price_uom(rule.price_surcharge)
price += price_surcharge
if rule.price_min_margin:
price_min_margin = convert_to_price_uom(rule.price_min_margin)
price = max(price, price_limit + price_min_margin)
if rule.price_max_margin:
price_max_margin = convert_to_price_uom(rule.price_max_margin)
price = min(price, price_limit + price_max_margin)
rule_id = rule.id
break
# Final price conversion to target UoM
price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
results[product.id] = (price, rule_id)
return results
def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
return dict((key, price[0]) for key, price in self.price_rule_get(cr, uid, ids, prod_id, qty, partner=partner, context=context).items())
def price_rule_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
product = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
res_multi = self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner=[(product, qty, partner)], context=context)
res = res_multi[prod_id]
return res
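    # Minimal usage sketch (ids below are hypothetical): resolving the
    # unit price of one product for a quantity of 5 on pricelist 1.
    #
    #     prices = self.pool.get('product.pricelist').price_get(
    #         cr, uid, [1], product_id, 5.0, partner=partner_id,
    #         context={'date': '2015-01-01'})
    #     unit_price = prices[1]    # results are keyed by pricelist id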
class product_pricelist_version(osv.osv):
_name = "product.pricelist.version"
_description = "Pricelist Version"
_columns = {
'pricelist_id': fields.many2one('product.pricelist', 'Price List',
required=True, select=True, ondelete='cascade'),
'name': fields.char('Name', required=True, translate=True),
'active': fields.boolean('Active',
help="When a version is duplicated it is set to non active, so that the " \
"dates do not overlaps with original version. You should change the dates " \
"and reactivate the pricelist"),
'items_id': fields.one2many('product.pricelist.item',
'price_version_id', 'Price List Items', required=True, copy=True),
'date_start': fields.date('Start Date', help="First valid date for the version."),
'date_end': fields.date('End Date', help="Last valid date for the version."),
'company_id': fields.related('pricelist_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_defaults = {
'active': lambda *a: 1,
}
def _check_date(self, cursor, user, ids, context=None):
for pricelist_version in self.browse(cursor, user, ids, context=context):
if not pricelist_version.active:
continue
where = []
if pricelist_version.date_start:
where.append("((date_end>='%s') or (date_end is null))" % (pricelist_version.date_start,))
if pricelist_version.date_end:
where.append("((date_start<='%s') or (date_start is null))" % (pricelist_version.date_end,))
cursor.execute('SELECT id ' \
'FROM product_pricelist_version ' \
'WHERE '+' and '.join(where) + (where and ' and ' or '')+
'pricelist_id = %s ' \
'AND active ' \
'AND id <> %s', (
pricelist_version.pricelist_id.id,
pricelist_version.id))
if cursor.fetchall():
return False
return True
_constraints = [
(_check_date, 'You cannot have 2 pricelist versions that overlap!',
['date_start', 'date_end'])
]
def copy(self, cr, uid, id, default=None, context=None):
# set active False to prevent overlapping active pricelist
# versions
if not default:
default = {}
default['active'] = False
return super(product_pricelist_version, self).copy(cr, uid, id, default, context=context)
class product_pricelist_item(osv.osv):
def _price_field_get(self, cr, uid, context=None):
pt = self.pool.get('product.price.type')
ids = pt.search(cr, uid, [], context=context)
result = []
for line in pt.browse(cr, uid, ids, context=context):
result.append((line.id, line.name))
result.append((-1, _('Other Pricelist')))
result.append((-2, _('Supplier Prices on the product form')))
return result
# Added default function to fetch the Price type Based on Pricelist type.
def _get_default_base(self, cr, uid, fields, context=None):
product_price_type_obj = self.pool.get('product.price.type')
if fields.get('type') == 'purchase':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field', '=', 'standard_price')], context=context)
elif fields.get('type') == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
else:
return -1
if not product_price_type_ids:
return False
else:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
return pricetype.id
_name = "product.pricelist.item"
_description = "Pricelist item"
_order = "sequence, min_quantity desc"
_defaults = {
'base': _get_default_base,
'min_quantity': lambda *a: 0,
'sequence': lambda *a: 5,
'price_discount': lambda *a: 0,
}
def _check_recursion(self, cr, uid, ids, context=None):
for obj_list in self.browse(cr, uid, ids, context=context):
if obj_list.base == -1:
main_pricelist = obj_list.price_version_id.pricelist_id.id
other_pricelist = obj_list.base_pricelist_id.id
if main_pricelist == other_pricelist:
return False
return True
def _check_margin(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids, context=context):
if item.price_max_margin and item.price_min_margin and (item.price_min_margin > item.price_max_margin):
return False
return True
_columns = {
'name': fields.char('Rule Name', help="Explicit rule name for this pricelist line."),
'price_version_id': fields.many2one('product.pricelist.version', 'Price List Version', required=True, select=True, ondelete='cascade'),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', ondelete='cascade', help="Specify a template if this rule only applies to one product template. Keep empty otherwise."),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Specify a product if this rule only applies to one product. Keep empty otherwise."),
'categ_id': fields.many2one('product.category', 'Product Category', ondelete='cascade', help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise."),
'min_quantity': fields.integer('Min. Quantity', required=True,
help="For the rule to apply, bought/sold quantity must be greater "
"than or equal to the minimum quantity specified in this field.\n"
"Expressed in the default UoM of the product."
),
'sequence': fields.integer('Sequence', required=True, help="Gives the order in which the pricelist items will be checked. The evaluation gives highest priority to lowest sequence and stops as soon as a matching item is found."),
'base': fields.selection(_price_field_get, 'Based on', required=True, size=-1, help="Base price for computation."),
'base_pricelist_id': fields.many2one('product.pricelist', 'Other Pricelist'),
'price_surcharge': fields.float('Price Surcharge',
            digits_compute= dp.get_precision('Product Price'), help='Specify the fixed amount to add or subtract (if negative) to the amount calculated with the discount.'),
'price_discount': fields.float('Price Discount', digits=(16,4)),
'price_round': fields.float('Price Rounding',
digits_compute= dp.get_precision('Product Price'),
help="Sets the price so that it is a multiple of this value.\n" \
"Rounding is applied after the discount and before the surcharge.\n" \
"To have prices that end in 9.99, set rounding 10, surcharge -0.01" \
),
'price_min_margin': fields.float('Min. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the minimum amount of margin over the base price.'),
'price_max_margin': fields.float('Max. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the maximum amount of margin over the base price.'),
'company_id': fields.related('price_version_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_constraints = [
(_check_recursion, 'Error! You cannot assign the Main Pricelist as Other Pricelist in PriceList Item!', ['base_pricelist_id']),
(_check_margin, 'Error! The minimum margin should be lower than the maximum margin.', ['price_min_margin', 'price_max_margin'])
]
def product_id_change(self, cr, uid, ids, product_id, context=None):
if not product_id:
return {}
prod = self.pool.get('product.product').read(cr, uid, [product_id], ['code','name'])
if prod[0]['code']:
return {'value': {'name': prod[0]['code']}}
return {}
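# Illustrative sketch (not part of the original module): how a pricelist
# item's modifiers combine, following the field help texts above. The
# discounted base price is snapped to a multiple of price_round, then
# price_surcharge is added. Sign conventions follow the help texts; the
# real computation lives in price_rule_get_multi on product.pricelist.
def _example_apply_rule(base_price, price_discount=0.0, price_round=0.0,
                        price_surcharge=0.0):
    price = base_price * (1.0 + price_discount)
    if price_round:
        # snap to the nearest multiple of price_round
        price = round(price / price_round) * price_round
    return price + price_surcharge
# e.g. rounding 10 with surcharge -0.01 yields prices ending in 9.99:
# _example_apply_rule(103.50, price_round=10, price_surcharge=-0.01) -> 99.99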
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anryko/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcp_compute_target_ssl_proxy.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_ssl_proxy
description:
- Represents a TargetSslProxy resource, which is used by one or more global forwarding
  rules to route incoming SSL requests to a backend service.
short_description: Creates a GCP TargetSslProxy
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource.
required: false
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
- 'Some valid choices include: "NONE", "PROXY_V1"'
required: false
type: str
service:
description:
- A reference to the BackendService resource.
- 'This field represents a link to a BackendService resource in GCP. It can be
specified in two ways. First, you can place a dictionary with key ''selfLink''
    and value of your resource''s selfLink. Alternatively, you can add `register:
name-of-resource` to a gcp_compute_backend_service task and then set this service
field to "{{ name-of-resource }}"'
required: true
type: dict
ssl_certificates:
description:
- A list of SslCertificate resources that are used to authenticate connections
between users and the load balancer. Currently, exactly one SSL certificate
must be specified.
required: true
type: list
ssl_policy:
description:
- A reference to the SslPolicy resource that will be associated with the TargetSslProxy
resource. If not set, the TargetSslProxy resource will not have any SSL policy
configured.
- 'This field represents a link to a SslPolicy resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
    of your resource''s selfLink. Alternatively, you can add `register: name-of-resource`
to a gcp_compute_ssl_policy task and then set this ssl_policy field to "{{ name-of-resource
}}"'
required: false
type: dict
version_added: '2.8'
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetSslProxies)'
- 'Setting Up SSL proxy for Google Cloud Load Balancing: U(https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create an instance group
gcp_compute_instance_group:
name: instancegroup-targetsslproxy
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancegroup
- name: create a health check
gcp_compute_health_check:
name: healthcheck-targetsslproxy
type: TCP
tcp_health_check:
port_name: service-health
request: ping
response: pong
healthy_threshold: 10
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: backendservice-targetsslproxy
backends:
- group: "{{ instancegroup.selfLink }}"
health_checks:
- "{{ healthcheck.selfLink }}"
protocol: SSL
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: backendservice
- name: create an SSL certificate
gcp_compute_ssl_certificate:
name: sslcert-targetsslproxy
description: A certificate for testing. Do not use this certificate in production
certificate: |-
-----BEGIN CERTIFICATE-----
MIICqjCCAk+gAwIBAgIJAIuJ+0352Kq4MAoGCCqGSM49BAMCMIGwMQswCQYDVQQG
EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjERMA8GA1UEBwwIS2lya2xhbmQxFTAT
BgNVBAoMDEdvb2dsZSwgSW5jLjEeMBwGA1UECwwVR29vZ2xlIENsb3VkIFBsYXRm
b3JtMR8wHQYDVQQDDBZ3d3cubXktc2VjdXJlLXNpdGUuY29tMSEwHwYJKoZIhvcN
AQkBFhJuZWxzb25hQGdvb2dsZS5jb20wHhcNMTcwNjI4MDQ1NjI2WhcNMjcwNjI2
MDQ1NjI2WjCBsDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xETAP
BgNVBAcMCEtpcmtsYW5kMRUwEwYDVQQKDAxHb29nbGUsIEluYy4xHjAcBgNVBAsM
FUdvb2dsZSBDbG91ZCBQbGF0Zm9ybTEfMB0GA1UEAwwWd3d3Lm15LXNlY3VyZS1z
aXRlLmNvbTEhMB8GCSqGSIb3DQEJARYSbmVsc29uYUBnb29nbGUuY29tMFkwEwYH
KoZIzj0CAQYIKoZIzj0DAQcDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ
4mzkzTv0dXyB750fOGN02HtkpBOZzzvUARTR10JQoSe2/5PIwaNQME4wHQYDVR0O
BBYEFKIQC3A2SDpxcdfn0YLKineDNq/BMB8GA1UdIwQYMBaAFKIQC3A2SDpxcdfn
0YLKineDNq/BMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSQAwRgIhALs4vy+O
M3jcqgA4fSW/oKw6UJxp+M6a+nGMX+UJR3YgAiEAvvl39QRVAiv84hdoCuyON0lJ
zqGNhIPGq2ULqXKK8BY=
-----END CERTIFICATE-----
private_key: |-
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIObtRo8tkUqoMjeHhsOh2ouPpXCgBcP+EDxZCB/tws15oAoGCCqGSM49
AwEHoUQDQgAEHGzpcRJ4XzfBJCCPMQeXQpTXwlblimODQCuQ4mzkzTv0dXyB750f
OGN02HtkpBOZzzvUARTR10JQoSe2/5PIwQ==
-----END EC PRIVATE KEY-----
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: sslcert
- name: create a target SSL proxy
gcp_compute_target_ssl_proxy:
name: test_object
ssl_certificates:
- "{{ sslcert }}"
service: "{{ backendservice }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the backend,
either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
service:
description:
- A reference to the BackendService resource.
returned: success
type: dict
sslCertificates:
description:
- A list of SslCertificate resources that are used to authenticate connections between
users and the load balancer. Currently, exactly one SSL certificate must be specified.
returned: success
type: list
sslPolicy:
description:
- A reference to the SslPolicy resource that will be associated with the TargetSslProxy
resource. If not set, the TargetSslProxy resource will not have any SSL policy
configured.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
proxy_header=dict(type='str'),
service=dict(required=True, type='dict'),
ssl_certificates=dict(required=True, type='list', elements='dict'),
ssl_policy=dict(type='dict'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#targetSslProxy'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('proxyHeader') != request.get('proxyHeader'):
proxy_header_update(module, request, response)
if response.get('service') != request.get('service'):
service_update(module, request, response)
if response.get('sslCertificates') != request.get('sslCertificates'):
ssl_certificates_update(module, request, response)
if response.get('sslPolicy') != request.get('sslPolicy'):
ssl_policy_update(module, request, response)
def proxy_header_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setProxyHeader"]).format(**module.params),
{u'proxyHeader': module.params.get('proxy_header')},
)
def service_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setBackendService"]).format(**module.params),
{u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink')},
)
def ssl_certificates_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setSslCertificates"]).format(**module.params),
{u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink')},
)
def ssl_policy_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/targetSslProxies/{name}/setSslPolicy"]).format(**module.params),
{u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink')},
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#targetSslProxy',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'proxyHeader': module.params.get('proxy_header'),
u'service': replace_resource_dict(module.params.get(u'service', {}), 'selfLink'),
u'sslCertificates': replace_resource_dict(module.params.get('ssl_certificates', []), 'selfLink'),
u'sslPolicy': replace_resource_dict(module.params.get(u'ssl_policy', {}), 'selfLink'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
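# Illustrative note (not part of the generated module): the filter in
# resource_to_request drops falsy values such as None or '' from the
# request body but keeps an explicit boolean False, so a field can still
# be deliberately sent as false to the API.
def _example_falsy_filter():
    request = {'a': None, 'b': '', 'c': False, 'd': 'x'}
    return dict((k, v) for k, v in request.items() if v or v is False)
    # -> {'c': False, 'd': 'x'}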
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetSslProxies".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': module.params.get('description'),
u'id': response.get(u'id'),
u'name': module.params.get('name'),
u'proxyHeader': response.get(u'proxyHeader'),
u'service': response.get(u'service'),
u'sslCertificates': response.get(u'sslCertificates'),
u'sslPolicy': response.get(u'sslPolicy'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetSslProxy')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
|
BobBowles/django-diary | refs/heads/master | manage.py | 198 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
tboyce021/home-assistant | refs/heads/dev | tests/components/apple_tv/__init__.py | 9 | """Tests for Apple TV."""
import pytest
# Make asserts in the common module display differences
pytest.register_assert_rewrite("tests.components.apple_tv.common")
|
resmo/ansible | refs/heads/devel | lib/ansible/modules/windows/win_eventlog_entry.py | 38 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Andrew Saraceni <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_eventlog_entry
version_added: "2.4"
short_description: Write entries to Windows event logs
description:
- Write log entries to a given event log from a specified source.
options:
log:
description:
- Name of the event log to write an entry to.
type: str
required: yes
source:
description:
- Name of the log source to indicate where the entry is from.
type: str
required: yes
event_id:
description:
- The numeric event identifier for the entry.
- Value must be between 0 and 65535.
type: int
required: yes
message:
description:
- The message for the given log entry.
type: str
required: yes
entry_type:
description:
- Indicates the entry being written to the log is of a specific type.
type: str
choices: [ Error, FailureAudit, Information, SuccessAudit, Warning ]
category:
description:
- A numeric task category associated with the category message file for the log source.
type: int
raw_data:
description:
- Binary data associated with the log entry.
- Value must be a comma-separated array of 8-bit unsigned integers (0 to 255).
type: str
notes:
- This module will always report a change when writing an event entry.
seealso:
- module: win_eventlog
author:
- Andrew Saraceni (@andrewsaraceni)
'''
EXAMPLES = r'''
- name: Write an entry to a Windows event log
win_eventlog_entry:
log: MyNewLog
source: NewLogSource1
event_id: 1234
message: This is a test log entry.
- name: Write another entry to a different Windows event log
win_eventlog_entry:
log: AnotherLog
source: MyAppSource
event_id: 5000
message: An error has occurred.
entry_type: Error
category: 5
raw_data: 10,20
'''
RETURN = r'''
# Default return values
'''
|
samuell/luigi | refs/heads/master | luigi/target.py | 11 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Target` class.
It is a central concept of Luigi and represents the state of the workflow.
"""
import abc
import io
import os
import random
import tempfile
import logging
import warnings
from luigi import six
logger = logging.getLogger('luigi-interface')
@six.add_metaclass(abc.ABCMeta)
class Target(object):
"""
A Target is a resource generated by a :py:class:`~luigi.task.Task`.
For example, a Target might correspond to a file in HDFS or data in a database. The Target
interface defines one method that must be overridden: :py:meth:`exists`, which signifies if the
Target has been created or not.
Typically, a :py:class:`~luigi.task.Task` will define one or more Targets as output, and the Task
is considered complete if and only if each of its output Targets exist.
"""
@abc.abstractmethod
def exists(self):
"""
Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise.
"""
pass
class FileSystemException(Exception):
"""
Base class for generic file system exceptions.
"""
pass
class FileAlreadyExists(FileSystemException):
"""
Raised when a file system operation can't be performed because
a directory exists but is required to not exist.
"""
pass
class MissingParentDirectory(FileSystemException):
"""
Raised when a parent directory doesn't exist.
(Imagine mkdir without -p)
"""
pass
class NotADirectory(FileSystemException):
"""
Raised when a file system operation can't be performed because
an expected directory is actually a file.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class FileSystem(object):
"""
FileSystem abstraction used in conjunction with :py:class:`FileSystemTarget`.
Typically, a FileSystem is associated with instances of a :py:class:`FileSystemTarget`. The
    instances of the :py:class:`FileSystemTarget` will delegate methods such as
:py:meth:`FileSystemTarget.exists` and :py:meth:`FileSystemTarget.remove` to the FileSystem.
Methods of FileSystem raise :py:class:`FileSystemException` if there is a problem completing the
operation.
"""
@abc.abstractmethod
def exists(self, path):
"""
        Return ``True`` if file or directory at ``path`` exists, ``False`` otherwise.
:param str path: a path within the FileSystem to check for existence.
"""
pass
@abc.abstractmethod
def remove(self, path, recursive=True, skip_trash=True):
""" Remove file or directory at location ``path``
:param str path: a path within the FileSystem to remove.
:param bool recursive: if the path is a directory, recursively remove the directory and all
of its descendants. Defaults to ``True``.
"""
pass
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
Create directory at location ``path``
        Creates the directory at ``path`` and implicitly creates parent
        directories if they do not already exist.
:param str path: a path within the FileSystem to create as a directory.
:param bool parents: Create parent directories when necessary. When
parents=False and the parent directory doesn't
exist, raise luigi.target.MissingParentDirectory
:param bool raise_if_exists: raise luigi.target.FileAlreadyExists if
the folder already exists.
"""
raise NotImplementedError("mkdir() not implemented on {0}".format(self.__class__.__name__))
def isdir(self, path):
"""
Return ``True`` if the location at ``path`` is a directory. If not, return ``False``.
:param str path: a path within the FileSystem to check as a directory.
        *Note*: This method is optional, not all FileSystem subclasses implement it.
"""
raise NotImplementedError("isdir() not implemented on {0}".format(self.__class__.__name__))
def listdir(self, path):
"""Return a list of files rooted in path.
This returns an iterable of the files rooted at ``path``. This is intended to be a
recursive listing.
:param str path: a path within the FileSystem to list.
        *Note*: This method is optional, not all FileSystem subclasses implement it.
"""
raise NotImplementedError("listdir() not implemented on {0}".format(self.__class__.__name__))
def move(self, path, dest):
"""
Move a file, as one would expect.
"""
raise NotImplementedError("move() not implemented on {0}".format(self.__class__.__name__))
def rename_dont_move(self, path, dest):
"""
Potentially rename ``path`` to ``dest``, but don't move it into the
``dest`` folder (if it is a folder). This relates to :ref:`AtomicWrites`.
        This method has a reasonable but not bulletproof default
        implementation: it will just do ``move()`` if the destination
        doesn't ``exists()`` already.
"""
warnings.warn("File system {} client doesn't support atomic mv.".format(self.__class__.__name__))
if self.exists(dest):
raise FileAlreadyExists()
self.move(path, dest)
def rename(self, *args, **kwargs):
"""
Alias for ``move()``
"""
self.move(*args, **kwargs)
def copy(self, path, dest):
"""
Copy a file or a directory with contents.
Currently, LocalFileSystem and MockFileSystem support only single file
copying but S3Client copies either a file or a directory as required.
"""
raise NotImplementedError("copy() not implemented on {0}".
format(self.__class__.__name__))
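# A minimal sketch (not part of luigi): a toy in-memory FileSystem that
# satisfies the abstract exists/remove contract above and the optional
# mkdir, just to illustrate what concrete clients (local, HDFS, S3)
# provide against their own backing stores.
class _DemoFileSystem(FileSystem):
    def __init__(self):
        self._paths = set()
    def exists(self, path):
        return path in self._paths
    def remove(self, path, recursive=True, skip_trash=True):
        self._paths.discard(path)
        if recursive:
            # also drop anything "under" the removed directory
            self._paths = set(p for p in self._paths
                              if not p.startswith(path + '/'))
    def mkdir(self, path, parents=True, raise_if_exists=False):
        if raise_if_exists and path in self._paths:
            raise FileAlreadyExists()
        self._paths.add(path)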
class FileSystemTarget(Target):
"""
Base class for FileSystem Targets like :class:`~luigi.file.LocalTarget` and :class:`~luigi.contrib.hdfs.HdfsTarget`.
A FileSystemTarget has an associated :py:class:`FileSystem` to which certain operations can be
delegated. By default, :py:meth:`exists` and :py:meth:`remove` are delegated to the
:py:class:`FileSystem`, which is determined by the :py:attr:`fs` property.
Methods of FileSystemTarget raise :py:class:`FileSystemException` if there is a problem
completing the operation.
"""
def __init__(self, path):
"""
Initializes a FileSystemTarget instance.
:param str path: the path associated with this FileSystemTarget.
"""
self.path = path
@abc.abstractproperty
def fs(self):
"""
The :py:class:`FileSystem` associated with this FileSystemTarget.
"""
raise NotImplementedError()
@abc.abstractmethod
def open(self, mode):
"""
Open the FileSystem target.
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options.
"""
pass
def exists(self):
"""
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`.
"""
path = self.path
if '*' in path or '?' in path or '[' in path or '{' in path:
logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
"override exists() to suppress the warning.", path)
return self.fs.exists(path)
def remove(self):
"""
Remove the resource at the path specified by this FileSystemTarget.
This method is implemented by using :py:attr:`fs`.
"""
self.fs.remove(self.path)
def temporary_path(self):
"""
A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting*, it will move the temporary file if there was no exception thrown.
This step uses :py:meth:`FileSystem.rename_dont_move`
The file system operations will be carried out by calling them on :py:attr:`fs`.
The typical use case looks like this:
.. code:: python
class MyTask(luigi.Task):
def output(self):
return MyFileSystemTarget(...)
def run(self):
with self.output().temporary_path() as self.temp_output_path:
run_some_external_command(output_path=self.temp_output_path)
"""
class _Manager(object):
target = self
def __init__(self):
num = random.randrange(0, 1e10)
slashless_path = self.target.path.rstrip('/').rstrip("\\")
self._temp_path = '{}-luigi-tmp-{:010}{}'.format(
slashless_path,
num,
self.target._trailing_slash())
# TODO: os.path doesn't make sense here as it's os-dependent
tmp_dir = os.path.dirname(slashless_path)
if tmp_dir:
self.target.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)
def __enter__(self):
return self._temp_path
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
# There were no exceptions
self.target.fs.rename_dont_move(self._temp_path, self.target.path)
return False # False means we don't suppress the exception
return _Manager()
def _touchz(self):
with self.open('w'):
pass
def _trailing_slash(self):
# I suppose one day schema-like paths, like
# file:///path/blah.txt?params=etc can be parsed too
return self.path[-1] if self.path[-1] in r'\/' else ''
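# A minimal sketch (not part of luigi) of a concrete FileSystemTarget
# wired to the toy _DemoFileSystem above: fs exposes the client, and a
# write-mode open() records the path so exists() turns True.
class _DemoTarget(FileSystemTarget):
    _fs = _DemoFileSystem()
    @property
    def fs(self):
        return self._fs
    def open(self, mode):
        if mode == 'w':
            self._fs._paths.add(self.path)
        return io.BytesIO()  # stand-in for a real file handle
# usage: t = _DemoTarget('/data/out'); t.exists() is False until opened 'w'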
class AtomicLocalFile(io.BufferedWriter):
"""Abstract class to create a Target that creates
a temporary file in the local filesystem before
moving it to its final destination.
    This class is just for the writing part of the Target. See
    :class:`luigi.file.LocalTarget` for an example.
"""
def __init__(self, path):
self.__tmp_path = self.generate_tmp_path(path)
self.path = path
super(AtomicLocalFile, self).__init__(io.FileIO(self.__tmp_path, 'w'))
def close(self):
super(AtomicLocalFile, self).close()
self.move_to_final_destination()
def generate_tmp_path(self, path):
return os.path.join(tempfile.gettempdir(), 'luigi-s3-tmp-%09d' % random.randrange(0, 1e10))
def move_to_final_destination(self):
raise NotImplementedError()
def __del__(self):
if os.path.exists(self.tmp_path):
os.remove(self.tmp_path)
@property
def tmp_path(self):
return self.__tmp_path
def __exit__(self, exc_type, exc, traceback):
" Close/commit the file if there are no exception "
if exc_type:
return
return super(AtomicLocalFile, self).__exit__(exc_type, exc, traceback)
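# A minimal sketch (not part of luigi) of a concrete AtomicLocalFile: the
# single hook subclasses must supply is move_to_final_destination(), here
# an os.rename of the temp file onto self.path (atomic on a single POSIX
# volume), e.g.:
#     with _DemoAtomicFile('/tmp/out.bin') as f:
#         f.write(b'payload')   # visible at /tmp/out.bin only after close
class _DemoAtomicFile(AtomicLocalFile):
    def move_to_final_destination(self):
        os.rename(self.tmp_path, self.path)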
|
AkademieOlympia/sympy | refs/heads/master | sympy/tensor/indexed.py | 29 | """Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
1) The Indexed class represents the entire indexed object.
|
___|___
' '
M[i, j]
/ \__\______
| |
| |
       |     2) The Idx class represents indices and each Idx can
| optionally contain information about its range.
|
3) IndexedBase represents the `stem' of an indexed object, here `M'.
The stem used by itself is usually taken to represent the entire
array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product imply a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
x[j]*M[i, j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from __future__ import print_function, division
from sympy.core import Expr, Tuple, Symbol, sympify, S
from sympy.core.compatibility import is_sequence, string_types, NotIterable, range
class IndexException(Exception):
pass
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j)
A[i, j]
It is recommended that Indexed objects are created via IndexedBase:
>>> A = IndexedBase('A')
>>> Indexed('A', i, j) == A[i, j]
True
"""
is_commutative = True
def __new__(cls, base, *args):
from sympy.utilities.misc import filldedent
if not args:
raise IndexException("Indexed needs at least one index.")
if isinstance(base, (string_types, Symbol)):
base = IndexedBase(base)
elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
raise TypeError(filldedent("""
Indexed expects string, Symbol or IndexedBase as base."""))
args = list(map(sympify, args))
return Expr.__new__(cls, base, *args)
@property
def base(self):
"""Returns the IndexedBase of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).base
A
>>> B = IndexedBase('B')
>>> B == B[i, j].base
True
"""
return self.args[0]
@property
def indices(self):
"""
Returns the indices of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).indices
(i, j)
"""
return self.args[1:]
@property
def rank(self):
"""
Returns the rank of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j, k, l, m = symbols('i:m', cls=Idx)
>>> Indexed('A', i, j).rank
2
>>> q = Indexed('A', i, j, k, l, m)
>>> q.rank
5
>>> q.rank == len(q.indices)
True
"""
return len(self.args) - 1
@property
def shape(self):
"""Returns a list with dimensions of each index.
        Dimensions are a property of the array, not of the indices. Still, if
the IndexedBase does not define a shape attribute, it is assumed that
the ranges of the indices correspond to the shape of the array.
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> from sympy import symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
(n, n)
>>> B[i, j].shape
(m, m)
"""
from sympy.utilities.misc import filldedent
if self.base.shape:
return self.base.shape
try:
return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
except AttributeError:
raise IndexException(filldedent("""
Range is not defined for all indices in: %s""" % self))
except TypeError:
raise IndexException(filldedent("""
Shape cannot be inferred from Idx with
undefined range: %s""" % self))
@property
def ranges(self):
"""Returns a list of tuples with lower and upper range of each index.
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None`` instead of a tuple.
Examples
========
>>> from sympy import Indexed,Idx, symbols
>>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
[(0, 1), (0, 3), (0, 7)]
>>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
[(0, 2), (0, 2), (0, 2)]
>>> x, y, z = symbols('x y z', integer=True)
>>> Indexed('A', x, y, z).ranges
[None, None, None]
"""
ranges = []
for i in self.indices:
try:
ranges.append(Tuple(i.lower, i.upper))
except AttributeError:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = list(map(p.doprint, self.indices))
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class IndexedBase(Expr, NotIterable):
"""Represent the base or stem of an indexed object
    The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
      be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
(o, p)
"""
is_commutative = True
def __new__(cls, label, shape=None, **kw_args):
if isinstance(label, string_types):
label = Symbol(label)
elif isinstance(label, Symbol):
pass
else:
raise TypeError("Base label should be a string or Symbol.")
obj = Expr.__new__(cls, label, **kw_args)
if is_sequence(shape):
obj._shape = Tuple(*shape)
else:
obj._shape = sympify(shape)
return obj
@property
def args(self):
"""Returns the arguments used to create this IndexedBase object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).args
(A, (x, y))
"""
if self._shape:
return self._args + (self._shape,)
else:
return self._args
def _hashable_content(self):
return Expr._hashable_content(self) + (self._shape,)
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
"""Returns the shape of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase, Idx, Symbol
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).shape
(x, y)
Note: If the shape of the IndexedBase is specified, it will override
any shape information given by the indices.
>>> A = IndexedBase('A', shape=(x, y))
>>> B = IndexedBase('B')
>>> i = Idx('i', 2)
>>> j = Idx('j', 1)
>>> A[i, j].shape
(x, y)
>>> B[i, j].shape
(2, 1)
"""
return self._shape
@property
def label(self):
"""Returns the label of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).label
A
"""
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Idx(Expr):
"""Represents an integer index as an Integer or integer expression.
There are a number of ways to create an Idx object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
- Symbol or integer: This is interpreted as a dimension. Lower and
upper bounds are set to 0 and range - 1, respectively.
- tuple: The two elements are interpreted as the lower and upper
bounds of the range, respectively.
Note: the Idx constructor is rather pedantic in that it only accepts
integer arguments. The only exception is that you can use oo and -oo to
specify an unbounded range. For all other cases, both label and bounds
must be declared as integers, e.g. if n is given as an argument then
n.is_integer must return True.
For convenience, if the label is given as a string it is automatically
converted to an integer symbol. (Note: this conversion is not done for
range or dimension arguments.)
Examples
========
>>> from sympy.tensor import Idx
>>> from sympy import symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
If a string is given for the label an integer Symbol is created and the
bounds are both None:
>>> idx = Idx('qwerty'); idx
qwerty
>>> idx.lower, idx.upper
(None, None)
Both upper and lower bounds can be specified:
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
When only a single bound is given it is interpreted as the dimension
and the lower bound defaults to 0:
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
"""
is_integer = True
def __new__(cls, label, range=None, **kw_args):
from sympy.utilities.misc import filldedent
if isinstance(label, string_types):
label = Symbol(label, integer=True)
label, range = list(map(sympify, (label, range)))
if label.is_Number:
if not label.is_integer:
raise TypeError("Index is not an integer number.")
return label
if not label.is_integer:
raise TypeError("Idx object requires an integer label.")
elif is_sequence(range):
if len(range) != 2:
raise ValueError(filldedent("""
Idx range tuple must have length 2, but got %s""" % len(range)))
for bound in range:
if not (bound.is_integer or abs(bound) is S.Infinity):
raise TypeError("Idx object requires integer bounds.")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if not (range.is_integer or range is S.Infinity):
raise TypeError("Idx object requires an integer dimension.")
args = label, Tuple(0, range - 1)
elif range:
raise TypeError(filldedent("""
The range must be an ordered iterable or
integer SymPy expression."""))
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
return obj
@property
def label(self):
"""Returns the label (Integer or integer expression) of the Idx object.
Examples
========
>>> from sympy import Idx, Symbol
>>> x = Symbol('x', integer=True)
>>> Idx(x).label
x
>>> j = Symbol('j', integer=True)
>>> Idx(j).label
j
>>> Idx(j + 1).label
j + 1
"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).lower
0
>>> Idx('j', 5).lower
0
>>> Idx('j').lower is None
True
"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).upper
1
>>> Idx('j', 5).upper
4
>>> Idx('j').upper is None
True
"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
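# Illustrative sketch (not part of this module): expanding the implicit
# summation over a repeated index into an explicit Sum, using the
# contraction analysis mentioned in the module docstring.
def _example_explicit_sum():
    from sympy import Sum, symbols
    from sympy.tensor import get_contraction_structure
    n = symbols('n', integer=True)
    i, j = Idx('i', n), Idx('j', n)
    M, x = IndexedBase('M'), IndexedBase('x')
    expr = M[i, j]*x[j]
    # structure is {(j,): set([x[j]*M[i, j]])}; j is the dummy index
    (dummies,) = get_contraction_structure(expr).keys()
    return Sum(expr, *[(d, d.lower, d.upper) for d in dummies])
    # -> Sum(x[j]*M[i, j], (j, 0, n - 1))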
|
rgommers/pywt | refs/heads/master | pywt/tests/test_deprecations.py | 3 | import warnings
import numpy as np
from numpy.testing import assert_warns, assert_array_equal
import pywt
def test_intwave_deprecation():
wavelet = pywt.Wavelet('db3')
assert_warns(DeprecationWarning, pywt.intwave, wavelet)
def test_centrfrq_deprecation():
wavelet = pywt.Wavelet('db3')
assert_warns(DeprecationWarning, pywt.centrfrq, wavelet)
def test_scal2frq_deprecation():
wavelet = pywt.Wavelet('db3')
assert_warns(DeprecationWarning, pywt.scal2frq, wavelet, 1)
def test_orthfilt_deprecation():
assert_warns(DeprecationWarning, pywt.orthfilt, range(6))
def test_integrate_wave_tuple():
sig = [0, 1, 2, 3]
xgrid = [0, 1, 2, 3]
assert_warns(DeprecationWarning, pywt.integrate_wavelet, (sig, xgrid))
old_modes = ['zpd',
'cpd',
'sym',
'ppd',
'sp1',
'per',
]
def test_MODES_from_object_deprecation():
for mode in old_modes:
assert_warns(DeprecationWarning, pywt.Modes.from_object, mode)
def test_MODES_attributes_deprecation():
def get_mode(Modes, name):
return getattr(Modes, name)
for mode in old_modes:
assert_warns(DeprecationWarning, get_mode, pywt.Modes, mode)
def test_MODES_deprecation_new():
def use_MODES_new():
return pywt.MODES.symmetric
assert_warns(DeprecationWarning, use_MODES_new)
def test_MODES_deprecation_old():
def use_MODES_old():
return pywt.MODES.sym
assert_warns(DeprecationWarning, use_MODES_old)
def test_MODES_deprecation_getattr():
def use_MODES_new():
return getattr(pywt.MODES, 'symmetric')
assert_warns(DeprecationWarning, use_MODES_new)
def test_mode_equivalence():
old_new = [('zpd', 'zero'),
('cpd', 'constant'),
('sym', 'symmetric'),
('ppd', 'periodic'),
('sp1', 'smooth'),
('per', 'periodization')]
x = np.arange(8.)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
for old, new in old_new:
assert_array_equal(pywt.dwt(x, 'db2', mode=old),
pywt.dwt(x, 'db2', mode=new))
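# Illustrative helper (not part of pywt): the old->new mapping exercised
# in test_mode_equivalence above, handy for migrating callers off the
# deprecated spellings without triggering DeprecationWarning.
OLD_TO_NEW_MODE = {'zpd': 'zero', 'cpd': 'constant', 'sym': 'symmetric',
                   'ppd': 'periodic', 'sp1': 'smooth', 'per': 'periodization'}
def modern_mode(mode):
    """Return the current spelling for a possibly-deprecated mode name."""
    return OLD_TO_NEW_MODE.get(mode, mode)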
|
surligas/gnuradio | refs/heads/master | gr-qtgui/examples/pyqt_freq_c.py | 38 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
def __init__(self, display, control):
QtGui.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))
self.layout = QtGui.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtGui.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
self.freq1EditText)
self.amp1Edit = QtGui.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
self.amp1EditText)
# Control the second signal
self.freq2Edit = QtGui.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
self.freq2EditText)
self.amp2Edit = QtGui.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
self.amp2EditText)
self.quit = QtGui.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.connect(self.quit, QtCore.SIGNAL('clicked()'),
QtGui.qApp, QtCore.SLOT('quit()'))
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 200
npts = 2048
self.qapp = QtGui.QApplication(sys.argv)
ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
sstext = ss.read()
ss.close()
self.qapp.setStyleSheet(sstext)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.01)
thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
self.snk1 = qtgui.freq_sink_c(npts, filter.firdes.WIN_BLACKMAN_hARRIS,
0, Rs,
"Complex Freq Example", 3)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, channel, thr, (self.snk1, 0))
self.connect(src1, (self.snk1, 1))
self.connect(src2, (self.snk1, 2))
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt4.QtGui.QWidget
pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
#pyWin.show()
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
|
bbozhev/flask-test | refs/heads/master | flask/lib/python2.7/site-packages/decorator.py | 112 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
def get_init(cls):
return cls.__init__.im_func
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = \
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1]
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
        # check existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.func_defaults = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec code in evaldict
except:
print >> sys.stderr, 'Error in generated code:'
print >> sys.stderr, src
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an attribute
__source__ is added to the result. The attributes attrs are added,
if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip the trailing parenthesis
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
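# Illustrative sketch of FunctionMaker.create (the function built below is a
# made-up example, not part of this module):
#   f = FunctionMaker.create('f(x)', 'return x + 1', {})
#   assert f(2) == 3 and f.__name__ == 'f'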
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
name = '_lambda_' if caller.__name__ == '<lambda>' \
else caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.im_func
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return decorator(_call_, %s)' % fun,
evaldict, undecorated=caller, __wrapped__=caller,
doc=doc, module=caller.__module__)
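# Illustrative usage sketch (the `trace` caller is a made-up example, not
# part of this module):
#   @decorator
#   def trace(f, *args, **kw):
#       print 'calling %s with %s, %s' % (f.__name__, args, kw)
#       return f(*args, **kw)
#
#   @trace
#   def add(x, y):
#       return x + y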
######################### contextmanager ########################
def __call__(self, func):
'Context manager decorator'
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
ContextManager = type(
'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager
def __init__(self, f, *a, **k):
return GeneratorContextManager.__init__(self, f(*a, **k))
ContextManager = type(
'ContextManager', (GeneratorContextManager,),
dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)
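# Illustrative sketch: the resulting decorator wraps generator functions in
# the same spirit as contextlib.contextmanager (names below are made up):
#   @contextmanager
#   def tag():
#       print 'open'
#       yield
#       print 'close'
#
#   with tag():
#       pass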
|
mtorromeo/mattersend | refs/heads/master | mattersend.py | 1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import argparse
import json
import csv
import mimetypes
from io import StringIO
try:
import configparser
except ImportError:
import ConfigParser as configparser
name = 'mattersend'
version = '2.0'
url = 'https://github.com/mtorromeo/mattersend'
description = "Library and CLI utility to send messages to mattermost's incoming webhooks"
syntaxes = ['diff', 'apache', 'makefile', 'http', 'json', 'markdown',
'javascript', 'css', 'nginx', 'objectivec', 'python', 'xml',
'perl', 'bash', 'php', 'coffeescript', 'cs', 'cpp', 'sql', 'go',
'ruby', 'java', 'ini', 'latex', 'plain', 'auto']
mime_to_syntax = {
'text/x-diff': 'diff',
'application/json': 'json',
'application/x-javascript': 'javascript',
'text/x-python': 'python',
'application/xml': 'xml',
'text/x-perl': 'perl',
'text/x-sh': 'bash',
'text/x-csrc': 'cpp',
'text/x-chdr': 'cpp',
'text/x-c++src': 'cpp',
'text/x-c++hdr': 'cpp',
'text/x-c': 'cpp',
'application/x-sql': 'sql',
'application/x-ruby': 'ruby',
'text/x-java-source': 'java',
'application/x-latex': 'latex',
}
ext_to_syntax = {
'Makefile': 'makefile',
'.mk': 'makefile',
'.htaccess': 'apache',
'.json': 'json',
'.js': 'javascript',
'.css': 'css',
'.m': 'objectivec',
'.py': 'python',
'.xml': 'xml',
'.pl': 'perl',
'.sh': 'bash',
'.php': 'php',
'.phtml': 'php',
'.phps': 'php',
'.php3': 'php',
'.php4': 'php',
'.php5': 'php',
'.php7': 'php',
'.coffee': 'coffeescript',
'.cs': 'cs',
'.c': 'cpp',
'.cc': 'cpp',
'.cxx': 'cpp',
'.cpp': 'cpp',
'.h': 'cpp',
'.hh': 'cpp',
'.dic': 'cpp',
'.sql': 'sql',
'.go': 'go',
'.rb': 'ruby',
'.java': 'java',
'.ini': 'ini',
'.latex': 'latex',
}
emoji_to_code = {
'smile': '1f604',
'smiley': '1f603',
'grinning': '1f600',
'blush': '1f60a',
'relaxed': '263a',
'wink': '1f609',
'heart_eyes': '1f60d',
'kissing_heart': '1f618',
'kissing_closed_eyes': '1f61a',
'kissing': '1f617',
'kissing_smiling_eyes': '1f619',
'stuck_out_tongue_winking_eye': '1f61c',
'stuck_out_tongue_closed_eyes': '1f61d',
'stuck_out_tongue': '1f61b',
'flushed': '1f633',
'grin': '1f601',
'pensive': '1f614',
'relieved': '1f60c',
'unamused': '1f612',
'disappointed': '1f61e',
'persevere': '1f623',
'cry': '1f622',
'joy': '1f602',
'sob': '1f62d',
'sleepy': '1f62a',
'disappointed_relieved': '1f625',
'cold_sweat': '1f630',
'sweat_smile': '1f605',
'sweat': '1f613',
'weary': '1f629',
'tired_face': '1f62b',
'fearful': '1f628',
'scream': '1f631',
'angry': '1f620',
'rage': '1f621',
'pout': '1f621',
'triumph': '1f624',
'confounded': '1f616',
'laughing': '1f606',
'satisfied': '1f606',
'yum': '1f60b',
'mask': '1f637',
'sunglasses': '1f60e',
'sleeping': '1f634',
'dizzy_face': '1f635',
'astonished': '1f632',
'worried': '1f61f',
'frowning': '1f626',
'anguished': '1f627',
'smiling_imp': '1f608',
'imp': '1f47f',
'open_mouth': '1f62e',
'grimacing': '1f62c',
'neutral_face': '1f610',
'confused': '1f615',
'hushed': '1f62f',
'no_mouth': '1f636',
'innocent': '1f607',
'smirk': '1f60f',
'expressionless': '1f611',
'man_with_gua_pi_mao': '1f472',
'man_with_turban': '1f473',
'cop': '1f46e',
'construction_worker': '1f477',
'guardsman': '1f482',
'baby': '1f476',
'boy': '1f466',
'girl': '1f467',
'man': '1f468',
'woman': '1f469',
'older_man': '1f474',
'older_woman': '1f475',
'person_with_blond_hair': '1f471',
'angel': '1f47c',
'princess': '1f478',
'smiley_cat': '1f63a',
'smile_cat': '1f638',
'heart_eyes_cat': '1f63b',
'kissing_cat': '1f63d',
'smirk_cat': '1f63c',
'scream_cat': '1f640',
'crying_cat_face': '1f63f',
'joy_cat': '1f639',
'pouting_cat': '1f63e',
'japanese_ogre': '1f479',
'japanese_goblin': '1f47a',
'see_no_evil': '1f648',
'hear_no_evil': '1f649',
'speak_no_evil': '1f64a',
'skull': '1f480',
'alien': '1f47d',
'hankey': '1f4a9',
'poop': '1f4a9',
'shit': '1f4a9',
'fire': '1f525',
'sparkles': '2728',
'star2': '1f31f',
'dizzy': '1f4ab',
'boom': '1f4a5',
'collision': '1f4a5',
'anger': '1f4a2',
'sweat_drops': '1f4a6',
'droplet': '1f4a7',
'zzz': '1f4a4',
'dash': '1f4a8',
'ear': '1f442',
'eyes': '1f440',
'nose': '1f443',
'tongue': '1f445',
'lips': '1f444',
'+1': '1f44d',
'thumbsup': '1f44d',
'-1': '1f44e',
'thumbsdown': '1f44e',
'ok_hand': '1f44c',
'facepunch': '1f44a',
'punch': '1f44a',
'fist': '270a',
'v': '270c',
'wave': '1f44b',
'hand': '270b',
'raised_hand': '270b',
'open_hands': '1f450',
'point_up_2': '1f446',
'point_down': '1f447',
'point_right': '1f449',
'point_left': '1f448',
'raised_hands': '1f64c',
'pray': '1f64f',
'point_up': '261d',
'clap': '1f44f',
'muscle': '1f4aa',
'walking': '1f6b6',
'runner': '1f3c3',
'running': '1f3c3',
'dancer': '1f483',
'couple': '1f46b',
'family': '1f46a',
'two_men_holding_hands': '1f46c',
'two_women_holding_hands': '1f46d',
'couplekiss': '1f48f',
'couple_with_heart': '1f491',
'dancers': '1f46f',
'ok_woman': '1f646',
'no_good': '1f645',
'ng_woman': '1f645',
'information_desk_person': '1f481',
'raising_hand': '1f64b',
'massage': '1f486',
'haircut': '1f487',
'nail_care': '1f485',
'bride_with_veil': '1f470',
'person_with_pouting_face': '1f64e',
'person_frowning': '1f64d',
'bow': '1f647',
'tophat': '1f3a9',
'crown': '1f451',
'womans_hat': '1f452',
'athletic_shoe': '1f45f',
'mans_shoe': '1f45e',
'shoe': '1f45e',
'sandal': '1f461',
'high_heel': '1f460',
'boot': '1f462',
'shirt': '1f455',
'tshirt': '1f455',
'necktie': '1f454',
'womans_clothes': '1f45a',
'dress': '1f457',
'running_shirt_with_sash': '1f3bd',
'jeans': '1f456',
'kimono': '1f458',
'bikini': '1f459',
'briefcase': '1f4bc',
'handbag': '1f45c',
'pouch': '1f45d',
'purse': '1f45b',
'eyeglasses': '1f453',
'ribbon': '1f380',
'closed_umbrella': '1f302',
'lipstick': '1f484',
'yellow_heart': '1f49b',
'blue_heart': '1f499',
'purple_heart': '1f49c',
'green_heart': '1f49a',
'heart': '2764',
'broken_heart': '1f494',
'heartpulse': '1f497',
'heartbeat': '1f493',
'two_hearts': '1f495',
'sparkling_heart': '1f496',
'revolving_hearts': '1f49e',
'cupid': '1f498',
'love_letter': '1f48c',
'kiss': '1f48b',
'ring': '1f48d',
'gem': '1f48e',
'bust_in_silhouette': '1f464',
'busts_in_silhouette': '1f465',
'speech_balloon': '1f4ac',
'footprints': '1f463',
'thought_balloon': '1f4ad',
'dog': '1f436',
'wolf': '1f43a',
'cat': '1f431',
'mouse': '1f42d',
'hamster': '1f439',
'rabbit': '1f430',
'frog': '1f438',
'tiger': '1f42f',
'koala': '1f428',
'bear': '1f43b',
'pig': '1f437',
'pig_nose': '1f43d',
'cow': '1f42e',
'boar': '1f417',
'monkey_face': '1f435',
'monkey': '1f412',
'horse': '1f434',
'sheep': '1f411',
'elephant': '1f418',
'panda_face': '1f43c',
'penguin': '1f427',
'bird': '1f426',
'baby_chick': '1f424',
'hatched_chick': '1f425',
'hatching_chick': '1f423',
'chicken': '1f414',
'snake': '1f40d',
'turtle': '1f422',
'bug': '1f41b',
'bee': '1f41d',
'honeybee': '1f41d',
'ant': '1f41c',
'beetle': '1f41e',
'snail': '1f40c',
'octopus': '1f419',
'shell': '1f41a',
'tropical_fish': '1f420',
'fish': '1f41f',
'dolphin': '1f42c',
'flipper': '1f42c',
'whale': '1f433',
'whale2': '1f40b',
'cow2': '1f404',
'ram': '1f40f',
'rat': '1f400',
'water_buffalo': '1f403',
'tiger2': '1f405',
'rabbit2': '1f407',
'dragon': '1f409',
'racehorse': '1f40e',
'goat': '1f410',
'rooster': '1f413',
'dog2': '1f415',
'pig2': '1f416',
'mouse2': '1f401',
'ox': '1f402',
'dragon_face': '1f432',
'blowfish': '1f421',
'crocodile': '1f40a',
'camel': '1f42b',
'dromedary_camel': '1f42a',
'leopard': '1f406',
'cat2': '1f408',
'poodle': '1f429',
'feet': '1f43e',
'paw_prints': '1f43e',
'bouquet': '1f490',
'cherry_blossom': '1f338',
'tulip': '1f337',
'four_leaf_clover': '1f340',
'rose': '1f339',
'sunflower': '1f33b',
'hibiscus': '1f33a',
'maple_leaf': '1f341',
'leaves': '1f343',
'fallen_leaf': '1f342',
'herb': '1f33f',
'ear_of_rice': '1f33e',
'mushroom': '1f344',
'cactus': '1f335',
'palm_tree': '1f334',
'evergreen_tree': '1f332',
'deciduous_tree': '1f333',
'chestnut': '1f330',
'seedling': '1f331',
'blossom': '1f33c',
'globe_with_meridians': '1f310',
'sun_with_face': '1f31e',
'full_moon_with_face': '1f31d',
'new_moon_with_face': '1f31a',
'new_moon': '1f311',
'waxing_crescent_moon': '1f312',
'first_quarter_moon': '1f313',
'moon': '1f314',
'waxing_gibbous_moon': '1f314',
'full_moon': '1f315',
'waning_gibbous_moon': '1f316',
'last_quarter_moon': '1f317',
'waning_crescent_moon': '1f318',
'last_quarter_moon_with_face': '1f31c',
'first_quarter_moon_with_face': '1f31b',
'crescent_moon': '1f319',
'earth_africa': '1f30d',
'earth_americas': '1f30e',
'earth_asia': '1f30f',
'volcano': '1f30b',
'milky_way': '1f30c',
'stars': '1f320',
'star': '2b50',
'sunny': '2600',
'partly_sunny': '26c5',
'cloud': '2601',
'zap': '26a1',
'umbrella': '2614',
'snowflake': '2744',
'snowman': '26c4',
'cyclone': '1f300',
'foggy': '1f301',
'rainbow': '1f308',
'ocean': '1f30a',
'bamboo': '1f38d',
'gift_heart': '1f49d',
'dolls': '1f38e',
'school_satchel': '1f392',
'mortar_board': '1f393',
'flags': '1f38f',
'fireworks': '1f386',
'sparkler': '1f387',
'wind_chime': '1f390',
'rice_scene': '1f391',
'jack_o_lantern': '1f383',
'ghost': '1f47b',
'santa': '1f385',
'christmas_tree': '1f384',
'gift': '1f381',
'tanabata_tree': '1f38b',
'tada': '1f389',
'confetti_ball': '1f38a',
'balloon': '1f388',
'crossed_flags': '1f38c',
'crystal_ball': '1f52e',
'movie_camera': '1f3a5',
'camera': '1f4f7',
'video_camera': '1f4f9',
'vhs': '1f4fc',
'cd': '1f4bf',
'dvd': '1f4c0',
'minidisc': '1f4bd',
'floppy_disk': '1f4be',
'computer': '1f4bb',
'iphone': '1f4f1',
'phone': '260e',
'telephone': '260e',
'telephone_receiver': '1f4de',
'pager': '1f4df',
'fax': '1f4e0',
'satellite': '1f4e1',
'tv': '1f4fa',
'radio': '1f4fb',
'loud_sound': '1f50a',
'sound': '1f509',
'speaker': '1f508',
'mute': '1f507',
'bell': '1f514',
'no_bell': '1f515',
'loudspeaker': '1f4e2',
'mega': '1f4e3',
'hourglass_flowing_sand': '23f3',
'hourglass': '231b',
'alarm_clock': '23f0',
'watch': '231a',
'unlock': '1f513',
'lock': '1f512',
'lock_with_ink_pen': '1f50f',
'closed_lock_with_key': '1f510',
'key': '1f511',
'mag_right': '1f50e',
'bulb': '1f4a1',
'flashlight': '1f526',
'high_brightness': '1f506',
'low_brightness': '1f505',
'electric_plug': '1f50c',
'battery': '1f50b',
'mag': '1f50d',
'bathtub': '1f6c1',
'bath': '1f6c0',
'shower': '1f6bf',
'toilet': '1f6bd',
'wrench': '1f527',
'nut_and_bolt': '1f529',
'hammer': '1f528',
'door': '1f6aa',
'smoking': '1f6ac',
'bomb': '1f4a3',
'gun': '1f52b',
'hocho': '1f52a',
'knife': '1f52a',
'pill': '1f48a',
'syringe': '1f489',
'moneybag': '1f4b0',
'yen': '1f4b4',
'dollar': '1f4b5',
'pound': '1f4b7',
'euro': '1f4b6',
'credit_card': '1f4b3',
'money_with_wings': '1f4b8',
'calling': '1f4f2',
'e-mail': '1f4e7',
'inbox_tray': '1f4e5',
'outbox_tray': '1f4e4',
'email': '2709',
'envelope': '2709',
'envelope_with_arrow': '1f4e9',
'incoming_envelope': '1f4e8',
'postal_horn': '1f4ef',
'mailbox': '1f4eb',
'mailbox_closed': '1f4ea',
'mailbox_with_mail': '1f4ec',
'mailbox_with_no_mail': '1f4ed',
'postbox': '1f4ee',
'package': '1f4e6',
'memo': '1f4dd',
'pencil': '1f4dd',
'page_facing_up': '1f4c4',
'page_with_curl': '1f4c3',
'bookmark_tabs': '1f4d1',
'bar_chart': '1f4ca',
'chart_with_upwards_trend': '1f4c8',
'chart_with_downwards_trend': '1f4c9',
'scroll': '1f4dc',
'clipboard': '1f4cb',
'date': '1f4c5',
'calendar': '1f4c6',
'card_index': '1f4c7',
'file_folder': '1f4c1',
'open_file_folder': '1f4c2',
'scissors': '2702',
'pushpin': '1f4cc',
'paperclip': '1f4ce',
'black_nib': '2712',
'pencil2': '270f',
'straight_ruler': '1f4cf',
'triangular_ruler': '1f4d0',
'closed_book': '1f4d5',
'green_book': '1f4d7',
'blue_book': '1f4d8',
'orange_book': '1f4d9',
'notebook': '1f4d3',
'notebook_with_decorative_cover': '1f4d4',
'ledger': '1f4d2',
'books': '1f4da',
'book': '1f4d6',
'open_book': '1f4d6',
'bookmark': '1f516',
'name_badge': '1f4db',
'microscope': '1f52c',
'telescope': '1f52d',
'newspaper': '1f4f0',
'art': '1f3a8',
'clapper': '1f3ac',
'microphone': '1f3a4',
'headphones': '1f3a7',
'musical_score': '1f3bc',
'musical_note': '1f3b5',
'notes': '1f3b6',
'musical_keyboard': '1f3b9',
'violin': '1f3bb',
'trumpet': '1f3ba',
'saxophone': '1f3b7',
'guitar': '1f3b8',
'space_invader': '1f47e',
'video_game': '1f3ae',
'black_joker': '1f0cf',
'flower_playing_cards': '1f3b4',
'mahjong': '1f004',
'game_die': '1f3b2',
'dart': '1f3af',
'football': '1f3c8',
'basketball': '1f3c0',
'soccer': '26bd',
'baseball': '26be',
'tennis': '1f3be',
'8ball': '1f3b1',
'rugby_football': '1f3c9',
'bowling': '1f3b3',
'golf': '26f3',
'mountain_bicyclist': '1f6b5',
'bicyclist': '1f6b4',
'checkered_flag': '1f3c1',
'horse_racing': '1f3c7',
'trophy': '1f3c6',
'ski': '1f3bf',
'snowboarder': '1f3c2',
'swimmer': '1f3ca',
'surfer': '1f3c4',
'fishing_pole_and_fish': '1f3a3',
'coffee': '2615',
'tea': '1f375',
'sake': '1f376',
'baby_bottle': '1f37c',
'beer': '1f37a',
'beers': '1f37b',
'cocktail': '1f378',
'tropical_drink': '1f379',
'wine_glass': '1f377',
'fork_and_knife': '1f374',
'pizza': '1f355',
'hamburger': '1f354',
'fries': '1f35f',
'poultry_leg': '1f357',
'meat_on_bone': '1f356',
'spaghetti': '1f35d',
'curry': '1f35b',
'fried_shrimp': '1f364',
'bento': '1f371',
'sushi': '1f363',
'fish_cake': '1f365',
'rice_ball': '1f359',
'rice_cracker': '1f358',
'rice': '1f35a',
'ramen': '1f35c',
'stew': '1f372',
'oden': '1f362',
'dango': '1f361',
'egg': '1f373',
'bread': '1f35e',
'doughnut': '1f369',
'custard': '1f36e',
'icecream': '1f366',
'ice_cream': '1f368',
'shaved_ice': '1f367',
'birthday': '1f382',
'cake': '1f370',
'cookie': '1f36a',
'chocolate_bar': '1f36b',
'candy': '1f36c',
'lollipop': '1f36d',
'honey_pot': '1f36f',
'apple': '1f34e',
'green_apple': '1f34f',
'tangerine': '1f34a',
'orange': '1f34a',
'mandarin': '1f34a',
'lemon': '1f34b',
'cherries': '1f352',
'grapes': '1f347',
'watermelon': '1f349',
'strawberry': '1f353',
'peach': '1f351',
'melon': '1f348',
'banana': '1f34c',
'pear': '1f350',
'pineapple': '1f34d',
'sweet_potato': '1f360',
'eggplant': '1f346',
'tomato': '1f345',
'corn': '1f33d',
'house': '1f3e0',
'house_with_garden': '1f3e1',
'school': '1f3eb',
'office': '1f3e2',
'post_office': '1f3e3',
'hospital': '1f3e5',
'bank': '1f3e6',
'convenience_store': '1f3ea',
'love_hotel': '1f3e9',
'hotel': '1f3e8',
'wedding': '1f492',
'church': '26ea',
'department_store': '1f3ec',
'european_post_office': '1f3e4',
'city_sunrise': '1f307',
'city_sunset': '1f306',
'japanese_castle': '1f3ef',
'european_castle': '1f3f0',
'tent': '26fa',
'factory': '1f3ed',
'tokyo_tower': '1f5fc',
'japan': '1f5fe',
'mount_fuji': '1f5fb',
'sunrise_over_mountains': '1f304',
'sunrise': '1f305',
'night_with_stars': '1f303',
'statue_of_liberty': '1f5fd',
'bridge_at_night': '1f309',
'carousel_horse': '1f3a0',
'ferris_wheel': '1f3a1',
'fountain': '26f2',
'roller_coaster': '1f3a2',
'ship': '1f6a2',
'boat': '26f5',
'sailboat': '26f5',
'speedboat': '1f6a4',
'rowboat': '1f6a3',
'anchor': '2693',
'rocket': '1f680',
'airplane': '2708',
'seat': '1f4ba',
'helicopter': '1f681',
'steam_locomotive': '1f682',
'tram': '1f68a',
'station': '1f689',
'mountain_railway': '1f69e',
'train2': '1f686',
'bullettrain_side': '1f684',
'bullettrain_front': '1f685',
'light_rail': '1f688',
'metro': '1f687',
'monorail': '1f69d',
'train': '1f68b',
'railway_car': '1f683',
'trolleybus': '1f68e',
'bus': '1f68c',
'oncoming_bus': '1f68d',
'blue_car': '1f699',
'oncoming_automobile': '1f698',
'car': '1f697',
'red_car': '1f697',
'taxi': '1f695',
'oncoming_taxi': '1f696',
'articulated_lorry': '1f69b',
'truck': '1f69a',
'rotating_light': '1f6a8',
'police_car': '1f693',
'oncoming_police_car': '1f694',
'fire_engine': '1f692',
'ambulance': '1f691',
'minibus': '1f690',
'bike': '1f6b2',
'aerial_tramway': '1f6a1',
'suspension_railway': '1f69f',
'mountain_cableway': '1f6a0',
'tractor': '1f69c',
'barber': '1f488',
'busstop': '1f68f',
'ticket': '1f3ab',
'vertical_traffic_light': '1f6a6',
'traffic_light': '1f6a5',
'warning': '26a0',
'construction': '1f6a7',
'beginner': '1f530',
'fuelpump': '26fd',
'izakaya_lantern': '1f3ee',
'lantern': '1f3ee',
'slot_machine': '1f3b0',
'hotsprings': '2668',
'moyai': '1f5ff',
'circus_tent': '1f3aa',
'performing_arts': '1f3ad',
'round_pushpin': '1f4cd',
'triangular_flag_on_post': '1f6a9',
'jp': '1f1ef-1f1f5',
'kr': '1f1f0-1f1f7',
'de': '1f1e9-1f1ea',
'cn': '1f1e8-1f1f3',
'us': '1f1fa-1f1f8',
'fr': '1f1eb-1f1f7',
'es': '1f1ea-1f1f8',
'it': '1f1ee-1f1f9',
'ru': '1f1f7-1f1fa',
'gb': '1f1ec-1f1e7',
'uk': '1f1ec-1f1e7',
'one': '0031-20e3',
'two': '0032-20e3',
'three': '0033-20e3',
'four': '0034-20e3',
'five': '0035-20e3',
'six': '0036-20e3',
'seven': '0037-20e3',
'eight': '0038-20e3',
'nine': '0039-20e3',
'zero': '0030-20e3',
'keycap_ten': '1f51f',
'1234': '1f522',
'hash': '0023-20e3',
'symbols': '1f523',
'arrow_up': '2b06',
'arrow_down': '2b07',
'arrow_left': '2b05',
'arrow_right': '27a1',
'capital_abcd': '1f520',
'abcd': '1f521',
'abc': '1f524',
'arrow_upper_right': '2197',
'arrow_upper_left': '2196',
'arrow_lower_right': '2198',
'arrow_lower_left': '2199',
'left_right_arrow': '2194',
'arrow_up_down': '2195',
'arrows_counterclockwise': '1f504',
'arrow_backward': '25c0',
'arrow_forward': '25b6',
'arrow_up_small': '1f53c',
'arrow_down_small': '1f53d',
'leftwards_arrow_with_hook': '21a9',
'arrow_right_hook': '21aa',
'information_source': '2139',
'rewind': '23ea',
'fast_forward': '23e9',
'arrow_double_up': '23eb',
'arrow_double_down': '23ec',
'arrow_heading_down': '2935',
'arrow_heading_up': '2934',
'ok': '1f197',
'twisted_rightwards_arrows': '1f500',
'repeat': '1f501',
'repeat_one': '1f502',
'new': '1f195',
'up': '1f199',
'cool': '1f192',
'free': '1f193',
'ng': '1f196',
'signal_strength': '1f4f6',
'cinema': '1f3a6',
'koko': '1f201',
'u6307': '1f22f',
'u7a7a': '1f233',
'u6e80': '1f235',
'u5408': '1f234',
'u7981': '1f232',
'ideograph_advantage': '1f250',
'u5272': '1f239',
'u55b6': '1f23a',
'u6709': '1f236',
'u7121': '1f21a',
'restroom': '1f6bb',
'mens': '1f6b9',
'womens': '1f6ba',
'baby_symbol': '1f6bc',
'wc': '1f6be',
'potable_water': '1f6b0',
'put_litter_in_its_place': '1f6ae',
'parking': '1f17f',
'wheelchair': '267f',
'no_smoking': '1f6ad',
'u6708': '1f237',
'u7533': '1f238',
'sa': '1f202',
'm': '24c2',
'passport_control': '1f6c2',
'baggage_claim': '1f6c4',
'left_luggage': '1f6c5',
'customs': '1f6c3',
'accept': '1f251',
'secret': '3299',
'congratulations': '3297',
'cl': '1f191',
'sos': '1f198',
'id': '1f194',
'no_entry_sign': '1f6ab',
'underage': '1f51e',
'no_mobile_phones': '1f4f5',
'do_not_litter': '1f6af',
'non-potable_water': '1f6b1',
'no_bicycles': '1f6b3',
'no_pedestrians': '1f6b7',
'children_crossing': '1f6b8',
'no_entry': '26d4',
'eight_spoked_asterisk': '2733',
'sparkle': '2747',
'negative_squared_cross_mark': '274e',
'white_check_mark': '2705',
'eight_pointed_black_star': '2734',
'heart_decoration': '1f49f',
'vs': '1f19a',
'vibration_mode': '1f4f3',
'mobile_phone_off': '1f4f4',
'a': '1f170',
'b': '1f171',
'ab': '1f18e',
'o2': '1f17e',
'diamond_shape_with_a_dot_inside': '1f4a0',
'loop': '27bf',
'recycle': '267b',
'aries': '2648',
'taurus': '2649',
'gemini': '264a',
'cancer': '264b',
'leo': '264c',
'virgo': '264d',
'libra': '264e',
'scorpius': '264f',
'sagittarius': '2650',
'capricorn': '2651',
'aquarius': '2652',
'pisces': '2653',
'ophiuchus': '26ce',
'six_pointed_star': '1f52f',
'atm': '1f3e7',
'chart': '1f4b9',
'heavy_dollar_sign': '1f4b2',
'currency_exchange': '1f4b1',
'copyright': '00a9',
'registered': '00ae',
'tm': '2122',
'x': '274c',
'bangbang': '203c',
'interrobang': '2049',
'exclamation': '2757',
'heavy_exclamation_mark': '2757',
'question': '2753',
'grey_exclamation': '2755',
'grey_question': '2754',
'o': '2b55',
'top': '1f51d',
'end': '1f51a',
'back': '1f519',
'on': '1f51b',
'soon': '1f51c',
'arrows_clockwise': '1f503',
'clock12': '1f55b',
'clock1230': '1f567',
'clock1': '1f550',
'clock130': '1f55c',
'clock2': '1f551',
'clock230': '1f55d',
'clock3': '1f552',
'clock330': '1f55e',
'clock4': '1f553',
'clock430': '1f55f',
'clock5': '1f554',
'clock530': '1f560',
'clock6': '1f555',
'clock7': '1f556',
'clock8': '1f557',
'clock9': '1f558',
'clock10': '1f559',
'clock11': '1f55a',
'clock630': '1f561',
'clock730': '1f562',
'clock830': '1f563',
'clock930': '1f564',
'clock1030': '1f565',
'clock1130': '1f566',
'heavy_multiplication_x': '2716',
'heavy_plus_sign': '2795',
'heavy_minus_sign': '2796',
'heavy_division_sign': '2797',
'spades': '2660',
'hearts': '2665',
'clubs': '2663',
'diamonds': '2666',
'white_flower': '1f4ae',
'100': '1f4af',
'heavy_check_mark': '2714',
'ballot_box_with_check': '2611',
'radio_button': '1f518',
'link': '1f517',
'curly_loop': '27b0',
'wavy_dash': '3030',
'part_alternation_mark': '303d',
'trident': '1f531',
'black_medium_square': '25fc',
'white_medium_square': '25fb',
'black_medium_small_square': '25fe',
'white_medium_small_square': '25fd',
'black_small_square': '25aa',
'white_small_square': '25ab',
'small_red_triangle': '1f53a',
'black_square_button': '1f532',
'white_square_button': '1f533',
'black_circle': '26ab',
'white_circle': '26aa',
'red_circle': '1f534',
'large_blue_circle': '1f535',
'small_red_triangle_down': '1f53b',
'white_large_square': '2b1c',
'black_large_square': '2b1b',
'large_orange_diamond': '1f536',
'large_blue_diamond': '1f537',
'small_orange_diamond': '1f538',
'small_blue_diamond': '1f539',
'ca': '1f1e8-1f1e6',
'eh': '1f1e8-1f1e6',
'pk': '1f1f5-1f1f0',
'za': '1f1ff-1f1e6',
'slightly_smiling_face': '1f642',
'slightly_frowning_face': '1f641',
'upside_down_face': '1f643',
'mm': 'mm',
'mattermost': 'mattermost',
'basecamp': 'basecamp',
'basecampy': 'basecampy',
'bowtie': 'bowtie',
'feelsgood': 'feelsgood',
'finnadie': 'finnadie',
'fu': 'fu',
'goberserk': 'goberserk',
'godmode': 'godmode',
'hurtrealbad': 'hurtrealbad',
'metal': 'metal',
'neckbeard': 'neckbeard',
'octocat': 'octocat',
'rage1': 'rage1',
'rage2': 'rage2',
'rage3': 'rage3',
'rage4': 'rage4',
'shipit': 'shipit',
'squirrel': 'squirrel',
'suspect': 'suspect',
'taco': 'taco',
'trollface': 'trollface',
}
def detect_syntax(basename, mime):
if mime in mime_to_syntax:
return mime_to_syntax[mime]
(_, ext) = os.path.splitext(basename)
if not ext:
ext = basename
    return ext_to_syntax.get(ext)
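# e.g. detect_syntax('script.py', None) -> 'python';
#      detect_syntax('notes.txt', None) -> None (unknown extension)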
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def md_table(data):
md = []
for i, row in enumerate(data):
if i == 1:
md.append("| --- " * len(row) + "|")
md.append("| {} |".format(" | ".join(
[str(cell).replace("|", "❘").replace("\n", " ").replace("\r", " ") for cell in row]
)))
return "\n".join(md)
class Message:
def __init__(self, channel=None, url=None, username=None, icon=None,
config_section='DEFAULT', config_name='mattersend',
config_file=None):
# CONFIG file
config = configparser.ConfigParser()
if config_file:
config.read(config_file)
elif config_name:
config.read(["/etc/{}.conf".format(config_name), os.path.expanduser("~/.{}.conf".format(config_name))])
config = dict(config.items(config_section))
# merge config file with cli arguments
self.url = config.get('url') if url is None else url
self.channel = config.get('channel') if channel is None else channel
self.username = config.get('username') if username is None else username
self.icon = config.get('icon') if icon is None else icon
self.text = ''
self.attachments = []
def get_payload(self):
payload = {}
for opt in ('text', 'channel', 'username'):
optvalue = getattr(self, opt)
if optvalue is not None:
payload[opt] = optvalue.strip()
opt, optvalue = self.get_icon()
if optvalue is not None:
payload[opt] = optvalue
if self.attachments:
payload['attachments'] = [a.data() for a in self.attachments]
return json.dumps(payload, sort_keys=True, indent=4)
def get_icon(self):
if self.icon is None:
return None, None
ioptvalue = self.icon.strip()
ioptname = 'icon_url' if '://' in ioptvalue else 'icon_emoji'
        # workaround for Mattermost's missing icon_emoji support (until implemented upstream)
if ioptname == 'icon_emoji' and ioptvalue[0] == ':' and ioptvalue[-1] == ':':
ioptvalue = emoji_to_code.get(ioptvalue[1:-1], False)
if ioptvalue:
baseurl = self.url.split('/hooks/', 1)
if len(baseurl) == 2:
ioptname = 'icon_url'
ioptvalue = '{0}/static/emoji/{1}.png'.format(baseurl[0], ioptvalue)
return ioptname, ioptvalue
def append(self, text, separator=None):
if self.text and separator is not None:
self.text += separator
self.text += text
def attach_file(self, filename, text=None, tabular=False, syntax='auto', fileinfo=False):
attachment = Attachment()
if tabular:
syntax = None
(mime, _) = mimetypes.guess_type(filename)
attachment.title = os.path.basename(filename)
if text is None:
            with open(filename, 'rb') as f:  # binary read; decoded as UTF-8 below
text = f.read().decode('utf-8')
if tabular:
csvfile = StringIO(text.strip())
if tabular == 'sniff':
dialect = csv.Sniffer().sniff(text)
else:
dialect = tabular
text = md_table(csv.reader(csvfile, dialect))
elif syntax == 'auto':
syntax = detect_syntax(attachment.title, mime)
if syntax is not None:
text = md_code(text, syntax)
attachment.text = text
if fileinfo:
statinfo = os.stat(filename)
attachment.add_field('Size', sizeof_fmt(statinfo.st_size), True)
attachment.add_field('Mime', mime, True)
self.attachments.append(attachment)
return attachment
def send(self):
if self.url is None:
raise TypeError('Missing mattermost webhook URL')
if self.channel is None:
raise TypeError('Missing destination channel')
import requests
payload = self.get_payload()
r = requests.post(self.url, data={'payload': payload})
if r.status_code != 200:
try:
r = json.loads(r.text)
except ValueError:
r = {'message': r.text, 'status_code': r.status_code}
raise RuntimeError("{} ({})".format(r['message'], r['status_code']))
return r
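# Illustrative usage sketch (channel name and webhook URL are placeholders):
#   msg = Message(channel='town-square',
#                 url='https://mattermost.example.com/hooks/xyz')
#   msg.text = 'Hello from mattersend'
#   msg.send()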
class Attachment:
def __init__(self, text=''):
self.text = text
self.color = None
self.pretext = None
self.fallback = None
self.author_name = None
self.author_link = None
self.author_icon = None
self.title = None
self.title_link = None
self.image_url = None
self.thumb_url = None
self.fields = []
def set_author(self, name, link=None, icon=None):
self.author_name = name
self.author_link = link
self.author_icon = icon
def set_title(self, title, link=None):
self.title = title
self.title_link = link
def add_field(self, title, value, short=None):
field = {
'title': str(title),
'value': str(value),
}
if short is not None:
field['short'] = bool(short)
self.fields.append(field)
def data(self):
data = {k: v for (k, v) in self.__dict__.items() if v}
if not self.fallback:
data['fallback'] = self.text
# 4000+ chars triggers error on mattermost, not sure where the limit is
data['text'] = data['text'][:3500]
data['fallback'] = data['fallback'][:3500]
return data
def md_code(code, syntax='plain'):
if syntax == 'plain':
syntax = ''
return "```{}\n{}```".format(syntax, code)
def main():
try:
import setproctitle
setproctitle.setproctitle(name)
except ImportError:
pass
dialects = csv.list_dialects()
dialects.sort()
dialects.insert(0, 'sniff')
# CLI arguments
parser = argparse.ArgumentParser(prog=name, description=description)
parser.add_argument('-V', '--version', action='version', version="%(prog)s " + version)
parser.add_argument('-C', '--config', help='Use a different configuration file')
parser.add_argument('-s', '--section', help='Configuration file section', default='DEFAULT')
parser.add_argument('-c', '--channel', help='Send to this channel or @username')
parser.add_argument('-U', '--url', help='Mattermost webhook URL')
parser.add_argument('-u', '--username', help='Username')
parser.add_argument('-i', '--icon', help='Icon')
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--tabular', metavar='DIALECT', const='sniff',
nargs='?', choices=dialects,
help='Parse input as CSV and format it as a table (DIALECT can be one of %(choices)s)')
group.add_argument('-y', '--syntax', default='auto')
parser.add_argument('-I', '--info', action='store_true',
help='Include file information in message')
parser.add_argument('-n', '--dry-run', '--just-print', action='store_true',
help="Don't send, just print the payload")
parser.add_argument('-f', '--file', default='-',
help="Read content from FILE. If - reads from standard input (DEFAULT: %(default)s)")
args = parser.parse_args()
if args.file == '-':
message = sys.stdin.read()
filename = None
else:
message = ''
filename = args.file
try:
payload = send(args.channel, message, filename, args.url,
args.username, args.icon, args.syntax, args.tabular,
args.info, args.dry_run, args.section, name,
args.config)
except (configparser.Error, TypeError, RuntimeError) as e:
sys.exit(str(e))
if args.dry_run:
print(payload)
def send(channel, message='', filename=False, url=None, username=None,
icon=None, syntax='auto', tabular=False, fileinfo=False,
just_return=False, config_section='DEFAULT',
config_name='mattersend', config_file=None):
msg = Message(channel, url, username, icon, config_section,
config_name, config_file)
if filename:
if syntax == 'none':
syntax = None
msg.attach_file(filename, None, tabular, syntax, fileinfo)
else:
if tabular:
syntax = None
csvfile = StringIO(message.strip())
if tabular == 'sniff':
dialect = csv.Sniffer().sniff(message)
else:
dialect = tabular
message = md_table(csv.reader(csvfile, dialect))
elif syntax in ('auto', 'none'):
syntax = None
if syntax is not None:
message = md_code(message, syntax)
msg.text = message
if just_return:
payload = msg.get_payload()
return "POST {}\n{}".format(msg.url, payload)
msg.send()
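# Illustrative sketch (channel and webhook URL are placeholders):
#   send('town-square', 'deploy finished',
#        url='https://mattermost.example.com/hooks/xyz')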
if __name__ == '__main__':
main()
|
Yarrick13/hwasp | refs/heads/master | tests/asp/weakConstraints/valves.1.gringo.test.py | 4 | input = """
1 1 1 1 63
1 1 1 1 64
1 1 2 0 65 66
1 1 2 0 67 68
1 1 2 0 69 70
1 1 2 0 71 72
1 1 2 0 66 65
1 1 2 0 68 67
1 1 2 0 70 69
1 1 2 0 72 71
1 81 1 1 66
1 82 1 1 65
1 83 1 1 68
1 84 1 1 67
1 85 1 1 70
1 86 1 1 69
1 87 1 1 72
1 88 1 1 71
1 89 1 1 66
1 90 1 1 68
1 91 1 1 67
1 92 1 1 69
1 93 2 1 68 81
1 94 1 0 81
1 95 2 1 70 82
1 96 2 1 72 82
1 97 2 1 66 83
1 98 1 0 83
1 99 1 0 84
1 100 2 1 69 84
1 101 2 1 72 85
1 102 2 1 65 85
1 103 1 0 86
1 104 2 1 67 86
1 105 2 1 70 87
1 106 2 1 65 87
1 107 2 1 65 89
1 108 2 1 67 90
1 109 2 1 68 91
1 110 2 1 70 92
1 81 2 1 68 93
1 111 2 1 67 93
1 81 1 0 94
1 82 2 1 70 95
1 111 2 1 69 95
1 82 2 1 72 96
1 112 2 1 71 96
1 83 2 1 66 97
1 113 2 1 65 97
1 83 1 0 98
1 84 1 0 99
1 113 2 1 70 100
1 84 2 1 69 100
1 85 2 1 72 101
1 114 2 1 71 101
1 115 2 1 66 102
1 85 2 1 65 102
1 86 1 0 103
1 115 2 1 68 104
1 86 2 1 67 104
1 87 2 1 70 105
1 116 2 1 69 105
1 117 2 1 66 106
1 87 2 1 65 106
1 118 2 1 70 107
1 119 2 1 72 107
1 89 2 1 65 107
1 120 1 0 108
1 90 2 1 67 108
1 118 2 1 69 108
1 121 2 1 66 109
1 91 2 1 68 109
1 122 1 0 109
1 92 2 1 70 110
1 123 2 1 72 110
1 121 2 1 65 110
1 124 1 0 111
1 93 2 1 67 111
1 95 2 1 69 111
1 96 2 1 71 112
1 100 2 1 70 113
1 125 2 1 72 113
1 97 2 1 65 113
1 101 2 1 71 114
1 102 2 1 66 115
1 104 2 1 68 115
1 126 1 0 115
1 127 1 0 116
1 128 2 1 67 116
1 105 2 1 69 116
1 106 2 1 66 117
1 128 2 1 68 117
1 129 1 0 117
1 107 2 1 70 118
1 108 2 1 69 118
1 107 2 1 72 119
1 130 2 1 71 119
1 108 1 0 120
1 109 2 1 66 121
1 110 2 1 65 121
1 109 1 0 122
1 110 2 1 72 123
1 131 2 1 71 123
1 111 1 0 124
1 113 2 1 72 125
1 132 2 1 71 125
1 115 1 0 126
1 116 1 0 127
1 117 2 1 68 128
1 116 2 1 67 128
1 117 1 0 129
1 119 2 1 71 130
1 123 2 1 71 131
1 125 2 1 71 132
1 133 0 0
1 134 0 0
1 135 0 0
1 136 0 0
1 137 0 0
1 138 0 0
1 139 1 1 120
1 140 1 1 122
1 141 1 1 94
1 142 1 1 124
1 143 1 1 98
1 144 1 1 99
1 145 1 1 126
1 146 1 1 103
1 147 1 1 129
1 148 1 1 127
1 149 2 1 108 139
1 150 2 1 109 140
1 151 2 1 81 141
1 152 2 1 111 142
1 153 2 1 83 143
1 154 2 1 84 144
1 155 2 1 115 145
1 156 2 1 86 146
1 157 2 1 117 147
1 158 2 1 116 148
1 139 2 1 120 149
1 159 2 1 90 149
1 160 2 1 118 149
1 161 2 1 121 150
1 162 2 1 91 150
1 140 2 1 122 150
1 163 2 1 93 151
1 141 2 1 94 151
1 142 2 1 124 152
1 163 2 1 93 152
1 164 2 1 95 152
1 165 2 1 97 153
1 143 2 1 98 153
1 144 2 1 99 154
1 166 2 1 100 154
1 167 2 1 102 155
1 168 2 1 104 155
1 145 2 1 126 155
1 146 2 1 103 156
1 168 2 1 104 156
1 169 2 1 106 157
1 170 2 1 128 157
1 147 2 1 129 157
1 148 2 1 127 158
1 170 2 1 128 158
1 171 2 1 105 158
1 149 2 1 108 159
1 172 2 1 107 160
1 149 2 1 108 160
1 150 2 1 109 161
1 173 2 1 110 161
1 150 2 1 109 162
1 151 2 1 81 163
1 152 2 1 111 163
1 174 2 1 82 164
1 152 2 1 111 164
1 153 2 1 83 165
1 175 2 1 113 165
1 175 2 1 113 166
1 154 2 1 84 166
1 155 2 1 115 167
1 176 2 1 85 167
1 155 2 1 115 168
1 156 2 1 86 168
1 157 2 1 117 169
1 177 2 1 87 169
1 157 2 1 117 170
1 158 2 1 116 170
1 177 2 1 87 171
1 158 2 1 116 171
1 160 2 1 118 172
1 178 2 1 119 172
1 179 2 1 89 172
1 180 2 1 92 173
1 181 2 1 123 173
1 161 2 1 121 173
1 164 2 1 95 174
1 182 2 1 96 174
1 166 2 1 100 175
1 183 2 1 125 175
1 165 2 1 97 175
1 184 2 1 101 176
1 167 2 1 102 176
1 171 2 1 105 177
1 169 2 1 106 177
1 172 2 1 107 178
1 185 2 1 130 178
1 172 2 1 107 179
1 173 2 1 110 180
1 173 2 1 110 181
1 186 2 1 131 181
1 174 2 1 82 182
1 187 2 1 112 182
1 175 2 1 113 183
1 188 2 1 132 183
1 176 2 1 85 184
1 189 2 1 114 184
1 178 2 1 119 185
1 181 2 1 123 186
1 182 2 1 96 187
1 183 2 1 125 188
1 184 2 1 101 189
1 190 2 1 141 140
1 191 2 1 143 140
1 192 2 1 145 140
1 193 2 1 147 140
1 194 2 1 145 141
1 195 2 1 147 141
1 196 2 1 143 141
1 197 2 1 145 143
1 198 2 1 147 143
1 199 2 1 147 145
1 200 2 1 142 139
1 201 2 1 144 139
1 202 2 1 146 139
1 203 2 1 148 139
1 204 1 0 139
1 205 2 1 146 142
1 206 2 1 148 142
1 207 2 1 144 142
1 208 2 1 146 144
1 209 2 1 148 144
1 210 2 1 148 146
1 211 1 0 161
1 212 2 1 165 161
1 213 2 1 167 161
1 214 2 1 169 161
1 215 2 1 167 165
1 216 2 1 169 165
1 217 2 1 169 167
1 218 1 0 179
1 219 2 1 165 179
1 220 2 1 167 179
1 221 2 1 169 179
1 222 2 1 161 179
1 223 2 1 163 159
1 224 1 0 159
1 225 2 1 168 159
1 226 2 1 170 159
1 227 2 1 162 159
1 228 2 1 163 162
1 229 1 0 162
1 230 2 1 168 162
1 231 2 1 170 162
1 232 2 1 168 163
1 233 2 1 170 163
1 234 1 0 163
1 235 2 1 170 168
1 236 2 1 164 160
1 237 2 1 166 160
1 238 1 0 160
1 239 2 1 171 160
1 240 2 1 180 160
1 241 1 0 164
1 242 2 1 171 164
1 243 2 1 166 164
1 244 1 0 166
1 245 2 1 171 166
1 246 2 1 164 180
1 247 2 1 166 180
1 248 1 0 180
1 249 2 1 171 180
1 250 2 1 182 178
1 251 2 1 183 178
1 252 2 1 184 178
1 253 1 0 178
1 254 2 1 181 178
1 255 2 1 182 181
1 256 2 1 183 181
1 257 2 1 184 181
1 258 1 0 181
1 259 2 1 184 182
1 260 1 0 182
1 261 2 1 183 182
1 262 2 1 184 183
1 263 1 0 183
1 264 1 0 184
1 265 1 0 140
1 266 1 0 141
1 267 2 1 140 141
1 268 1 0 143
1 269 2 1 140 143
1 270 2 1 141 143
1 271 1 0 145
1 272 2 1 140 145
1 273 2 1 141 145
1 274 2 1 143 145
1 275 1 0 147
1 276 2 1 140 147
1 277 2 1 141 147
1 278 2 1 143 147
1 279 2 1 145 147
1 280 2 1 139 142
1 281 1 0 142
1 282 2 1 139 144
1 283 1 0 144
1 284 2 1 142 144
1 285 2 1 139 146
1 286 1 0 146
1 287 2 1 142 146
1 288 2 1 144 146
1 289 2 1 139 148
1 290 1 0 148
1 291 2 1 142 148
1 292 2 1 144 148
1 293 2 1 146 148
1 294 2 1 179 161
1 295 2 1 179 165
1 296 2 1 161 165
1 297 1 0 165
1 298 2 1 179 167
1 299 2 1 161 167
1 300 1 0 167
1 301 2 1 165 167
1 302 2 1 179 169
1 303 2 1 161 169
1 304 1 0 169
1 305 2 1 165 169
1 306 2 1 167 169
1 307 2 1 159 162
1 308 2 1 159 163
1 309 2 1 162 163
1 310 2 1 159 168
1 311 2 1 162 168
1 312 2 1 163 168
1 313 1 0 168
1 314 2 1 159 170
1 315 2 1 162 170
1 316 2 1 163 170
1 317 1 0 170
1 318 2 1 168 170
1 319 2 1 160 164
1 320 2 1 180 164
1 321 2 1 160 166
1 322 2 1 180 166
1 323 2 1 164 166
1 324 2 1 160 171
1 325 2 1 180 171
1 326 2 1 164 171
1 327 2 1 166 171
1 328 1 0 171
1 329 2 1 160 180
1 330 2 1 178 181
1 331 2 1 178 182
1 332 2 1 181 182
1 333 2 1 178 183
1 334 2 1 181 183
1 335 2 1 182 183
1 336 2 1 178 184
1 337 2 1 181 184
1 338 2 1 182 184
1 339 2 1 183 184
1 340 1 0 200
1 341 1 0 218
1 342 1 0 223
1 343 1 0 236
1 344 1 0 250
5 345 627 10 5 340 341 342 343 344 266 280 308 319 331 65 155 129 78 200 57 65 129 78 200
1 346 1 1 345
1 347 1 0 346
1 348 1 0 190
1 349 1 0 211
1 350 1 0 228
1 351 1 0 246
1 352 1 0 255
5 353 619 10 5 348 349 350 351 352 267 281 309 320 332 57 155 129 78 200 57 65 129 78 200
1 354 1 1 353
1 355 1 0 354
1 356 1 0 201
1 357 1 0 219
1 358 1 0 224
1 359 1 0 237
1 360 1 0 251
5 361 627 10 5 356 357 358 359 360 268 282 295 321 333 65 155 129 78 200 57 65 155 78 200
1 362 1 1 361
1 347 1 0 362
1 363 1 0 191
1 364 1 0 212
1 365 1 0 229
1 366 1 0 247
1 367 1 0 256
5 368 619 10 5 363 364 365 366 367 269 283 296 322 334 57 155 129 78 200 57 65 155 78 200
1 369 1 1 368
1 355 1 0 369
1 370 1 0 202
1 371 1 0 220
1 372 1 0 225
1 373 1 0 238
1 374 1 0 252
5 375 627 10 5 370 371 372 373 374 271 285 298 310 336 65 155 129 78 200 57 65 155 129 200
1 376 1 1 375
1 347 1 0 376
1 377 1 0 192
1 378 1 0 213
1 379 1 0 230
1 380 1 0 248
1 381 1 0 257
5 382 619 10 5 377 378 379 380 381 272 286 299 311 337 57 155 129 78 200 57 65 155 129 200
1 383 1 1 382
1 355 1 0 383
1 384 1 0 194
1 385 1 0 205
1 386 1 0 232
1 387 1 0 241
1 388 1 0 259
5 389 529 10 5 384 385 386 387 388 273 287 300 312 338 57 65 129 78 200 57 65 155 129 200
1 390 1 1 389
1 391 1 0 390
1 392 1 0 197
1 393 1 0 208
1 394 1 0 215
1 395 1 0 244
1 396 1 0 262
5 397 555 10 5 392 393 394 395 396 274 288 301 313 339 57 65 155 78 200 57 65 155 129 200
1 398 1 1 397
1 399 1 0 398
1 400 1 0 203
1 401 1 0 221
1 402 1 0 226
1 403 1 0 239
1 404 1 0 253
5 405 627 10 5 400 401 402 403 404 275 289 302 314 324 65 155 129 78 200 57 65 155 129 78
1 406 1 1 405
1 347 1 0 406
1 407 1 0 193
1 408 1 0 214
1 409 1 0 231
1 410 1 0 249
1 411 1 0 258
5 412 619 10 5 407 408 409 410 411 276 290 303 315 325 57 155 129 78 200 57 65 155 129 78
1 413 1 1 412
1 355 1 0 413
1 414 1 0 195
1 415 1 0 206
1 416 1 0 233
1 417 1 0 242
1 418 1 0 260
5 419 529 10 5 414 415 416 417 418 277 291 304 316 326 57 65 129 78 200 57 65 155 129 78
1 420 1 1 419
1 391 1 0 420
1 421 1 0 198
1 422 1 0 209
1 423 1 0 216
1 424 1 0 245
1 425 1 0 263
5 426 555 10 5 421 422 423 424 425 278 292 305 317 327 57 65 155 78 200 57 65 155 129 78
1 427 1 1 426
1 399 1 0 427
1 428 1 0 204
1 429 1 0 222
1 430 1 0 227
1 431 1 0 240
1 432 1 0 254
5 433 627 10 5 428 429 430 431 432 265 294 307 329 330 65 155 129 78 200 57 155 129 78 200
1 434 1 1 433
1 347 1 0 434
1 435 1 0 196
1 436 1 0 207
1 437 1 0 234
1 438 1 0 243
1 439 1 0 261
5 440 529 10 5 435 436 437 438 439 270 284 297 323 335 57 65 129 78 200 57 65 155 78 200
1 441 1 1 440
1 391 1 0 441
1 442 1 0 199
1 443 1 0 210
1 444 1 0 217
1 445 1 0 235
1 446 1 0 264
5 447 606 10 5 442 443 444 445 446 279 293 306 318 328 57 65 155 129 200 57 65 155 129 78
1 448 1 1 447
1 449 1 0 448
1 391 1 1 347
1 391 1 1 355
1 399 1 1 347
1 399 1 1 355
1 449 1 1 347
1 449 1 1 355
1 449 1 1 391
1 449 1 1 399
1 450 1 1 347
1 450 1 1 355
1 450 1 1 391
1 450 1 1 399
1 355 1 1 347
1 399 1 1 391
1 450 1 1 449
1 451 2 1 355 140
1 451 2 1 391 141
1 451 2 1 399 143
1 451 2 1 449 145
1 451 2 1 450 147
1 452 2 1 347 139
1 452 2 1 391 142
1 452 2 1 399 144
1 452 2 1 449 146
1 452 2 1 450 148
1 453 2 1 355 161
1 453 2 1 399 165
1 453 2 1 449 167
1 453 2 1 450 169
1 453 2 1 347 179
1 454 2 1 347 159
1 454 2 1 355 162
1 454 2 1 391 163
1 454 2 1 449 168
1 454 2 1 450 170
1 455 2 1 347 160
1 455 2 1 391 164
1 455 2 1 399 166
1 455 2 1 450 171
1 455 2 1 355 180
1 456 2 1 347 178
1 456 2 1 355 181
1 456 2 1 391 182
1 456 2 1 399 183
1 456 2 1 449 184
3 10 63 64 66 68 70 72 65 67 69 71 0 0
2 457 10 0 4 63 64 66 68 70 72 65 67 69 71
2 458 10 0 5 63 64 66 68 70 72 65 67 69 71
1 459 2 1 458 457
1 1 1 1 459
6 0 6 6 451 452 453 454 455 456 57 65 155 129 78 200
0
10 pipe(1,2)
11 pipe(1,4)
12 pipe(2,3)
13 pipe(2,4)
14 pipe(3,4)
15 pipe(3,5)
22 swap(pipe(1,2),pipe(1,2))
23 swap(pipe(1,4),pipe(1,4))
24 swap(pipe(2,3),pipe(2,3))
25 swap(pipe(2,4),pipe(2,4))
26 swap(pipe(3,4),pipe(3,4))
27 swap(pipe(3,5),pipe(3,5))
28 swap(pipe(1,2),pipe(2,1))
29 swap(pipe(1,4),pipe(4,1))
30 swap(pipe(2,3),pipe(3,2))
31 swap(pipe(2,4),pipe(4,2))
32 swap(pipe(3,4),pipe(4,3))
33 swap(pipe(3,5),pipe(5,3))
34 symm_pipe(1,2)
35 symm_pipe(1,4)
36 symm_pipe(2,3)
37 symm_pipe(2,4)
38 symm_pipe(3,4)
39 symm_pipe(3,5)
40 symm_pipe(2,1)
41 symm_pipe(4,1)
42 symm_pipe(3,2)
43 symm_pipe(4,2)
44 symm_pipe(4,3)
45 symm_pipe(5,3)
46 less_ico(pipe(1,2),pipe(2,3))
47 less_ico(pipe(1,4),pipe(2,3))
48 less_ico(pipe(1,2),pipe(2,4))
49 less_ico(pipe(1,4),pipe(2,4))
50 less_ico(pipe(1,2),pipe(3,4))
51 less_ico(pipe(1,4),pipe(3,4))
52 less_ico(pipe(2,3),pipe(3,4))
53 less_ico(pipe(2,4),pipe(3,4))
54 less_ico(pipe(1,2),pipe(3,5))
55 less_ico(pipe(1,4),pipe(3,5))
56 less_ico(pipe(2,3),pipe(3,5))
57 less_ico(pipe(2,4),pipe(3,5))
58 less_ico(pipe(1,2),pipe(1,4))
59 less_ico(pipe(2,3),pipe(2,4))
60 less_ico(pipe(3,4),pipe(3,5))
9 tank(1)
3 valves_per_pipe(1)
61 drop(2,1)
62 drop(4,1)
2 valves_number(4)
63 valve(1,2)
64 valve(1,4)
66 valve(2,3)
68 valve(2,4)
70 valve(3,4)
72 valve(3,5)
65 valve(3,2)
67 valve(4,2)
69 valve(4,3)
71 valve(5,3)
73 broken(pipe(1,2),pipe(1,2))
74 broken(pipe(1,4),pipe(1,4))
75 broken(pipe(2,3),pipe(2,3))
76 broken(pipe(2,4),pipe(2,4))
77 broken(pipe(3,4),pipe(3,4))
78 broken(pipe(3,5),pipe(3,5))
89 broken(pipe(1,2),pipe(2,3))
90 broken(pipe(1,2),pipe(2,4))
91 broken(pipe(1,4),pipe(2,4))
92 broken(pipe(1,4),pipe(3,4))
93 broken(pipe(2,3),pipe(2,4))
94 broken(pipe(2,3),pipe(1,2))
95 broken(pipe(2,3),pipe(3,4))
96 broken(pipe(2,3),pipe(3,5))
97 broken(pipe(2,4),pipe(2,3))
98 broken(pipe(2,4),pipe(1,2))
99 broken(pipe(2,4),pipe(1,4))
100 broken(pipe(2,4),pipe(3,4))
101 broken(pipe(3,4),pipe(3,5))
102 broken(pipe(3,4),pipe(2,3))
103 broken(pipe(3,4),pipe(1,4))
104 broken(pipe(3,4),pipe(2,4))
105 broken(pipe(3,5),pipe(3,4))
106 broken(pipe(3,5),pipe(2,3))
118 broken(pipe(1,2),pipe(3,4))
119 broken(pipe(1,2),pipe(3,5))
120 broken(pipe(1,2),pipe(1,4))
121 broken(pipe(1,4),pipe(2,3))
122 broken(pipe(1,4),pipe(1,2))
123 broken(pipe(1,4),pipe(3,5))
124 broken(pipe(2,3),pipe(1,4))
125 broken(pipe(2,4),pipe(3,5))
126 broken(pipe(3,4),pipe(1,2))
127 broken(pipe(3,5),pipe(1,4))
128 broken(pipe(3,5),pipe(2,4))
129 broken(pipe(3,5),pipe(1,2))
79 extend(pipe(1,2),2)
80 extend(pipe(1,4),4)
81 extend(pipe(2,3),2)
82 extend(pipe(2,3),3)
83 extend(pipe(2,4),2)
84 extend(pipe(2,4),4)
85 extend(pipe(3,4),3)
86 extend(pipe(3,4),4)
87 extend(pipe(3,5),3)
88 extend(pipe(3,5),5)
107 extend(pipe(1,2),3)
108 extend(pipe(1,2),4)
109 extend(pipe(1,4),2)
110 extend(pipe(1,4),3)
111 extend(pipe(2,3),4)
112 extend(pipe(2,3),5)
113 extend(pipe(2,4),3)
114 extend(pipe(3,4),5)
115 extend(pipe(3,4),2)
116 extend(pipe(3,5),4)
117 extend(pipe(3,5),2)
130 extend(pipe(1,2),5)
131 extend(pipe(1,4),5)
132 extend(pipe(2,4),5)
133 reached(pipe(1,2),1)
134 reached(pipe(1,4),1)
135 reached(pipe(2,3),1)
136 reached(pipe(2,4),1)
137 reached(pipe(3,4),1)
138 reached(pipe(3,5),1)
149 reached(pipe(1,2),4)
150 reached(pipe(1,4),2)
151 reached(pipe(2,3),2)
152 reached(pipe(2,3),4)
153 reached(pipe(2,4),2)
154 reached(pipe(2,4),4)
155 reached(pipe(3,4),2)
156 reached(pipe(3,4),4)
157 reached(pipe(3,5),2)
158 reached(pipe(3,5),4)
172 reached(pipe(1,2),3)
173 reached(pipe(1,4),3)
174 reached(pipe(2,3),3)
175 reached(pipe(2,4),3)
176 reached(pipe(3,4),3)
177 reached(pipe(3,5),3)
185 reached(pipe(1,2),5)
186 reached(pipe(1,4),5)
187 reached(pipe(2,3),5)
188 reached(pipe(2,4),5)
189 reached(pipe(3,4),5)
139 deliver(pipe(1,2),pipe(1,4))
140 deliver(pipe(1,4),pipe(1,2))
141 deliver(pipe(2,3),pipe(1,2))
142 deliver(pipe(2,3),pipe(1,4))
143 deliver(pipe(2,4),pipe(1,2))
144 deliver(pipe(2,4),pipe(1,4))
145 deliver(pipe(3,4),pipe(1,2))
146 deliver(pipe(3,4),pipe(1,4))
147 deliver(pipe(3,5),pipe(1,2))
148 deliver(pipe(3,5),pipe(1,4))
159 deliver(pipe(1,2),pipe(2,4))
160 deliver(pipe(1,2),pipe(3,4))
161 deliver(pipe(1,4),pipe(2,3))
162 deliver(pipe(1,4),pipe(2,4))
163 deliver(pipe(2,3),pipe(2,4))
164 deliver(pipe(2,3),pipe(3,4))
165 deliver(pipe(2,4),pipe(2,3))
166 deliver(pipe(2,4),pipe(3,4))
167 deliver(pipe(3,4),pipe(2,3))
168 deliver(pipe(3,4),pipe(2,4))
169 deliver(pipe(3,5),pipe(2,3))
170 deliver(pipe(3,5),pipe(2,4))
171 deliver(pipe(3,5),pipe(3,4))
178 deliver(pipe(1,2),pipe(3,5))
179 deliver(pipe(1,2),pipe(2,3))
180 deliver(pipe(1,4),pipe(3,4))
181 deliver(pipe(1,4),pipe(3,5))
182 deliver(pipe(2,3),pipe(3,5))
183 deliver(pipe(2,4),pipe(3,5))
184 deliver(pipe(3,4),pipe(3,5))
16 dem(1,2,57)
17 dem(1,4,65)
18 dem(2,3,155)
19 dem(2,4,129)
20 dem(3,4,78)
21 dem(3,5,200)
190 compare(pipe(1,4),pipe(2,3),pipe(1,2),-57)
191 compare(pipe(1,4),pipe(2,4),pipe(1,2),-57)
192 compare(pipe(1,4),pipe(3,4),pipe(1,2),-57)
193 compare(pipe(1,4),pipe(3,5),pipe(1,2),-57)
194 compare(pipe(2,3),pipe(3,4),pipe(1,2),-57)
195 compare(pipe(2,3),pipe(3,5),pipe(1,2),-57)
196 compare(pipe(2,3),pipe(2,4),pipe(1,2),-57)
197 compare(pipe(2,4),pipe(3,4),pipe(1,2),-57)
198 compare(pipe(2,4),pipe(3,5),pipe(1,2),-57)
199 compare(pipe(3,4),pipe(3,5),pipe(1,2),-57)
200 compare(pipe(1,2),pipe(2,3),pipe(1,4),-65)
201 compare(pipe(1,2),pipe(2,4),pipe(1,4),-65)
202 compare(pipe(1,2),pipe(3,4),pipe(1,4),-65)
203 compare(pipe(1,2),pipe(3,5),pipe(1,4),-65)
204 compare(pipe(1,2),pipe(1,4),pipe(1,4),-65)
205 compare(pipe(2,3),pipe(3,4),pipe(1,4),-65)
206 compare(pipe(2,3),pipe(3,5),pipe(1,4),-65)
207 compare(pipe(2,3),pipe(2,4),pipe(1,4),-65)
208 compare(pipe(2,4),pipe(3,4),pipe(1,4),-65)
209 compare(pipe(2,4),pipe(3,5),pipe(1,4),-65)
210 compare(pipe(3,4),pipe(3,5),pipe(1,4),-65)
211 compare(pipe(1,4),pipe(2,3),pipe(2,3),-155)
212 compare(pipe(1,4),pipe(2,4),pipe(2,3),-155)
213 compare(pipe(1,4),pipe(3,4),pipe(2,3),-155)
214 compare(pipe(1,4),pipe(3,5),pipe(2,3),-155)
215 compare(pipe(2,4),pipe(3,4),pipe(2,3),-155)
216 compare(pipe(2,4),pipe(3,5),pipe(2,3),-155)
217 compare(pipe(3,4),pipe(3,5),pipe(2,3),-155)
218 compare(pipe(1,2),pipe(2,3),pipe(2,3),-155)
219 compare(pipe(1,2),pipe(2,4),pipe(2,3),-155)
220 compare(pipe(1,2),pipe(3,4),pipe(2,3),-155)
221 compare(pipe(1,2),pipe(3,5),pipe(2,3),-155)
222 compare(pipe(1,2),pipe(1,4),pipe(2,3),-155)
223 compare(pipe(1,2),pipe(2,3),pipe(2,4),-129)
224 compare(pipe(1,2),pipe(2,4),pipe(2,4),-129)
225 compare(pipe(1,2),pipe(3,4),pipe(2,4),-129)
226 compare(pipe(1,2),pipe(3,5),pipe(2,4),-129)
227 compare(pipe(1,2),pipe(1,4),pipe(2,4),-129)
228 compare(pipe(1,4),pipe(2,3),pipe(2,4),-129)
229 compare(pipe(1,4),pipe(2,4),pipe(2,4),-129)
230 compare(pipe(1,4),pipe(3,4),pipe(2,4),-129)
231 compare(pipe(1,4),pipe(3,5),pipe(2,4),-129)
232 compare(pipe(2,3),pipe(3,4),pipe(2,4),-129)
233 compare(pipe(2,3),pipe(3,5),pipe(2,4),-129)
234 compare(pipe(2,3),pipe(2,4),pipe(2,4),-129)
235 compare(pipe(3,4),pipe(3,5),pipe(2,4),-129)
236 compare(pipe(1,2),pipe(2,3),pipe(3,4),-78)
237 compare(pipe(1,2),pipe(2,4),pipe(3,4),-78)
238 compare(pipe(1,2),pipe(3,4),pipe(3,4),-78)
239 compare(pipe(1,2),pipe(3,5),pipe(3,4),-78)
240 compare(pipe(1,2),pipe(1,4),pipe(3,4),-78)
241 compare(pipe(2,3),pipe(3,4),pipe(3,4),-78)
242 compare(pipe(2,3),pipe(3,5),pipe(3,4),-78)
243 compare(pipe(2,3),pipe(2,4),pipe(3,4),-78)
244 compare(pipe(2,4),pipe(3,4),pipe(3,4),-78)
245 compare(pipe(2,4),pipe(3,5),pipe(3,4),-78)
246 compare(pipe(1,4),pipe(2,3),pipe(3,4),-78)
247 compare(pipe(1,4),pipe(2,4),pipe(3,4),-78)
248 compare(pipe(1,4),pipe(3,4),pipe(3,4),-78)
249 compare(pipe(1,4),pipe(3,5),pipe(3,4),-78)
250 compare(pipe(1,2),pipe(2,3),pipe(3,5),-200)
251 compare(pipe(1,2),pipe(2,4),pipe(3,5),-200)
252 compare(pipe(1,2),pipe(3,4),pipe(3,5),-200)
253 compare(pipe(1,2),pipe(3,5),pipe(3,5),-200)
254 compare(pipe(1,2),pipe(1,4),pipe(3,5),-200)
255 compare(pipe(1,4),pipe(2,3),pipe(3,5),-200)
256 compare(pipe(1,4),pipe(2,4),pipe(3,5),-200)
257 compare(pipe(1,4),pipe(3,4),pipe(3,5),-200)
258 compare(pipe(1,4),pipe(3,5),pipe(3,5),-200)
259 compare(pipe(2,3),pipe(3,4),pipe(3,5),-200)
260 compare(pipe(2,3),pipe(3,5),pipe(3,5),-200)
261 compare(pipe(2,3),pipe(2,4),pipe(3,5),-200)
262 compare(pipe(2,4),pipe(3,4),pipe(3,5),-200)
263 compare(pipe(2,4),pipe(3,5),pipe(3,5),-200)
264 compare(pipe(3,4),pipe(3,5),pipe(3,5),-200)
265 compare(pipe(1,2),pipe(1,4),pipe(1,2),57)
266 compare(pipe(1,2),pipe(2,3),pipe(1,2),57)
267 compare(pipe(1,4),pipe(2,3),pipe(1,2),57)
268 compare(pipe(1,2),pipe(2,4),pipe(1,2),57)
269 compare(pipe(1,4),pipe(2,4),pipe(1,2),57)
270 compare(pipe(2,3),pipe(2,4),pipe(1,2),57)
271 compare(pipe(1,2),pipe(3,4),pipe(1,2),57)
272 compare(pipe(1,4),pipe(3,4),pipe(1,2),57)
273 compare(pipe(2,3),pipe(3,4),pipe(1,2),57)
274 compare(pipe(2,4),pipe(3,4),pipe(1,2),57)
275 compare(pipe(1,2),pipe(3,5),pipe(1,2),57)
276 compare(pipe(1,4),pipe(3,5),pipe(1,2),57)
277 compare(pipe(2,3),pipe(3,5),pipe(1,2),57)
278 compare(pipe(2,4),pipe(3,5),pipe(1,2),57)
279 compare(pipe(3,4),pipe(3,5),pipe(1,2),57)
280 compare(pipe(1,2),pipe(2,3),pipe(1,4),65)
281 compare(pipe(1,4),pipe(2,3),pipe(1,4),65)
282 compare(pipe(1,2),pipe(2,4),pipe(1,4),65)
283 compare(pipe(1,4),pipe(2,4),pipe(1,4),65)
284 compare(pipe(2,3),pipe(2,4),pipe(1,4),65)
285 compare(pipe(1,2),pipe(3,4),pipe(1,4),65)
286 compare(pipe(1,4),pipe(3,4),pipe(1,4),65)
287 compare(pipe(2,3),pipe(3,4),pipe(1,4),65)
288 compare(pipe(2,4),pipe(3,4),pipe(1,4),65)
289 compare(pipe(1,2),pipe(3,5),pipe(1,4),65)
290 compare(pipe(1,4),pipe(3,5),pipe(1,4),65)
291 compare(pipe(2,3),pipe(3,5),pipe(1,4),65)
292 compare(pipe(2,4),pipe(3,5),pipe(1,4),65)
293 compare(pipe(3,4),pipe(3,5),pipe(1,4),65)
294 compare(pipe(1,2),pipe(1,4),pipe(2,3),155)
295 compare(pipe(1,2),pipe(2,4),pipe(2,3),155)
296 compare(pipe(1,4),pipe(2,4),pipe(2,3),155)
297 compare(pipe(2,3),pipe(2,4),pipe(2,3),155)
298 compare(pipe(1,2),pipe(3,4),pipe(2,3),155)
299 compare(pipe(1,4),pipe(3,4),pipe(2,3),155)
300 compare(pipe(2,3),pipe(3,4),pipe(2,3),155)
301 compare(pipe(2,4),pipe(3,4),pipe(2,3),155)
302 compare(pipe(1,2),pipe(3,5),pipe(2,3),155)
303 compare(pipe(1,4),pipe(3,5),pipe(2,3),155)
304 compare(pipe(2,3),pipe(3,5),pipe(2,3),155)
305 compare(pipe(2,4),pipe(3,5),pipe(2,3),155)
306 compare(pipe(3,4),pipe(3,5),pipe(2,3),155)
307 compare(pipe(1,2),pipe(1,4),pipe(2,4),129)
308 compare(pipe(1,2),pipe(2,3),pipe(2,4),129)
309 compare(pipe(1,4),pipe(2,3),pipe(2,4),129)
310 compare(pipe(1,2),pipe(3,4),pipe(2,4),129)
311 compare(pipe(1,4),pipe(3,4),pipe(2,4),129)
312 compare(pipe(2,3),pipe(3,4),pipe(2,4),129)
313 compare(pipe(2,4),pipe(3,4),pipe(2,4),129)
314 compare(pipe(1,2),pipe(3,5),pipe(2,4),129)
315 compare(pipe(1,4),pipe(3,5),pipe(2,4),129)
316 compare(pipe(2,3),pipe(3,5),pipe(2,4),129)
317 compare(pipe(2,4),pipe(3,5),pipe(2,4),129)
318 compare(pipe(3,4),pipe(3,5),pipe(2,4),129)
319 compare(pipe(1,2),pipe(2,3),pipe(3,4),78)
320 compare(pipe(1,4),pipe(2,3),pipe(3,4),78)
321 compare(pipe(1,2),pipe(2,4),pipe(3,4),78)
322 compare(pipe(1,4),pipe(2,4),pipe(3,4),78)
323 compare(pipe(2,3),pipe(2,4),pipe(3,4),78)
324 compare(pipe(1,2),pipe(3,5),pipe(3,4),78)
325 compare(pipe(1,4),pipe(3,5),pipe(3,4),78)
326 compare(pipe(2,3),pipe(3,5),pipe(3,4),78)
327 compare(pipe(2,4),pipe(3,5),pipe(3,4),78)
328 compare(pipe(3,4),pipe(3,5),pipe(3,4),78)
329 compare(pipe(1,2),pipe(1,4),pipe(3,4),78)
330 compare(pipe(1,2),pipe(1,4),pipe(3,5),200)
331 compare(pipe(1,2),pipe(2,3),pipe(3,5),200)
332 compare(pipe(1,4),pipe(2,3),pipe(3,5),200)
333 compare(pipe(1,2),pipe(2,4),pipe(3,5),200)
334 compare(pipe(1,4),pipe(2,4),pipe(3,5),200)
335 compare(pipe(2,3),pipe(2,4),pipe(3,5),200)
336 compare(pipe(1,2),pipe(3,4),pipe(3,5),200)
337 compare(pipe(1,4),pipe(3,4),pipe(3,5),200)
338 compare(pipe(2,3),pipe(3,4),pipe(3,5),200)
339 compare(pipe(2,4),pipe(3,4),pipe(3,5),200)
347 lower(pipe(1,2))
355 lower(pipe(1,4))
391 lower(pipe(2,3))
399 lower(pipe(2,4))
449 lower(pipe(3,4))
450 lower(pipe(3,5))
451 worst_deliv_dem(1,2,57)
452 worst_deliv_dem(1,4,65)
453 worst_deliv_dem(2,3,155)
454 worst_deliv_dem(2,4,129)
455 worst_deliv_dem(3,4,78)
456 worst_deliv_dem(3,5,200)
4 junction(1)
5 junction(2)
6 junction(3)
7 junction(4)
8 junction(5)
0
B+
0
B-
1
0
1
"""
output = """
COST 343@1
"""
|
Opentrons/labware | refs/heads/master | api/tests/opentrons/config/test_reset.py | 2 | from collections import namedtuple
from unittest.mock import patch, MagicMock
import pytest
from opentrons.config import reset
@pytest.fixture
def mock_reset_boot_scripts():
with patch("opentrons.config.reset.reset_boot_scripts") as m:
yield m
@pytest.fixture
def mock_reset_labware_calibration():
with patch("opentrons.config.reset.reset_labware_calibration") as m:
yield m
@pytest.fixture
def mock_reset_tip_probe():
with patch("opentrons.config.reset.reset_tip_probe") as m:
yield m
@pytest.fixture()
def mock_db():
with patch("opentrons.config.reset.db") as m:
yield m
@pytest.fixture()
def mock_labware():
with patch("opentrons.config.reset.delete") as m:
yield m
@pytest.fixture()
def mock_robot_config():
with patch("opentrons.config.reset.rc") as m:
yield m
def test_reset_empty_set(mock_reset_boot_scripts,
mock_reset_labware_calibration,
mock_reset_tip_probe):
reset.reset(set())
mock_reset_labware_calibration.assert_not_called()
mock_reset_boot_scripts.assert_not_called()
mock_reset_tip_probe.assert_not_called()
def test_reset_all_set(mock_reset_boot_scripts,
mock_reset_labware_calibration,
mock_reset_tip_probe):
reset.reset({reset.ResetOptionId.boot_scripts,
reset.ResetOptionId.tip_probe,
reset.ResetOptionId.labware_calibration})
mock_reset_labware_calibration.assert_called_once()
mock_reset_boot_scripts.assert_called_once()
mock_reset_tip_probe.assert_called_once()
def test_labware_calibration_reset(mock_db, mock_labware):
reset.reset_labware_calibration()
# Check side effecting function calls
mock_db.reset.assert_called_once()
mock_labware.clear_calibrations.assert_called_once()
def test_tip_probe_reset(mock_robot_config):
# Create a named tuple of the robot_config fields we care about
FakeRobotConfig = namedtuple("FakeRobotConfig",
["instrument_offset", "tip_length"])
# Instantiate with None and a mock.
obj = FakeRobotConfig(None, MagicMock())
# Mock out build_fallback_instrument_offset
mock_robot_config.build_fallback_instrument_offset.return_value = 100
# Mock out load to return our fake robot config
mock_robot_config.load.return_value = obj
# Call the test function
reset.reset_tip_probe()
# Check the side effects
obj.tip_length.clear.assert_called_once_with()
mock_robot_config.save_robot_settings.assert_called_once_with(
FakeRobotConfig(100,
obj.tip_length))
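# Example manual run (invocation assumed; path taken from this module's
# location in the repository):
#
#   pytest api/tests/opentrons/config/test_reset.py -v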
|
v-iam/azure-sdk-for-python | refs/heads/master | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/network_interface_dns_settings.py | 9 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceDnsSettings(Model):
"""DNS settings of a network interface.
:param dns_servers: List of DNS servers IP addresses. Use
'AzureProvidedDNS' to switch to azure provided DNS resolution.
     'AzureProvidedDNS' cannot be combined with other IPs; it must be the
     only value in the dnsServers collection.
:type dns_servers: list of str
:param applied_dns_servers: If the VM that uses this NIC is part of an
Availability Set, then this list will have the union of all DNS servers
from all NICs that are part of the Availability Set. This property is what
is configured on each of those VMs.
:type applied_dns_servers: list of str
:param internal_dns_name_label: Relative DNS name for this NIC used for
internal communications between VMs in the same virtual network.
:type internal_dns_name_label: str
:param internal_fqdn: Fully qualified DNS name supporting internal
communications between VMs in the same virtual network.
:type internal_fqdn: str
:param internal_domain_name_suffix: Even if internalDnsNameLabel is not
specified, a DNS entry is created for the primary NIC of the VM. This DNS
name can be constructed by concatenating the VM name with the value of
internalDomainNameSuffix.
:type internal_domain_name_suffix: str
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
'internal_domain_name_suffix': {'key': 'internalDomainNameSuffix', 'type': 'str'},
}
def __init__(self, dns_servers=None, applied_dns_servers=None, internal_dns_name_label=None, internal_fqdn=None, internal_domain_name_suffix=None):
self.dns_servers = dns_servers
self.applied_dns_servers = applied_dns_servers
self.internal_dns_name_label = internal_dns_name_label
self.internal_fqdn = internal_fqdn
self.internal_domain_name_suffix = internal_domain_name_suffix
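# A minimal usage sketch (not part of the generated code): build the model and
# serialize it with msrest. Passing a class map to Serializer is an assumption
# based on msrest's public API.
if __name__ == '__main__':
    from msrest.serialization import Serializer
    settings = NetworkInterfaceDnsSettings(
        dns_servers=['10.0.0.4', '10.0.0.5'],
        internal_dns_name_label='my-nic')
    serializer = Serializer({'NetworkInterfaceDnsSettings': NetworkInterfaceDnsSettings})
    # body() validates against _attribute_map and returns the wire-format dict
    print(serializer.body(settings, 'NetworkInterfaceDnsSettings'))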
|
harlequin/sickbeard | refs/heads/master | lib/dateutil/zoneinfo/__init__.py | 265 | """
Copyright (c) 2003-2005 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
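# Subclass that pickles by zone name (via gettz) rather than by open file handle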
class tzfile(tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile():
filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
filenames.sort()
filenames.reverse()
for entry in filenames:
if entry.startswith("zoneinfo") and ".tar." in entry:
return os.path.join(os.path.dirname(__file__), entry)
return None
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
global CACHESIZE, CACHE
CACHESIZE = size
del CACHE[size:]
def gettz(name):
tzinfo = None
if ZONEINFOFILE:
for cachedname, tzinfo in CACHE:
if cachedname == name:
break
else:
tf = TarFile.open(ZONEINFOFILE)
try:
zonefile = tf.extractfile(name)
except KeyError:
tzinfo = None
else:
tzinfo = tzfile(zonefile)
tf.close()
CACHE.insert(0, (name, tzinfo))
del CACHE[CACHESIZE:]
return tzinfo
def rebuild(filename, tag=None, format="gz"):
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
if tag: tag = "-"+tag
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
for name in tf.getnames():
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
tf.extract(name, tmpdir)
filepath = os.path.join(tmpdir, name)
os.system("zic -d %s %s" % (zonedir, filepath))
tf.close()
target = os.path.join(moduledir, targetname)
for entry in os.listdir(moduledir):
if entry.startswith("zoneinfo") and ".tar." in entry:
os.unlink(os.path.join(moduledir, entry))
tf = TarFile.open(target, "w:%s" % format)
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
tf.close()
finally:
shutil.rmtree(tmpdir)
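# Hedged usage sketch (not part of the original module): resolve a zone from
# the bundled tarball and attach it to a datetime. Whether a given name
# resolves depends on the zoneinfo archive actually shipped with this package.
if __name__ == '__main__':
    from datetime import datetime
    tz = gettz("America/New_York")
    if tz is not None:
        print(datetime(2003, 1, 1, 12, 0, tzinfo=tz).isoformat())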
|
n0trax/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_zone.py | 49 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_zone
short_description: Manages zones on Apache CloudStack based clouds.
description:
- Create, update and remove zones.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the zone.
required: true
id:
description:
      - UUID of the existing zone.
default: null
required: false
state:
description:
- State of the zone.
required: false
default: 'present'
choices: [ 'present', 'enabled', 'disabled', 'absent' ]
domain:
description:
- Domain the zone is related to.
- Zone is a public zone if not set.
required: false
default: null
network_domain:
description:
- Network domain for the zone.
required: false
default: null
network_type:
description:
- Network type of the zone.
required: false
default: basic
choices: [ 'basic', 'advanced' ]
dns1:
description:
- First DNS for the zone.
- Required if C(state=present)
required: false
default: null
dns2:
description:
- Second DNS for the zone.
required: false
default: null
internal_dns1:
description:
- First internal DNS for the zone.
- If not set C(dns1) will be used on C(state=present).
required: false
default: null
internal_dns2:
description:
- Second internal DNS for the zone.
required: false
default: null
dns1_ipv6:
description:
- First DNS for IPv6 for the zone.
required: false
default: null
dns2_ipv6:
description:
- Second DNS for IPv6 for the zone.
required: false
default: null
guest_cidr_address:
description:
- Guest CIDR address for the zone.
required: false
default: null
dhcp_provider:
description:
- DHCP provider for the Zone.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a zone is present
- local_action:
module: cs_zone
name: ch-zrh-ix-01
dns1: 8.8.8.8
dns2: 8.8.4.4
network_type: basic
# Ensure a zone is disabled
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: disabled
# Ensure a zone is enabled
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: enabled
# Ensure a zone is absent
- local_action:
module: cs_zone
name: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the zone.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the zone.
returned: success
type: string
sample: zone01
dns1:
description: First DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
dns2:
description: Second DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
internal_dns1:
description: First internal DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8888"
dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8844"
allocation_state:
description: State of the zone.
returned: success
type: string
sample: Enabled
domain:
description: Domain the zone is related to.
returned: success
type: string
sample: ROOT
network_domain:
description: Network domain for the zone.
returned: success
type: string
sample: example.com
network_type:
description: Network type for the zone.
returned: success
type: string
sample: basic
local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: string
sample: 10.1.1.0/24
dhcp_provider:
description: DHCP provider for the zone
returned: success
type: string
sample: VirtualRouter
zone_token:
description: Zone token
returned: success
type: string
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackZone(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackZone, self).__init__(module)
self.returns = {
'dns1': 'dns1',
'dns2': 'dns2',
'internaldns1': 'internal_dns1',
'internaldns2': 'internal_dns2',
'ipv6dns1': 'dns1_ipv6',
'ipv6dns2': 'dns2_ipv6',
'domain': 'network_domain',
'networktype': 'network_type',
'securitygroupsenabled': 'securitygroups_enabled',
'localstorageenabled': 'local_storage_enabled',
'guestcidraddress': 'guest_cidr_address',
'dhcpprovider': 'dhcp_provider',
'allocationstate': 'allocation_state',
'zonetoken': 'zone_token',
}
self.zone = None
def _get_common_zone_args(self):
args = {
'name': self.module.params.get('name'),
'dns1': self.module.params.get('dns1'),
'dns2': self.module.params.get('dns2'),
'internaldns1': self.get_or_fallback('internal_dns1', 'dns1'),
'internaldns2': self.get_or_fallback('internal_dns2', 'dns2'),
'ipv6dns1': self.module.params.get('dns1_ipv6'),
'ipv6dns2': self.module.params.get('dns2_ipv6'),
'networktype': self.module.params.get('network_type'),
'domain': self.module.params.get('network_domain'),
'localstorageenabled': self.module.params.get('local_storage_enabled'),
'guestcidraddress': self.module.params.get('guest_cidr_address'),
'dhcpprovider': self.module.params.get('dhcp_provider'),
}
state = self.module.params.get('state')
if state in ['enabled', 'disabled']:
args['allocationstate'] = state.capitalize()
return args
def get_zone(self):
if not self.zone:
args = {}
uuid = self.module.params.get('id')
if uuid:
args['id'] = uuid
zones = self.query_api('listZones', **args)
if zones:
self.zone = zones['zone'][0]
return self.zone
args['name'] = self.module.params.get('name')
zones = self.query_api('listZones', **args)
if zones:
self.zone = zones['zone'][0]
return self.zone
def present_zone(self):
zone = self.get_zone()
if zone:
zone = self._update_zone()
else:
zone = self._create_zone()
return zone
def _create_zone(self):
required_params = [
'dns1',
]
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
args = self._get_common_zone_args()
args['domainid'] = self.get_domain(key='id')
args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')
zone = None
if not self.module.check_mode:
res = self.query_api('createZone', **args)
zone = res['zone']
return zone
def _update_zone(self):
zone = self.get_zone()
args = self._get_common_zone_args()
args['id'] = zone['id']
if self.has_changed(args, zone):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateZone', **args)
zone = res['zone']
return zone
def absent_zone(self):
zone = self.get_zone()
if zone:
self.result['changed'] = True
args = {
'id': zone['id']
}
if not self.module.check_mode:
self.query_api('deleteZone', **args)
return zone
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
id=dict(),
name=dict(required=True),
dns1=dict(),
dns2=dict(),
internal_dns1=dict(),
internal_dns2=dict(),
dns1_ipv6=dict(),
dns2_ipv6=dict(),
network_type=dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']),
network_domain=dict(),
guest_cidr_address=dict(),
dhcp_provider=dict(),
local_storage_enabled=dict(type='bool'),
securitygroups_enabled=dict(type='bool'),
state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
domain=dict(),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_zone = AnsibleCloudStackZone(module)
state = module.params.get('state')
if state in ['absent']:
zone = acs_zone.absent_zone()
else:
zone = acs_zone.present_zone()
result = acs_zone.get_result(zone)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Permutatrix/servo | refs/heads/master | etc/servo_gdb.py | 233 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
A set of simple pretty printers for gdb to make debugging Servo a bit easier.
To load these, you need to add something like the following to your .gdbinit file:
python
import sys
sys.path.insert(0, '/home/<path to git checkout>/servo/src/etc')
import servo_gdb
servo_gdb.register_printers(None)
end
"""
import gdb
# Print Au in both raw value and CSS pixels
class AuPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
i32_type = gdb.lookup_type("i32")
au = self.val.cast(i32_type)
return "{0}px".format(au / 60.0)
# Print a U8 bitfield as binary
class BitFieldU8Printer:
def __init__(self, val):
self.val = val
def to_string(self):
u8_type = gdb.lookup_type("u8")
value = self.val.cast(u8_type)
return "[{0:#010b}]".format(int(value))
# Print a struct with fields as children
class ChildPrinter:
def __init__(self, val):
self.val = val
def children(self):
children = []
for f in self.val.type.fields():
children.append((f.name, self.val[f.name]))
return children
def to_string(self):
return None
# Allow a trusted node to be dereferenced in the debugger
class TrustedNodeAddressPrinter:
def __init__(self, val):
self.val = val
def children(self):
node_type = gdb.lookup_type("struct script::dom::node::Node").pointer()
value = self.val.cast(node_type)
return [('Node', value)]
def to_string(self):
return self.val.address
# Extract a node type ID from enum
class NodeTypeIdPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
u8_ptr_type = gdb.lookup_type("u8").pointer()
enum_0 = self.val.address.cast(u8_ptr_type).dereference()
enum_type = self.val.type.fields()[int(enum_0)].type
        # lstrip() strips a character *set*, which can also eat the start of
        # the type name; remove the exact "struct " prefix instead
        type_name = str(enum_type)
        if type_name.startswith('struct '):
            type_name = type_name[len('struct '):]
        return type_name
# Printer for std::Option<>
class OptionPrinter:
def __init__(self, val):
self.val = val
def is_some(self):
# Get size of discriminator
d_size = self.val.type.fields()[0].type.sizeof
if d_size > 0 and d_size <= 8:
# Read first byte to check if None or Some
ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
discriminator = int(ptr.dereference())
return discriminator != 0
raise "unhandled discriminator size"
def children(self):
if self.is_some():
option_type = self.val.type
# Get total size and size of value
ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
t_size = option_type.sizeof
value_type = option_type.fields()[1].type.fields()[1].type
v_size = value_type.sizeof
data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
return [('Some', data_ptr)]
return [('None', None)]
def to_string(self):
return None
# Useful for debugging when type is unknown
class TestPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
return "[UNKNOWN - type = {0}]".format(str(self.val.type))
type_map = [
('struct Au', AuPrinter),
('FlowFlags', BitFieldU8Printer),
('IntrinsicWidths', ChildPrinter),
('PlacementInfo', ChildPrinter),
('TrustedNodeAddress', TrustedNodeAddressPrinter),
('NodeTypeId', NodeTypeIdPrinter),
('Option', OptionPrinter),
]
def lookup_servo_type(val):
val_type = str(val.type)
for (type_name, printer) in type_map:
if val_type == type_name or val_type.endswith("::" + type_name):
return printer(val)
return None
# return TestPrinter(val)
def register_printers(obj):
gdb.pretty_printers.append(lookup_servo_type)
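# Hedged sketch (not part of the original script): lookup_servo_type only
# inspects str(val.type), so the dispatch can be exercised with a stand-in
# value object (run inside gdb's Python, where the gdb module exists).
if __name__ == '__main__':
    class FakeValue(object):
        def __init__(self, type_name):
            self.type = type_name  # lookup_servo_type only stringifies this
    printer = lookup_servo_type(FakeValue('core::option::Option'))
    print(type(printer).__name__ if printer else None)  # -> OptionPrinter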
|
xplv/qtile | refs/heads/develop | libqtile/xcursors.py | 6 | from logging import getLogger
# PyPy < 2.6 compatibility
try:
from ._ffi_xcursors import ffi
except ImportError:
from .ffi_build import xcursors_ffi as ffi
# Adapted from samurai-x
# (no obvious home for this, so it lives here)
# XCB cursors don't support theming; libxcursor would be
# the better choice, and we (indirectly) depend on it anyway...
class Cursors(dict):
def __init__(self, conn):
self.conn = conn
self.log = getLogger('qtile')
cursors = (
(b'X_cursor', 0),
(b'arrow', 2),
(b'based_arrow_down', 4),
(b'based_arrow_up', 6),
(b'boat', 8),
(b'bogosity', 10),
(b'bottom_left_corner', 12),
(b'bottom_right_corner', 14),
(b'bottom_side', 16),
(b'bottom_tee', 18),
(b'box_spiral', 20),
(b'center_ptr', 22),
(b'circle', 24),
(b'clock', 26),
(b'coffee_mug', 28),
(b'cross', 30),
(b'cross_reverse', 32),
(b'crosshair', 34),
(b'diamond_cross', 36),
(b'dot', 38),
(b'dotbox', 40),
(b'double_arrow', 42),
(b'draft_large', 44),
(b'draft_small', 46),
(b'draped_box', 48),
(b'exchange', 50),
(b'fleur', 52),
(b'gobbler', 54),
(b'gumby', 56),
(b'hand1', 58),
(b'hand2', 60),
(b'heart', 62),
(b'icon', 64),
(b'iron_cross', 66),
(b'left_ptr', 68),
(b'left_side', 70),
(b'left_tee', 72),
(b'leftbutton', 74),
(b'll_angle', 76),
(b'lr_angle', 78),
(b'man', 80),
(b'middlebutton', 82),
(b'mouse', 84),
(b'pencil', 86),
(b'pirate', 88),
(b'plus', 90),
(b'question_arrow', 92),
(b'right_ptr', 94),
(b'right_side', 96),
(b'right_tee', 98),
(b'rightbutton', 100),
(b'rtl_logo', 102),
(b'sailboat', 104),
(b'sb_down_arrow', 106),
(b'sb_h_double_arrow', 108),
(b'sb_left_arrow', 110),
(b'sb_right_arrow', 112),
(b'sb_up_arrow', 114),
(b'sb_v_double_arrow', 116),
(b'shuttle', 118),
(b'sizing', 120),
(b'spider', 122),
(b'spraycan', 124),
(b'star', 126),
(b'target', 128),
(b'tcross', 130),
(b'top_left_arrow', 132),
(b'top_left_corner', 134),
(b'top_right_corner', 136),
(b'top_side', 138),
(b'top_tee', 140),
(b'trek', 142),
(b'ul_angle', 144),
(b'umbrella', 146),
(b'ur_angle', 148),
(b'watch', 150),
(b'xterm', 152)
)
self.xcursor = self._setup_xcursor_binding()
for name, cursor_font in cursors:
self._new(name, cursor_font)
if self.xcursor:
self.xcursor.xcb_cursor_context_free(self._cursor_ctx[0])
def finalize(self):
self._cursor_ctx = None
def _setup_xcursor_binding(self):
try:
xcursor = ffi.dlopen('libxcb-cursor.so')
except OSError:
self.log.warning("xcb-cursor not found, fallback to font pointer")
return False
conn = self.conn.conn
screen_pointer = conn.get_screen_pointers()[0]
self._cursor_ctx = ffi.new('xcb_cursor_context_t **')
xcursor.xcb_cursor_context_new(conn._conn, screen_pointer,
self._cursor_ctx)
return xcursor
def get_xcursor(self, name):
"""
Get the cursor using xcb-util-cursor, so we support themed cursors
"""
cursor = self.xcursor.xcb_cursor_load_cursor(self._cursor_ctx[0], name)
return cursor
def get_font_cursor(self, name, cursor_font):
"""
Get the cursor from the font, used as a fallback if xcb-util-cursor
is not installed
"""
fid = self.conn.conn.generate_id()
self.conn.conn.core.OpenFont(fid, len("cursor"), "cursor")
cursor = self.conn.conn.generate_id()
self.conn.conn.core.CreateGlyphCursor(
cursor, fid, fid,
cursor_font, cursor_font + 1,
0, 0, 0,
65535, 65535, 65535
)
return cursor
def _new(self, name, cursor_font):
if self.xcursor:
cursor = self.get_xcursor(name)
else:
cursor = self.get_font_cursor(name, cursor_font)
self[name.decode()] = cursor
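# Hedged usage sketch (not part of the original module): `conn` is assumed to
# be qtile's xcbq.Connection. Cursors behaves as a dict keyed by cursor name.
def _example_left_ptr(conn):
    cursors = Cursors(conn)
    # The value is an X cursor id, usable in e.g. ChangeWindowAttributes
    return cursors['left_ptr']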
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.py | 67 | from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
class TestBasic(TestCase):
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
B = np.char.array(A)
assert_equal(B.dtype.itemsize, 10)
assert_array_equal(B, asbytes_nested([['abc', '2'],
['long', '0123456789']]))
def test_from_object_array_unicode(self):
A = np.array([['abc', sixu('Sigma \u03a3')],
['long ', '0123456789']], dtype='O')
self.assertRaises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', sixu('Sigma \u03a3')],
['long', '0123456789']])
def test_from_string_array(self):
A = np.array(asbytes_nested([['abc', 'foo'],
['long ', '0123456789']]))
assert_equal(A.dtype.type, np.string_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B[0, 0] = 'changed'
assert_(B[0, 0] != A[0, 0])
C = np.char.asarray(A)
assert_array_equal(C, A)
assert_equal(C.dtype, A.dtype)
C[0, 0] = 'changed again'
assert_(C[0, 0] != B[0, 0])
assert_(C[0, 0] == A[0, 0])
def test_from_unicode_array(self):
A = np.array([['abc', sixu('Sigma \u03a3')],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.unicode_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B = np.char.array(A, **kw_unicode_true)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
def fail():
np.char.array(A, **kw_unicode_false)
self.assertRaises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
B = np.char.array([sixu('\u03a3')])
assert_(issubclass((A + B).dtype.type, np.unicode_))
def test_from_string(self):
A = np.char.array(asbytes('abc'))
assert_equal(len(A), 1)
assert_equal(len(A[0]), 3)
assert_(issubclass(A.dtype.type, np.string_))
def test_from_unicode(self):
A = np.char.array(sixu('\u03a3'))
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
class TestVecString(TestCase):
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
self.assertRaises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
self.assertRaises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
self.assertRaises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
self.assertRaises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
self.assertRaises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.integer, 'strip')
self.assertRaises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
self.assertRaises(ValueError, fail)
class TestWhitespace(TestCase):
def setUp(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
def test1(self):
assert_(np.all(self.A == self.B))
assert_(np.all(self.A >= self.B))
assert_(np.all(self.A <= self.B))
assert_(not np.any(self.A > self.B))
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
class TestChar(TestCase):
def setUp(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), asbytes('AB'))
class TestComparisons(TestCase):
def setUp(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
['051', 'tuv']]).view(np.chararray)
def test_not_equal(self):
assert_array_equal((self.A != self.B), [[True, False], [True, True]])
def test_equal(self):
assert_array_equal((self.A == self.B), [[False, True], [False, False]])
def test_greater_equal(self):
assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
def test_less_equal(self):
assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
def test_greater(self):
assert_array_equal((self.A > self.B), [[False, False], [True, True]])
def test_less(self):
assert_array_equal((self.A < self.B), [[True, False], [False, False]])
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
def setUp(self):
TestComparisons.setUp(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
def setUp(self):
TestComparisons.setUp(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
class TestInformation(TestCase):
def setUp(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
self.B = np.array([[sixu(' \u03a3 '), sixu('')],
[sixu('12345'), sixu('MixedCase')],
[sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
def test_len(self):
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
def test_count(self):
assert_(issubclass(self.A.count('').dtype.type, np.integer))
assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
# Python doesn't seem to like counting NULL characters
# assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
# assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
def test_endswith(self):
assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.endswith('3', 'fdjk')
self.assertRaises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
def test_index(self):
def fail():
self.A.index('a')
self.assertRaises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
def test_isalnum(self):
assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
def test_isalpha(self):
assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
def test_isdigit(self):
assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
def test_islower(self):
assert_(issubclass(self.A.islower().dtype.type, np.bool_))
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
def test_isspace(self):
assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
def test_istitle(self):
assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
def test_isupper(self):
assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
def test_rfind(self):
assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
def test_rindex(self):
def fail():
self.A.rindex('a')
self.assertRaises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
def test_startswith(self):
assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.startswith('3', 'fdjk')
self.assertRaises(TypeError, fail)
class TestMethods(TestCase):
def setUp(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
dtype='S').view(np.chararray)
self.B = np.array([[sixu(' \u03a3 '), sixu('')],
[sixu('12345'), sixu('MixedCase')],
[sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
def test_capitalize(self):
tgt = asbytes_nested([[' abc ', ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']])
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
assert_array_equal(self.A.capitalize(), tgt)
tgt = [[sixu(' \u03c3 '), ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']]
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
assert_array_equal(self.B.capitalize(), tgt)
def test_center(self):
assert_(issubclass(self.A.center(10).dtype.type, np.string_))
C = self.A.center([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.center(20, asbytes('#'))
assert_(np.all(C.startswith(asbytes('#'))))
assert_(np.all(C.endswith(asbytes('#'))))
C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]])
tgt = asbytes_nested([[' FOO ', ' FOO '],
[' FOO ', ' FOO ']])
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_decode(self):
if sys.version_info[0] >= 3:
A = np.char.array([asbytes('\\u03a3')])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
else:
A = np.char.array(['736563726574206d657373616765'])
assert_(A.decode('hex_codec')[0] == 'secret message')
def test_encode(self):
B = self.B.encode('unicode_escape')
assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
def test_expandtabs(self):
T = self.A.expandtabs()
assert_(T[2, 0] == asbytes('123 345 \0'))
def test_join(self):
if sys.version_info[0] >= 3:
# NOTE: list(b'123') == [49, 50, 51]
# so that b','.join(b'123') results to an error on Py3
A0 = self.A.decode('ascii')
else:
A0 = self.A
A = np.char.join([',', '#'], A0)
if sys.version_info[0] >= 3:
assert_(issubclass(A.dtype.type, np.unicode_))
else:
assert_(issubclass(A.dtype.type, np.string_))
tgt = np.array([[' ,a,b,c, ', ''],
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
assert_array_equal(np.char.join([',', '#'], A0), tgt)
def test_ljust(self):
assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
C = self.A.ljust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.ljust(20, asbytes('#'))
assert_array_equal(C.startswith(asbytes('#')), [
[False, True], [False, False], [False, False]])
assert_(np.all(C.endswith(asbytes('#'))))
C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]])
tgt = asbytes_nested([['FOO ', 'FOO '],
['FOO ', 'FOO ']])
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_lower(self):
tgt = asbytes_nested([[' abc ', ''],
['12345', 'mixedcase'],
['123 \t 345 \0 ', 'upper']])
assert_(issubclass(self.A.lower().dtype.type, np.string_))
assert_array_equal(self.A.lower(), tgt)
tgt = [[sixu(' \u03c3 '), sixu('')],
[sixu('12345'), sixu('mixedcase')],
[sixu('123 \t 345 \0 '), sixu('upper')]]
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
assert_array_equal(self.B.lower(), tgt)
def test_lstrip(self):
tgt = asbytes_nested([['abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']])
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
assert_array_equal(self.A.lstrip(), tgt)
tgt = asbytes_nested([[' abc', ''],
['2345', 'ixedCase'],
['23 \t 345 \x00', 'UPPER']])
assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])), tgt)
tgt = [[sixu('\u03a3 '), ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.lstrip(), tgt)
def test_partition(self):
P = self.A.partition(asbytes_nested(['3', 'M']))
tgt = asbytes_nested([[(' abc ', '', ''), ('', '', '')],
[('12', '3', '45'), ('', 'M', 'ixedCase')],
[('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]])
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_replace(self):
R = self.A.replace(asbytes_nested(['3', 'a']),
asbytes_nested(['##########', '@']))
tgt = asbytes_nested([[' abc ', ''],
['12##########45', 'MixedC@se'],
['12########## \t ##########45 \x00', 'UPPER']])
assert_(issubclass(R.dtype.type, np.string_))
assert_array_equal(R, tgt)
if sys.version_info[0] < 3:
# NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
R = self.A.replace(asbytes('a'), sixu('\u03a3'))
tgt = [[sixu(' \u03a3bc '), ''],
['12345', sixu('MixedC\u03a3se')],
['123 \t 345 \x00', 'UPPER']]
assert_(issubclass(R.dtype.type, np.unicode_))
assert_array_equal(R, tgt)
def test_rjust(self):
assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
C = self.A.rjust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.rjust(20, asbytes('#'))
assert_(np.all(C.startswith(asbytes('#'))))
assert_array_equal(C.endswith(asbytes('#')),
[[False, True], [False, False], [False, False]])
C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]])
tgt = asbytes_nested([[' FOO', ' FOO'],
[' FOO', ' FOO']])
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_rpartition(self):
P = self.A.rpartition(asbytes_nested(['3', 'M']))
tgt = asbytes_nested([[('', '', ' abc '), ('', '', '')],
[('12', '3', '45'), ('', 'M', 'ixedCase')],
[('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]])
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_rsplit(self):
A = self.A.rsplit(asbytes('3'))
tgt = asbytes_nested([[[' abc '], ['']],
[['12', '45'], ['MixedCase']],
[['12', ' \t ', '45 \x00 '], ['UPPER']]])
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_rstrip(self):
assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
tgt = asbytes_nested([[' abc', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']])
assert_array_equal(self.A.rstrip(), tgt)
tgt = asbytes_nested([[' abc ', ''],
['1234', 'MixedCase'],
['123 \t 345 \x00', 'UPP']
])
assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])), tgt)
tgt = [[sixu(' \u03a3'), ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.rstrip(), tgt)
def test_strip(self):
tgt = asbytes_nested([['abc', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']])
assert_(issubclass(self.A.strip().dtype.type, np.string_))
assert_array_equal(self.A.strip(), tgt)
tgt = asbytes_nested([[' abc ', ''],
['234', 'ixedCas'],
['23 \t 345 \x00', 'UPP']])
assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])), tgt)
tgt = [[sixu('\u03a3'), ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
assert_array_equal(self.B.strip(), tgt)
def test_split(self):
A = self.A.split(asbytes('3'))
tgt = asbytes_nested([
[[' abc '], ['']],
[['12', '45'], ['MixedCase']],
[['12', ' \t ', '45 \x00 '], ['UPPER']]])
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_splitlines(self):
A = np.char.array(['abc\nfds\nwer']).splitlines()
assert_(issubclass(A.dtype.type, np.object_))
assert_(A.shape == (1,))
assert_(len(A[0]) == 3)
def test_swapcase(self):
tgt = asbytes_nested([[' ABC ', ''],
['12345', 'mIXEDcASE'],
['123 \t 345 \0 ', 'upper']])
assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
assert_array_equal(self.A.swapcase(), tgt)
tgt = [[sixu(' \u03c3 '), sixu('')],
[sixu('12345'), sixu('mIXEDcASE')],
[sixu('123 \t 345 \0 '), sixu('upper')]]
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
assert_array_equal(self.B.swapcase(), tgt)
def test_title(self):
tgt = asbytes_nested([[' Abc ', ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']])
assert_(issubclass(self.A.title().dtype.type, np.string_))
assert_array_equal(self.A.title(), tgt)
tgt = [[sixu(' \u03a3 '), sixu('')],
[sixu('12345'), sixu('Mixedcase')],
[sixu('123 \t 345 \0 '), sixu('Upper')]]
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
assert_array_equal(self.B.title(), tgt)
def test_upper(self):
tgt = asbytes_nested([[' ABC ', ''],
['12345', 'MIXEDCASE'],
['123 \t 345 \0 ', 'UPPER']])
assert_(issubclass(self.A.upper().dtype.type, np.string_))
assert_array_equal(self.A.upper(), tgt)
tgt = [[sixu(' \u03a3 '), sixu('')],
[sixu('12345'), sixu('MIXEDCASE')],
[sixu('123 \t 345 \0 '), sixu('UPPER')]]
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
assert_array_equal(self.B.upper(), tgt)
def test_isnumeric(self):
def fail():
self.A.isnumeric()
self.assertRaises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
def test_isdecimal(self):
def fail():
self.A.isdecimal()
self.assertRaises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
class TestOperations(TestCase):
def setUp(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
['051', 'tuv']]).view(np.chararray)
def test_add(self):
AB = np.array([['abcefg', '123456'],
['789051', 'xyztuv']]).view(np.chararray)
assert_array_equal(AB, (self.A + self.B))
assert_(len((self.A + self.B)[0][0]) == 6)
def test_radd(self):
QA = np.array([['qabc', 'q123'],
['q789', 'qxyz']]).view(np.chararray)
assert_array_equal(QA, ('q' + self.A))
def test_mul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (self.A * r))
for ob in [object(), 'qrs']:
try:
A * ob
except ValueError:
pass
else:
self.fail("chararray can only be multiplied by integers")
def test_rmul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (r * self.A))
for ob in [object(), 'qrs']:
try:
ob * A
except ValueError:
pass
else:
self.fail("chararray can only be multiplied by integers")
def test_mod(self):
"""Ticket #856"""
F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
C = np.array([[3, 7], [19, 1]])
FC = np.array([['3', '7.000000'],
['19', '1']]).view(np.chararray)
assert_array_equal(FC, F % C)
A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
assert_array_equal(A1, (A % 1))
A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
def test_rmod(self):
assert_(("%s" % self.A) == str(self.A))
assert_(("%r" % self.A) == repr(self.A))
for ob in [42, object()]:
try:
ob % self.A
except TypeError:
pass
else:
self.fail("chararray __rmod__ should fail with "
"non-string objects")
def test_slice(self):
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
dtype='S4').view(np.chararray)
sl1 = arr[:]
assert_array_equal(sl1, arr)
assert_(sl1.base is arr)
assert_(sl1.base.base is arr.base)
sl2 = arr[:, :]
assert_array_equal(sl2, arr)
assert_(sl2.base is arr)
assert_(sl2.base.base is arr.base)
assert_(arr[0, 0] == asbytes('abc'))
def test_empty_indexing():
"""Regression test for ticket 1948."""
# Check that indexing a chararray with an empty list/array returns an
# empty chararray instead of a chararray with a single empty string in it.
s = np.chararray((4,))
assert_(s[[]].size == 0)
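# Hedged sketch (not part of the test suite): the whitespace-insensitive
# comparison checked in TestWhitespace, shown directly.
def _example_trailing_whitespace():
    a = np.char.array(['abc   '])
    b = np.char.array(['abc'])
    return bool((a == b).all())  # True: trailing blanks are ignored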
if __name__ == "__main__":
run_module_suite()
|
rrrene/django | refs/heads/master | django/contrib/gis/db/backends/postgis/features.py | 345 | from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import \
DatabaseFeatures as Psycopg2DatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_3d_storage = True
supports_3d_functions = True
supports_left_right_lookups = True
supports_raster = True
|
rnikiforova/GuruTubeProject | refs/heads/master | GuruTube/libraries/django/contrib/gis/db/backends/mysql/base.py | 317 | from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.contrib.gis.db.backends.mysql.creation import MySQLCreation
from django.contrib.gis.db.backends.mysql.introspection import MySQLIntrospection
from django.contrib.gis.db.backends.mysql.operations import MySQLOperations
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.creation = MySQLCreation(self)
self.ops = MySQLOperations(self)
self.introspection = MySQLIntrospection(self)
|
JonasThomas/free-cad | refs/heads/master | src/3rdParty/Pivy-0.5/__init__.py | 21 | ###
# Copyright (c) 2002-2008 Kongsberg SIM
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
__all__ = ['coin']
# set the RTLD_GLOBAL dynamic loading flag so Coin can resolve symbols across
# Python extension modules (the dl module may be unavailable on some platforms)
try:
    import sys, dl
    sys.setdlopenflags(dl.RTLD_GLOBAL | sys.getdlopenflags())
except Exception:
    pass
# initialize the Coin system
from coin import SoDB, SoNodeKit, SoInteraction
SoDB.init()
SoNodeKit.init()
SoInteraction.init()
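# Hedged usage sketch (not part of the original package): once the init calls
# above have run, Coin scene graph nodes can be built directly.
def _example_scene():
    from coin import SoSeparator, SoCube
    root = SoSeparator()
    root.addChild(SoCube())  # a one-node scene graph
    return root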
|
felixfontein/ansible | refs/heads/devel | lib/ansible/release.py | 15 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.12.0.dev0'
__author__ = 'Ansible, Inc.'
__codename__ = 'Dazed and Confused'
|
jbzdak/edx-platform | refs/heads/master | cms/djangoapps/course_creators/migrations/0001_initial.py | 62 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseCreator',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('state_changed', models.DateTimeField(help_text='The date when state was last updated', verbose_name=b'state last updated', auto_now_add=True)),
('state', models.CharField(default=b'unrequested', help_text='Current course creator state', max_length=24, choices=[(b'unrequested', 'unrequested'), (b'pending', 'pending'), (b'granted', 'granted'), (b'denied', 'denied')])),
('note', models.CharField(help_text='Optional notes about this user (for example, why course creation access was denied)', max_length=512, blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, help_text='Studio user')),
],
),
]
|
foursquare/commons-old | refs/heads/master | src/python/twitter/pants/ant/lib.py | 1 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
import os
import pkgutil
import shutil
import subprocess
import traceback
from twitter.pants import has_jvm_targets, is_jvm
from twitter.pants.ant import bang
from twitter.pants.base.generator import Generator
from twitter.pants.base.builder import Builder
from twitter.pants.targets import (
JarDependency,
JavaProtobufLibrary,
JavaThriftLibrary,
JavaThriftstoreDMLLibrary,
JavaTests,
Pants,
ScalaLibrary,
ScalaTests)
_TEMPLATE_BASEDIR = 'templates'
class AntBuilder(Builder):
@classmethod
def generate_ivy(cls, root_dir, output_filename, target):
"""Outputs an ivy.xml file to output_filename for the given java target"""
AntBuilder._check_target(target)
library_template_data = target._create_template_data()
AntBuilder._generate(root_dir, 'ivy', library_template_data, output_filename)
@classmethod
def _generate(cls, root_dir, template, template_data, output_filename):
with open(output_filename, 'w') as output:
template_path = os.path.join(_TEMPLATE_BASEDIR, '%s.mk' % template)
generator = Generator(pkgutil.get_data(__name__, template_path),
root_dir = root_dir, lib = template_data)
generator.write(output)
@classmethod
def _check_target(cls, target):
assert has_jvm_targets([target]), \
"AntBuilder can only build jvm targets, given %s" % str(target)
def __init__(self, ferror, root_dir):
Builder.__init__(self, ferror, root_dir)
def build(self, targets, args):
_, _, result = self.generate_and_build(targets, args)
return result
def generate_and_build(self, targets, args, name = None):
java_target = self._resolve_targets(targets, name)
extrabuildflags = set()
workspace_root = os.path.join(self.root_dir, '.pants.d')
if not os.path.exists(workspace_root):
os.makedirs(workspace_root)
buildxml, ivyxml = self.create_ant_builds(workspace_root, dict(), extrabuildflags, java_target)
buildflags = []
if extrabuildflags:
buildflags.extend(extrabuildflags)
# TODO(John Sirois): introduce java_binary and only allow buildflags from those and disallow
# java_binary as a pants dep - they must be leaf
if java_target.buildflags:
buildflags.extend(java_target.buildflags)
antargs = [ 'ant', '-f', '"%s"' % buildxml ]
if buildflags:
antargs.extend(buildflags)
if args:
antargs.extend(args)
print('AntBuilder executing (ANT_OPTS="%s") %s' % (os.environ['ANT_OPTS'], ' '.join(antargs)))
return buildxml, ivyxml, subprocess.call(antargs)
def create_ant_builds(self, workspace_root, targets, flags, target):
if target.id in targets:
return targets[target.id]
# Link in libraries required by ant targets as needed
def add_scaladeps(tgt):
      scaladeps = tgt.do_in_context(lambda: JarDependency(
org = 'org.scala-lang',
name = 'scala-library',
rev = '${scala.version}'
).with_sources().resolve())
      tgt.update_dependencies(scaladeps)
if is_jvm(target):
if not target.sources:
target.sources = [ '_not_a_real_file_' ]
if isinstance(target, JavaProtobufLibrary):
protobufdeps = target.do_in_context(lambda: JarDependency(
org = 'com.google.protobuf',
name = 'protobuf-java',
rev = '${protobuf.library.version}'
).resolve())
target.update_dependencies(protobufdeps)
elif isinstance(target, JavaThriftLibrary):
def resolve_thriftdeps():
all_deps = [
Pants('3rdparty:commons-lang'),
JarDependency(org = 'org.apache.thrift',
name = 'libthrift',
rev = '${thrift.library.version}'),
Pants('3rdparty:slf4j-api'),
# finagle thrift extra deps
Pants('3rdparty:finagle-core'),
Pants('3rdparty:finagle-thrift'),
Pants('3rdparty:util'),
]
for dep in all_deps:
target.update_dependencies(dep.resolve())
target.do_in_context(resolve_thriftdeps)
elif isinstance(target, JavaTests):
junit = target.do_in_context(lambda: Pants('3rdparty:junit').resolve())
target.update_dependencies(junit)
elif isinstance(target, ScalaLibrary):
add_scaladeps(target)
elif isinstance(target, ScalaTests):
add_scaladeps(target)
specdeps = target.do_in_context(lambda: JarDependency(
org = 'org.scala-tools.testing',
name = '${specs.name}',
rev = '${specs.version}'
).with_sources().resolve())
target.update_dependencies(specdeps)
try:
library_template_data = target._create_template_data()
    except Exception:
self.ferror("Problem creating template data for %s(%s): %s" %
(type(target).__name__, target.address, traceback.format_exc()))
workspace = os.path.join(workspace_root, library_template_data.id)
if not os.path.exists(workspace):
os.makedirs(workspace)
ivyxml = os.path.join(workspace, 'ivy.xml')
AntBuilder._generate(self.root_dir, 'ivy', library_template_data, ivyxml)
buildxml = os.path.join(workspace, 'build.xml')
if target.custom_antxml_path:
shutil.copyfile(target.custom_antxml_path, buildxml)
pants_buildxml = os.path.join(workspace, 'pants-build.xml')
flags.add('-Dpants.build.file=pants-build.xml')
else:
pants_buildxml = buildxml
build_template = os.path.join(library_template_data.template_base, 'build')
AntBuilder._generate(self.root_dir, build_template, library_template_data, pants_buildxml)
targets[target.id] = buildxml
for additional_library in target.internal_dependencies:
self.create_ant_builds(workspace_root, targets, flags, additional_library)
return buildxml, ivyxml
def _resolve_targets(self, targets, name = None):
for target in targets:
AntBuilder._check_target(target)
foil = list(targets)[0]
if len(targets) > 1 or foil.address.is_meta:
return bang.extract_target(targets, name)
else:
return foil
|
jimmysong/bitcoin | refs/heads/master | test/util/bitcoin-util-test.py | 11 | #!/usr/bin/env python
# Copyright 2014 BitPay Inc.
# Copyright 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import os
import sys
import argparse
import logging
help_text="""Test framework for bitcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import buildenv
import bctest
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format = formatter, level=level)
bctest.bctester(buildenv.SRCDIR + "/test/util/data", "bitcoin-util-test.json", buildenv)
|
exercism/python | refs/heads/main | exercises/practice/spiral-matrix/spiral_matrix.py | 4 | def spiral_matrix(size):
pass
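# A possible implementation sketch (not the exercise's official solution):
# fill the grid clockwise, turning right whenever the next cell is out of
# bounds or already filled.
#
# def spiral_matrix(size):
#     matrix = [[0] * size for _ in range(size)]
#     row, col, drow, dcol = 0, 0, 0, 1
#     for value in range(1, size * size + 1):
#         matrix[row][col] = value
#         next_row, next_col = row + drow, col + dcol
#         if not (0 <= next_row < size and 0 <= next_col < size) \
#                 or matrix[next_row][next_col]:
#             drow, dcol = dcol, -drow
#         row, col = row + drow, col + dcol
#     return matrix
#
# Example: spiral_matrix(3) -> [[1, 2, 3], [8, 9, 4], [7, 6, 5]]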
|
renegelinas/mi-instrument | refs/heads/master | mi/dataset/driver/suna/resource/__init__.py | 10 | import os
RESOURCE_PATH = os.path.dirname(__file__)
|
inares/edx-platform | refs/heads/inares_sass | common/djangoapps/external_auth/djangostore.py | 224 | """An OpenID store using the Django cache"""
from openid.store.interface import OpenIDStore
from openid.store import nonce
from django.core.cache import cache
import logging
import time
DEFAULT_ASSOCIATIONS_TIMEOUT = 60
DEFAULT_NONCE_TIMEOUT = 600
ASSOCIATIONS_KEY_PREFIX = 'openid.provider.associations.'
NONCE_KEY_PREFIX = 'openid.provider.nonce.'
log = logging.getLogger('DjangoOpenIDStore')
def get_url_key(server_url):
key = ASSOCIATIONS_KEY_PREFIX + server_url
return key
def get_nonce_key(server_url, timestamp, salt):
key = '{prefix}{url}.{ts}.{salt}'.format(prefix=NONCE_KEY_PREFIX,
url=server_url,
ts=timestamp,
salt=salt)
return key
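# For example (illustrative values only):
#   get_nonce_key('http://example.com/openid', 1300000000, 'abc')
#   -> 'openid.provider.nonce.http://example.com/openid.1300000000.abc'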
class DjangoOpenIDStore(OpenIDStore):
def __init__(self):
log.info('DjangoStore cache:' + str(cache.__class__))
def storeAssociation(self, server_url, assoc):
key = get_url_key(server_url)
log.info('storeAssociation {0}'.format(key))
associations = cache.get(key, {})
associations[assoc.handle] = assoc
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
def getAssociation(self, server_url, handle=None):
key = get_url_key(server_url)
log.info('getAssociation {0}'.format(key))
associations = cache.get(key, {})
assoc = None
if handle is None:
# get best association
            valid_assocs = [a for a in associations.values() if a.getExpiresIn() > 0]
            if valid_assocs:
                valid_assocs.sort(key=lambda a: a.getExpiresIn(), reverse=True)
                assoc = valid_assocs[0]
else:
assoc = associations.get(handle)
# check expiration and remove if it has expired
if assoc and assoc.getExpiresIn() <= 0:
if handle is None:
cache.delete(key)
else:
associations.pop(handle)
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
assoc = None
return assoc
def removeAssociation(self, server_url, handle):
key = get_url_key(server_url)
log.info('removeAssociation {0}'.format(key))
associations = cache.get(key, {})
removed = False
if associations:
if handle is None:
cache.delete(key)
removed = True
else:
assoc = associations.pop(handle, None)
if assoc:
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
removed = True
return removed
def useNonce(self, server_url, timestamp, salt):
key = get_nonce_key(server_url, timestamp, salt)
log.info('useNonce {0}'.format(key))
if abs(timestamp - time.time()) > nonce.SKEW:
return False
anonce = cache.get(key)
found = False
if anonce is None:
cache.set(key, '-', DEFAULT_NONCE_TIMEOUT)
found = False
else:
found = True
return found
def cleanupNonces(self):
        # not necessary, keys will time out
return 0
def cleanupAssociations(self):
        # not necessary, keys will time out
return 0
|
wrxtasy/xbmc | refs/heads/master | lib/libUPnP/Platinum/Build/Tools/Scripts/GenSvnVersionHeader.py | 263 | #! /usr/bin/python
#############################################################
# This tool is used to generate the version info file #
#############################################################
import sys
import os
# ensure that PLATINUM_KIT_HOME has been set
if not os.environ.has_key('PLATINUM_KIT_HOME'):
print 'ERROR: PLATINUM_KIT_HOME not set'
sys.exit(1)
PLATINUM_KIT_HOME = os.environ['PLATINUM_KIT_HOME']
# ensure that the PLATINUM_KIT_HOME directory exists
if not os.path.exists(PLATINUM_KIT_HOME) :
print 'ERROR: PLATINUM_KIT_HOME ('+PLATINUM_KIT_HOME+') does not exist'
sys.exit(1)
else :
print 'PLATINUM_KIT_HOME = ' + PLATINUM_KIT_HOME
# get the SVN repo version
version = os.popen('svnversion -n').readlines()[0]
print 'current VERSION =',version
# 'MP' must be tested before 'P': stripping the trailing 'P' first would
# leave a trailing 'M', and the 'MP' branch could never match
if version.endswith('MP'):
    version = version[0:-2]
elif version.endswith('P'):
    version = version[0:-1]
try:
version_int = int(version)+1 ## add one, because when we check it in, the rev will be incremented by one
except:
print 'ERROR: you cannot run this on a modified working copy'
sys.exit(1)
output = open(PLATINUM_KIT_HOME+'/Platinum/Source/Platinum/PltSvnVersion.h', 'w+')
output.write('/* DO NOT EDIT. This file was automatically generated by GenSvnVersionHeader.py */\n')
output.write('#define PLT_SVN_VERSION '+str(version_int)+'\n')
output.write('#define PLT_SVN_VERSION_STRING "'+str(version_int)+'"\n')
output.close()
print 'upon check-in, version will be', str(version_int)
|
wdaher/zulip | refs/heads/master | zerver/migrations/0004_userprofile_left_side_userlist.py | 167 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0003_custom_indexes'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='left_side_userlist',
field=models.BooleanField(default=False),
),
]
|
rajanandakumar/DIRAC | refs/heads/integration | Core/Utilities/Time.py | 7 | # $HeadURL$
"""
DIRAC Times module
Support for basic Date and Time operations
based on system datetime module.
It provides common interface to UTC timestamps,
converter to string types and back.
The following datetime classes are used in the returned objects:
- dateTime = datetime.datetime
- date = datetime.date
- time = datetime.timedelta
   Useful timedelta constants are also provided to
   define time intervals.
   Notice: datetime.timedelta objects allow multiplication and division by integer
   but not by float. Thus:
     - DIRAC.Times.second * 1.5 is not allowed
     - DIRAC.Times.second * 3 / 2 is allowed
   A timeInterval class provides a method to check
   if a given datetime is in the defined interval.
"""
__RCSID__ = "$Id$"
import time as nativetime
import datetime
from types import StringTypes
# Some useful constants for time operations
microsecond = datetime.timedelta( microseconds = 1 )
second = datetime.timedelta( seconds = 1 )
minute = datetime.timedelta( minutes = 1 )
hour = datetime.timedelta( hours = 1 )
day = datetime.timedelta( days = 1 )
week = datetime.timedelta( days = 7 )
dt = datetime.datetime( 2000, 1, 1 )
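# Interval arithmetic sketch for the constants above (integer factors only,
# as noted in the module docstring):
#   week + 2 * day   -> timedelta of 9 days
#   second * 3 / 2   -> timedelta of 1.5 seconds ( 0:00:01.500000 )
#   second * 1.5     -> raises TypeError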
def dateTime():
"""
Return current UTC datetime, as datetime.datetime object
"""
return dt.utcnow()
def date( myDateTime = None ):
"""
Return current UTC date, as datetime.date object
    if a _dateTimeType is passed as argument its associated date is returned
"""
if type( myDateTime ) == _dateTimeType:
return myDateTime.date()
return dateTime().date()
def time( myDateTime = None ):
"""
Return current UTC time, as datetime.time object
    if a _dateTimeType is passed as argument its associated time is returned
"""
if not type( myDateTime ) == _dateTimeType:
myDateTime = dateTime()
return myDateTime - datetime.datetime( myDateTime.year, myDateTime.month, myDateTime.day )
def toEpoch( dateTimeObject = None ):
"""
Get seconds since epoch
"""
if not dateTimeObject:
dateTimeObject = dateTime()
return nativetime.mktime( dateTimeObject.timetuple() )
def fromEpoch( epoch ):
"""
Get datetime object from epoch
"""
return dt.fromtimestamp( epoch )
def to2K( dateTimeObject = None ):
"""
    Get seconds, with microsecond precision, since 2K
"""
if not dateTimeObject:
dateTimeObject = dateTime()
delta = dateTimeObject - dt
return delta.days * 86400 + delta.seconds + delta.microseconds / 1000000.
def from2K( seconds2K = None ):
"""
Get date from seconds since 2K
"""
if not seconds2K:
seconds2K = to2K( dt )
return dt + int( seconds2K ) * second + int( seconds2K % 1 * 1000000 ) * microsecond
def toString( myDate = None ):
"""
Convert to String
if argument type is neither _dateTimeType, _dateType, nor _timeType
the current dateTime converted to String is returned instead
    Notice: datetime.timedelta objects are converted to strings using the format:
    [day] days [hour]:[min]:[sec]:[microsec]
    where hour, min, sec, microsec are always positive integers,
    and day carries the sign.
    To keep internal consistency we are using:
    [hour]:[min]:[sec]:[microsec]
    where min, sec, microsec are always positive integers and hour carries the
    sign.
"""
if type( myDate ) == _dateTimeType :
return str( myDate )
elif type( myDate ) == _dateType :
return str( myDate )
elif type( myDate ) == _timeType :
return '%02d:%02d:%02d.%06d' % ( myDate.days * 24 + myDate.seconds / 3600,
myDate.seconds % 3600 / 60,
myDate.seconds % 60,
myDate.microseconds )
else:
return toString( dateTime() )
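# For example (following the timedelta convention described above):
#   toString( hour + minute ) -> '01:01:00.000000'
#   toString( -hour )         -> '-1:00:00.000000'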
def fromString( myDate = None ):
"""
    Convert date/time/datetime String back to appropriate objects
    The format of the string is assumed to be that returned by the toString method.
See notice on toString method
On Error, return None
"""
  if type( myDate ) in StringTypes:
if myDate.find( ' ' ) > 0:
dateTimeTuple = myDate.split( ' ' )
dateTuple = dateTimeTuple[0].split( '-' )
      try:
        return ( datetime.datetime( year = int( dateTuple[0] ),
                                    month = int( dateTuple[1] ),
                                    day = int( dateTuple[2] ) ) +
                 fromString( dateTimeTuple[1] ) )
      except:
        return None
elif myDate.find( ':' ) > 0:
timeTuple = myDate.replace( '.', ':' ).split( ':' )
try:
if len( timeTuple ) == 4:
return datetime.timedelta( hours = int( timeTuple[0] ),
minutes = int( timeTuple[1] ),
seconds = int( timeTuple[2] ),
microseconds = int( timeTuple[3] ) )
elif len( timeTuple ) == 3:
return datetime.timedelta( hours = int( timeTuple[0] ),
minutes = int( timeTuple[1] ),
seconds = int( timeTuple[2] ),
microseconds = 0 )
else:
return None
except:
return None
elif myDate.find( '-' ) > 0:
dateTuple = myDate.split( '-' )
try:
return datetime.date( int( dateTuple[0] ), int( dateTuple[1] ), int( dateTuple[2] ) )
except:
return None
return None
class timeInterval:
"""
Simple class to define a timeInterval object able to check if a given
dateTime is inside
"""
def __init__( self, initialDateTime, intervalTimeDelta ):
"""
Initialization method, it requires the initial dateTime and the
timedelta that define the limits.
The upper limit is not included thus it is [begin,end)
If not properly initialized an error flag is set, and subsequent calls
to any method will return None
"""
if ( type( initialDateTime ) != _dateTimeType or
type( intervalTimeDelta ) != _timeType ):
self.__error = True
return None
self.__error = False
if intervalTimeDelta.days < 0:
self.__startDateTime = initialDateTime + intervalTimeDelta
self.__endDateTime = initialDateTime
else:
self.__startDateTime = initialDateTime
self.__endDateTime = initialDateTime + intervalTimeDelta
def includes( self, myDateTime ):
"""
"""
if self.__error :
return None
if type( myDateTime ) != _dateTimeType :
return None
if myDateTime < self.__startDateTime :
return False
if myDateTime >= self.__endDateTime :
return False
return True
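# Usage sketch for timeInterval (the interval is [begin, end) as noted above):
#   interval = timeInterval( dateTime(), hour )
#   interval.includes( dateTime() + minute )   -> True
#   interval.includes( dateTime() + 2 * hour ) -> False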
_dateTimeType = type( dateTime() )
_dateType = type( date() )
_timeType = type( time() )
_allTimeTypes = ( _dateTimeType, _timeType )
_allDateTypes = ( _dateTimeType, _dateType )
_allTypes = ( _dateTimeType, _dateType, _timeType )
|
UWPCE-PythonCert/IntroPython2016 | refs/heads/master | students/ninadn/class8/circle.py | 2 | #Test for circle class
class Circle:
def __init__(self, radius):
self.radius = radius
self.diameter = radius * 2
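# Usage sketch: Circle(4) has radius 4 and diameter 8. Note that diameter is
# computed once in __init__, so a later change to radius does not update it.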
|
b0ri5/nishe-googlecode | refs/heads/master | scons/scons-local-1.3.0/SCons/Scanner/Prog.py | 5 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/Prog.py 4720 2010/03/24 03:14:11 jars"
import string
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
# global, set by --debug=findlibs
print_find_libs = None
def ProgramScanner(**kw):
"""Return a prototype Scanner instance for scanning executable
files for static-lib dependencies"""
kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
ps = apply(SCons.Scanner.Base, [scan, "ProgramScanner"], kw)
return ps
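# Usage sketch (illustrative; SCons normally registers this scanner itself
# via its default tool setup):
#   env = Environment(LIBS=['m'], LIBPATH=['/usr/lib'])
#   env.Append(SCANNERS=ProgramScanner())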
def scan(node, env, libpath = ()):
"""
This scanner scans program files for static-library
dependencies. It will search the LIBPATH environment variable
for libraries specified in the LIBS variable, returning any
files it finds as dependencies.
"""
try:
libs = env['LIBS']
except KeyError:
# There are no LIBS in this environment, so just return a null list:
return []
if SCons.Util.is_String(libs):
libs = string.split(libs)
else:
libs = SCons.Util.flatten(libs)
try:
prefix = env['LIBPREFIXES']
if not SCons.Util.is_List(prefix):
prefix = [ prefix ]
except KeyError:
prefix = [ '' ]
try:
suffix = env['LIBSUFFIXES']
if not SCons.Util.is_List(suffix):
suffix = [ suffix ]
except KeyError:
suffix = [ '' ]
pairs = []
for suf in map(env.subst, suffix):
for pref in map(env.subst, prefix):
pairs.append((pref, suf))
result = []
if callable(libpath):
libpath = libpath()
find_file = SCons.Node.FS.find_file
adjustixes = SCons.Util.adjustixes
for lib in libs:
if SCons.Util.is_String(lib):
lib = env.subst(lib)
for pref, suf in pairs:
l = adjustixes(lib, pref, suf)
l = find_file(l, libpath, verbose=print_find_libs)
if l:
result.append(l)
else:
result.append(lib)
return result
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
tectronics/pyodbc | refs/heads/master | tests3/exceltests.py | 44 | #!/usr/bin/python
# Tests for reading from Excel files.
#
# I have not been able to successfully create or modify Excel files.
import sys, os, re
import unittest
from os.path import abspath
from testutils import *
CNXNSTRING = None
class ExcelTestCase(unittest.TestCase):
def __init__(self, method_name):
unittest.TestCase.__init__(self, method_name)
def setUp(self):
self.cnxn = pyodbc.connect(CNXNSTRING, autocommit=True)
self.cursor = self.cnxn.cursor()
for i in range(3):
try:
self.cursor.execute("drop table t%d" % i)
self.cnxn.commit()
except:
pass
self.cnxn.rollback()
def tearDown(self):
try:
self.cursor.close()
self.cnxn.close()
except:
# If we've already closed the cursor or connection, exceptions are thrown.
pass
def test_getinfo_string(self):
value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
self.assert_(isinstance(value, str))
def test_getinfo_bool(self):
value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
self.assert_(isinstance(value, bool))
def test_getinfo_int(self):
value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
        self.assert_(isinstance(value, int))  # 'long' does not exist in Python 3
def test_getinfo_smallint(self):
value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
self.assert_(isinstance(value, int))
def test_read_sheet(self):
# The first method of reading data is to access worksheets by name in this format [name$].
#
# Our second sheet is named Sheet2 and has two columns. The first has values 10, 20, 30, etc.
rows = self.cursor.execute("select * from [Sheet2$]").fetchall()
self.assertEquals(len(rows), 5)
for index, row in enumerate(rows):
self.assertEquals(row.s2num, float(index + 1) * 10)
def test_read_range(self):
# The second method of reading data is to assign a name to a range of cells and access that as a table.
#
# Our first worksheet has a section named Table1. The first column has values 1, 2, 3, etc.
rows = self.cursor.execute("select * from Table1").fetchall()
self.assertEquals(len(rows), 10)
for index, row in enumerate(rows):
self.assertEquals(row.num, float(index + 1))
self.assertEquals(row.val, chr(ord('a') + index))
def test_tables(self):
# This is useful for figuring out what is available
tables = [ row.table_name for row in self.cursor.tables() ]
assert 'Sheet2$' in tables, 'tables: %s' % ' '.join(tables)
# def test_append(self):
# rows = self.cursor.execute("select s2num, s2val from [Sheet2$]").fetchall()
#
# print rows
#
# nextnum = max([ row.s2num for row in rows ]) + 10
#
# self.cursor.execute("insert into [Sheet2$](s2num, s2val) values (?, 'z')", nextnum)
#
# row = self.cursor.execute("select s2num, s2val from [Sheet2$] where s2num=?", nextnum).fetchone()
# self.assertTrue(row)
#
# print 'added:', nextnum, len(rows), 'rows'
#
# self.assertEquals(row.s2num, nextnum)
# self.assertEquals(row.s2val, 'z')
#
# self.cnxn.commit()
def main():
from optparse import OptionParser
    parser = OptionParser()
parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)")
parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items")
parser.add_option("-t", "--test", help="Run only the named test")
(options, args) = parser.parse_args()
if args:
parser.error('no arguments expected')
global CNXNSTRING
path = dirname(abspath(__file__))
filename = join(path, 'test.xls')
assert os.path.exists(filename)
CNXNSTRING = 'Driver={Microsoft Excel Driver (*.xls)};DBQ=%s;READONLY=FALSE' % filename
cnxn = pyodbc.connect(CNXNSTRING, autocommit=True)
print_library_info(cnxn)
cnxn.close()
suite = load_tests(ExcelTestCase, options.test)
testRunner = unittest.TextTestRunner(verbosity=options.verbose)
result = testRunner.run(suite)
if __name__ == '__main__':
# Add the build directory to the path so we're testing the latest build, not the installed version.
add_to_path()
import pyodbc
main()
|
ashemah/bh_architek | refs/heads/master | server/core/migrations/__init__.py | 12133432 | |
leorochael/odoo | refs/heads/8.0 | addons/payment_buckaroo/__openerp__.py | 374 | # -*- coding: utf-8 -*-
{
'name': 'Buckaroo Payment Acquirer',
'category': 'Hidden',
'summary': 'Payment Acquirer: Buckaroo Implementation',
'version': '1.0',
'description': """Buckaroo Payment Acquirer""",
'author': 'OpenERP SA',
'depends': ['payment'],
'data': [
'views/buckaroo.xml',
'views/payment_acquirer.xml',
'data/buckaroo.xml',
],
'installable': True,
}
|
superchilli/webapp | refs/heads/master | venv/lib/python2.7/site-packages/sqlalchemy/ext/declarative/__init__.py | 33 | # ext/declarative/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import declarative_base, synonym_for, comparable_using, \
instrument_declarative, ConcreteBase, AbstractConcreteBase, \
DeclarativeMeta, DeferredReflection, has_inherited_table,\
declared_attr, as_declarative
__all__ = ['declarative_base', 'synonym_for', 'has_inherited_table',
'comparable_using', 'instrument_declarative', 'declared_attr',
'as_declarative',
'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta',
'DeferredReflection']
|
cfossace/test | refs/heads/master | malstor/malstor_lib/malstorconfig.py | 1 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
### DO NOT EDIT THIS FILE ###
__all__ = [
'project_path_not_found',
'get_data_file',
'get_data_path',
]
# Where your project will look for your data (for instance, images and ui
# files). By default, this is ../data, relative your trunk layout
__malstor_data_directory__ = '../data/'
__license__ = ''
__version__ = 'VERSION'
import os
from locale import gettext as _
class project_path_not_found(Exception):
"""Raised when we can't find the project directory."""
def get_data_file(*path_segments):
"""Get the full path to a data file.
Returns the path to a file underneath the data directory (as defined by
`get_data_path`). Equivalent to os.path.join(get_data_path(),
*path_segments).
"""
return os.path.join(get_data_path(), *path_segments)
def get_data_path():
"""Retrieve malstor data path
This path is by default <malstor_lib_path>/../data/ in trunk
and /usr/share/malstor in an installed version but this path
is specified at installation time.
"""
# Get pathname absolute or relative.
path = os.path.join(
os.path.dirname(__file__), __malstor_data_directory__)
abs_data_path = os.path.abspath(path)
if not os.path.exists(abs_data_path):
raise project_path_not_found
return abs_data_path
def get_version():
return __version__
|
joshka/SoundCloud2.Bundle | refs/heads/master | Contents/Libraries/Shared/requests/adapters.py | 23 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
        if proxy not in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url = urldefragauth(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
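    # For example (illustrative values):
    #   proxy_headers('http://user:pass@proxy.example.com:3128')
    #   -> {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}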
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=Retry(self.max_retries, read=False),
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
|
SamuelMarks/fabric | refs/heads/master | tests/test_parallel.py | 36 | from __future__ import with_statement
from fabric.api import run, parallel, env, hide, execute, settings
from utils import FabricTest, eq_, aborts, mock_streams
from server import server, RESPONSES, USER, HOST, PORT
# TODO: move this into test_tasks? meh.
class OhNoesException(Exception): pass
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
@server(port=2200)
@server(port=2201)
def test_env_host_no_user_or_port(self):
"""
Ensure env.host doesn't get user/port parts when parallel
"""
@parallel
def _task():
run("ls /simple")
assert USER not in env.host
assert str(PORT) not in env.host
host_string = '%s@%s:%%s' % (USER, HOST)
with hide('everything'):
execute(_task, hosts=[host_string % 2200, host_string % 2201])
@server(port=2200)
@server(port=2201)
@aborts
def test_parallel_failures_abort(self):
with hide('everything'):
host1 = '127.0.0.1:2200'
host2 = '127.0.0.1:2201'
@parallel
def mytask():
run("ls /")
if env.host_string == host2:
raise OhNoesException
execute(mytask, hosts=[host1, host2])
@server(port=2200)
@server(port=2201)
@mock_streams('stderr') # To hide the traceback for now
def test_parallel_failures_honor_warn_only(self):
with hide('everything'):
host1 = '127.0.0.1:2200'
host2 = '127.0.0.1:2201'
@parallel
def mytask():
run("ls /")
if env.host_string == host2:
raise OhNoesException
with settings(warn_only=True):
result = execute(mytask, hosts=[host1, host2])
eq_(result[host1], None)
assert isinstance(result[host2], OhNoesException)
@server(port=2200)
@server(port=2201)
def test_parallel_implies_linewise(self):
host1 = '127.0.0.1:2200'
host2 = '127.0.0.1:2201'
assert not env.linewise
@parallel
def mytask():
run("ls /")
return env.linewise
with hide('everything'):
result = execute(mytask, hosts=[host1, host2])
eq_(result[host1], True)
eq_(result[host2], True)
|
diagramsoftware/odoo | refs/heads/8.0 | addons/website_sale_delivery/controllers/__init__.py | 7372 | import main
|
dsullivan7/scikit-learn | refs/heads/master | examples/cluster/plot_lena_ward_segmentation.py | 271 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
|
joachimmetz/plaso | refs/heads/main | plaso/cli/image_export_tool.py | 2 | # -*- coding: utf-8 -*-
"""The image export CLI tool."""
import argparse
import codecs
import collections
import io
import json
import os
import textwrap
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.resolver import context
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.analyzers.hashers import manager as hashers_manager
from plaso.cli import logger
from plaso.cli import storage_media_tool
from plaso.cli.helpers import manager as helpers_manager
from plaso.engine import engine
from plaso.engine import extractors
from plaso.engine import path_helper
from plaso.filters import file_entry as file_entry_filters
from plaso.lib import errors
from plaso.lib import loggers
from plaso.lib import specification
class ImageExportTool(storage_media_tool.StorageMediaTool):
"""Class that implements the image export CLI tool.
Attributes:
has_filters (bool): True if filters have been specified via the options.
list_signature_identifiers (bool): True if information about the signature
identifiers should be shown.
"""
NAME = 'image_export'
DESCRIPTION = (
'This is a simple collector designed to export files inside an '
'image, both within a regular RAW image as well as inside a VSS. '
'The tool uses a collection filter that uses the same syntax as a '
'targeted plaso filter.')
EPILOG = 'And that is how you export files, plaso style.'
_COPY_BUFFER_SIZE = 32768
_DIRTY_CHARACTERS = frozenset([
'\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
'\x08', '\x09', '\x0a', '\x0b', '\x0c', '\x0d', '\x0e', '\x0f',
'\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17',
'\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f',
os.path.sep, '!', '$', '%', '&', '*', '+', ':', ';', '<', '>',
'?', '@', '|', '~', '\x7f'])
_HASHES_FILENAME = 'hashes.json'
_READ_BUFFER_SIZE = 4096
# TODO: remove this redirect.
_SOURCE_OPTION = 'image'
_SOURCE_TYPES_TO_PREPROCESS = frozenset([
dfvfs_definitions.SOURCE_TYPE_DIRECTORY,
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE])
_SPECIFICATION_FILE_ENCODING = 'utf-8'
def __init__(self, input_reader=None, output_writer=None):
"""Initializes the CLI tool object.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
"""
super(ImageExportTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._abort = False
self._artifact_definitions_path = None
self._artifact_filters = None
self._artifacts_registry = None
self._custom_artifacts_path = None
self._destination_path = None
self._digests = {}
self._filter_collection = file_entry_filters.FileEntryFilterCollection()
self._filter_file = None
self._no_hashes = False
self._path_spec_extractor = extractors.PathSpecExtractor()
self._process_memory_limit = None
self._paths_by_hash = collections.defaultdict(list)
self._resolver_context = context.Context()
self._skip_duplicates = True
self.has_filters = False
self.list_signature_identifiers = False
def _CalculateDigestHash(self, file_entry, data_stream_name):
"""Calculates a SHA-256 digest of the contents of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry whose content will be hashed.
data_stream_name (str): name of the data stream whose content is to be
hashed.
Returns:
str: hexadecimal representation of the SHA-256 hash or None if the digest
cannot be determined.
"""
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if not file_object:
return None
file_object.seek(0, os.SEEK_SET)
hasher_object = hashers_manager.HashersManager.GetHasher('sha256')
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hasher_object.Update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
return hasher_object.GetStringDigest()
def _CreateSanitizedDestination(
self, source_file_entry, source_path_spec, source_data_stream_name,
destination_path):
"""Creates a sanitized path of both destination directory and filename.
This function replaces non-printable and other characters defined in
_DIRTY_CHARACTERS with an underscore "_".
Args:
source_file_entry (dfvfs.FileEntry): file entry of the source file.
source_path_spec (dfvfs.PathSpec): path specification of the source file.
source_data_stream_name (str): name of the data stream of the source file
entry.
destination_path (str): path of the destination directory.
Returns:
tuple[str, str]: sanitized paths of both destination directory and
filename.
"""
file_system = source_file_entry.GetFileSystem()
path = getattr(source_path_spec, 'location', None)
path_segments = file_system.SplitPath(path)
# Sanitize each path segment.
for index, path_segment in enumerate(path_segments):
path_segments[index] = ''.join([
character if character not in self._DIRTY_CHARACTERS else '_'
for character in path_segment])
target_filename = path_segments.pop()
parent_path_spec = getattr(source_file_entry.path_spec, 'parent', None)
while parent_path_spec:
if parent_path_spec.type_indicator in (
dfvfs_definitions.FILE_SYSTEM_TYPE_INDICATORS):
path_segments.insert(0, parent_path_spec.location[1:])
break
if parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_VSHADOW):
path_segments.insert(0, parent_path_spec.location[1:])
parent_path_spec = getattr(parent_path_spec, 'parent', None)
target_directory = os.path.join(destination_path, *path_segments)
if source_data_stream_name:
target_filename = '{0:s}_{1:s}'.format(
target_filename, source_data_stream_name)
return target_directory, target_filename
def _ExtractDataStream(
self, file_entry, data_stream_name, destination_path,
skip_duplicates=True):
"""Extracts a data stream.
Args:
file_entry (dfvfs.FileEntry): file entry containing the data stream.
data_stream_name (str): name of the data stream.
destination_path (str): path where the extracted files should be stored.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
if not data_stream_name and not file_entry.IsFile():
return
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
file_entry.path_spec)
try:
digest = self._CalculateDigestHash(file_entry, data_stream_name)
except (IOError, dfvfs_errors.BackEndError) as exception:
logger.error((
'[skipping] unable to read content of file entry: {0:s} '
'with error: {1!s}').format(display_name, exception))
return
if not digest:
logger.error(
'[skipping] unable to read content of file entry: {0:s}'.format(
display_name))
return
target_directory, target_filename = self._CreateSanitizedDestination(
file_entry, file_entry.path_spec, data_stream_name, destination_path)
    # If the destination path does not end with a path separator, append one
    # so the prefix check below behaves consistently.
if not destination_path.endswith(os.path.sep):
destination_path = destination_path + os.path.sep
target_path = os.path.join(target_directory, target_filename)
if target_path.startswith(destination_path):
path = target_path[len(destination_path):]
self._paths_by_hash[digest].append(path)
if skip_duplicates:
duplicate_display_name = self._digests.get(digest, None)
if duplicate_display_name:
logger.warning((
'[skipping] file entry: {0:s} is a duplicate of: {1:s} with '
'digest: {2:s}').format(
display_name, duplicate_display_name, digest))
return
self._digests[digest] = display_name
if not os.path.isdir(target_directory):
os.makedirs(target_directory)
if os.path.exists(target_path):
logger.warning((
'[skipping] unable to export contents of file entry: {0:s} '
'because exported file: {1:s} already exists.').format(
display_name, target_path))
return
try:
self._WriteFileEntry(file_entry, data_stream_name, target_path)
except (IOError, dfvfs_errors.BackEndError) as exception:
logger.error((
'[skipping] unable to export contents of file entry: {0:s} '
'with error: {1!s}').format(display_name, exception))
try:
os.remove(target_path)
except (IOError, OSError):
pass
def _ExtractFileEntry(
self, file_entry, destination_path, skip_duplicates=True):
"""Extracts a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry whose content is to be written.
destination_path (str): path where the extracted files should be stored.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
"""
if not self._filter_collection.Matches(file_entry):
return
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
self._ExtractDataStream(
file_entry, data_stream.name, destination_path,
skip_duplicates=skip_duplicates)
file_entry_processed = True
if not file_entry_processed:
self._ExtractDataStream(
file_entry, '', destination_path, skip_duplicates=skip_duplicates)
# TODO: merge with collector and/or engine.
def _Extract(
self, source_path_specs, destination_path, output_writer,
artifact_filters, filter_file, artifact_definitions_path,
custom_artifacts_path, skip_duplicates=True):
"""Extracts files.
This method runs the file extraction process on the image and
potentially on every VSS if that is wanted.
Args:
source_path_specs (list[dfvfs.PathSpec]): path specifications to extract.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
      artifact_filters (list[str]): names of artifact definitions that are
          used for filtering file system and Windows Registry key paths.
      filter_file (str): path of the file that contains the filter file path
          filters.
      artifact_definitions_path (str): path to artifact definitions file.
      custom_artifacts_path (str): path to custom artifact definitions file.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
Raises:
BadConfigOption: if an invalid collection filter was specified.
"""
extraction_engine = engine.BaseEngine()
# If the source is a directory or a storage media image
# run pre-processing.
if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
self._PreprocessSources(extraction_engine)
try:
extraction_engine.BuildCollectionFilters(
artifact_definitions_path, custom_artifacts_path,
extraction_engine.knowledge_base, artifact_filters, filter_file)
except errors.InvalidFilter as exception:
raise errors.BadConfigOption(
'Unable to build collection filters with error: {0!s}'.format(
exception))
filters_helper = extraction_engine.collection_filters_helper
excluded_find_specs = None
included_find_specs = None
if filters_helper:
excluded_find_specs = filters_helper.excluded_file_system_find_specs
included_find_specs = filters_helper.included_file_system_find_specs
output_writer.Write('Extracting file entries.\n')
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, find_specs=included_find_specs,
resolver_context=self._resolver_context)
for path_spec in path_spec_generator:
file_entry = path_spec_resolver.Resolver.OpenFileEntry(
path_spec, resolver_context=self._resolver_context)
if not file_entry:
path_spec_string = self._GetPathSpecificationString(path_spec)
logger.warning(
          'Unable to open file entry for path specification: {0:s}'.format(
path_spec_string))
continue
skip_file_entry = False
for find_spec in excluded_find_specs or []:
skip_file_entry = find_spec.CompareLocation(file_entry)
if skip_file_entry:
break
if skip_file_entry:
logger.info('Skipped: {0:s} because of exclusion filter.'.format(
file_entry.path_spec.location))
continue
self._ExtractFileEntry(
file_entry, destination_path, skip_duplicates=skip_duplicates)
def _ParseExtensionsString(self, extensions_string):
"""Parses the extensions string.
Args:
extensions_string (str): comma separated extensions to filter.
"""
if not extensions_string:
return
extensions_string = extensions_string.lower()
extensions = [
extension.strip() for extension in extensions_string.split(',')]
file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)
self._filter_collection.AddFilter(file_entry_filter)
def _ParseNamesString(self, names_string):
"""Parses the name string.
Args:
names_string (str): comma separated filenames to filter.
"""
if not names_string:
return
names_string = names_string.lower()
names = [name.strip() for name in names_string.split(',')]
file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)
self._filter_collection.AddFilter(file_entry_filter)
def _ParseFilterOptions(self, options):
"""Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
names = ['artifact_filters', 'date_filters', 'filter_file']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=names)
extensions_string = self.ParseStringOption(options, 'extensions_string')
self._ParseExtensionsString(extensions_string)
names_string = getattr(options, 'names_string', None)
self._ParseNamesString(names_string)
signature_identifiers = getattr(options, 'signature_identifiers', None)
try:
self._ParseSignatureIdentifiers(
self._data_location, signature_identifiers)
except (IOError, ValueError) as exception:
raise errors.BadConfigOption(exception)
if self._artifact_filters or self._filter_file:
self.has_filters = True
else:
self.has_filters = self._filter_collection.HasFilters()
def _ParseSignatureIdentifiers(self, data_location, signature_identifiers):
"""Parses the signature identifiers.
Args:
data_location (str): location of the format specification file, for
example, "signatures.conf".
signature_identifiers (str): comma separated signature identifiers.
Raises:
IOError: if the format specification file could not be read from
the specified data location.
OSError: if the format specification file could not be read from
the specified data location.
ValueError: if no data location was specified.
"""
if not signature_identifiers:
return
if not data_location:
raise ValueError('Missing data location.')
path = os.path.join(data_location, 'signatures.conf')
if not os.path.exists(path):
raise IOError(
'No such format specification file: {0:s}'.format(path))
try:
specification_store = self._ReadSpecificationFile(path)
except IOError as exception:
raise IOError((
'Unable to read format specification file: {0:s} with error: '
'{1!s}').format(path, exception))
signature_identifiers = signature_identifiers.lower()
signature_identifiers = [
identifier.strip() for identifier in signature_identifiers.split(',')]
file_entry_filter = file_entry_filters.SignaturesFileEntryFilter(
specification_store, signature_identifiers)
self._filter_collection.AddFilter(file_entry_filter)
def _PreprocessSources(self, extraction_engine):
"""Preprocesses the sources.
Args:
extraction_engine (BaseEngine): extraction engine to preprocess
the sources.
"""
logger.debug('Starting preprocessing.')
try:
artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry(
self._artifact_definitions_path, self._custom_artifacts_path)
# Setting storage writer to None here since we do not want to store
# preprocessing information.
extraction_engine.PreprocessSources(
artifacts_registry, self._source_path_specs, None,
resolver_context=self._resolver_context)
except IOError as exception:
logger.error('Unable to preprocess with error: {0!s}'.format(exception))
logger.debug('Preprocessing done.')
def _ReadSpecificationFile(self, path):
"""Reads the format specification file.
Args:
path (str): path of the format specification file.
Returns:
FormatSpecificationStore: format specification store.
"""
specification_store = specification.FormatSpecificationStore()
with io.open(
path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:
for line in file_object.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
identifier, offset, pattern = line.split()
except ValueError:
logger.error('[skipping] invalid line: {0:s}'.format(line))
continue
try:
offset = int(offset, 10)
except ValueError:
logger.error('[skipping] invalid offset in line: {0:s}'.format(line))
continue
try:
# TODO: find another way to do this that doesn't use an undocumented
# API.
pattern = codecs.escape_decode(pattern)[0]
# ValueError is raised when the patterns contains invalid escaped
# characters, such as "\xg1".
except ValueError:
logger.error(
'[skipping] invalid pattern in line: {0:s}'.format(line))
continue
format_specification = specification.FormatSpecification(identifier)
format_specification.AddNewSignature(pattern, offset=offset)
specification_store.AddSpecification(format_specification)
return specification_store
def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):
"""Writes the contents of the source file entry to a destination file.
Note that this function will overwrite an existing file.
Args:
file_entry (dfvfs.FileEntry): file entry whose content is to be written.
data_stream_name (str): name of the data stream whose content is to be
written.
destination_file (str): path of the destination file.
"""
source_file_object = file_entry.GetFileObject(
data_stream_name=data_stream_name)
if not source_file_object:
return
with open(destination_file, 'wb') as destination_file_object:
source_file_object.seek(0, os.SEEK_SET)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
while data:
destination_file_object.write(data)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
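  # Hypothetical usage: self._WriteFileEntry(file_entry, '', '/tmp/NTUSER.DAT')
  # would copy the default (unnamed) data stream to the destination path in
  # _COPY_BUFFER_SIZE chunks.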
def AddFilterOptions(self, argument_group):
"""Adds the filter options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
names = ['artifact_filters', 'date_filters', 'filter_file']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, names=names)
argument_group.add_argument(
'-x', '--extensions', dest='extensions_string', action='store',
type=str, metavar='EXTENSIONS', help=(
            'Filter on file name extensions. This option accepts multiple '
            'comma separated values e.g. "csv,docx,pst".'))
argument_group.add_argument(
'--names', dest='names_string', action='store',
type=str, metavar='NAMES', help=(
            'Filter on file names. This option accepts a comma separated '
            'string denoting all file names, e.g. --names '
            '"NTUSER.DAT,UsrClass.dat".'))
argument_group.add_argument(
'--signatures', dest='signature_identifiers', action='store',
type=str, metavar='IDENTIFIERS', help=(
'Filter on file format signature identifiers. This option '
'accepts multiple comma separated values e.g. "esedb,lnk". '
'Use "list" to show an overview of the supported file format '
'signatures.'))
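    # Hypothetical invocations exercising these filter options:
    #   image_export.py -x "docx,pst" image.raw
    #   image_export.py --names "NTUSER.DAT,UsrClass.dat" image.raw
    #   image_export.py --signatures list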
def ListSignatureIdentifiers(self):
"""Lists the signature identifier.
Raises:
BadConfigOption: if the data location is invalid.
"""
if not self._data_location:
raise errors.BadConfigOption('Missing data location.')
path = os.path.join(self._data_location, 'signatures.conf')
if not os.path.exists(path):
raise errors.BadConfigOption(
'No such format specification file: {0:s}'.format(path))
try:
specification_store = self._ReadSpecificationFile(path)
except IOError as exception:
raise errors.BadConfigOption((
'Unable to read format specification file: {0:s} with error: '
'{1!s}').format(path, exception))
identifiers = []
for format_specification in specification_store.specifications:
identifiers.append(format_specification.identifier)
self._output_writer.Write('Available signature identifiers:\n')
self._output_writer.Write(
'\n'.join(textwrap.wrap(', '.join(sorted(identifiers)), 79)))
self._output_writer.Write('\n\n')
def ParseArguments(self, arguments):
"""Parses the command line arguments.
Args:
arguments (list[str]): command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
self.AddInformationalOptions(argument_parser)
argument_helper_names = [
'artifact_definitions', 'data_location', 'vfs_backend']
if self._CanEnforceProcessMemoryLimit():
argument_helper_names.append('process_resources')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_parser, names=argument_helper_names)
self.AddLogFileOptions(argument_parser)
self.AddStorageMediaImageOptions(argument_parser)
self.AddVSSProcessingOptions(argument_parser)
self.AddCredentialOptions(argument_parser)
self.AddFilterOptions(argument_parser)
argument_parser.add_argument(
'-w', '--write', action='store', dest='path', type=str,
metavar='PATH', default='export', help=(
'The directory in which extracted files should be stored.'))
argument_parser.add_argument(
'--include_duplicates', '--include-duplicates',
dest='include_duplicates', action='store_true', default=False, help=(
'By default a digest hash (SHA-256) is calculated for each file '
'(data stream). These hashes are compared to the previously '
'exported files and duplicates are skipped. Use this option to '
'include duplicate files in the export.'))
argument_parser.add_argument(
'--no_hashes', '--no-hashes', dest='no_hashes', action='store_true',
default=False, help=(
'Do not generate the {0:s} file'.format(self._HASHES_FILENAME)))
argument_parser.add_argument(
self._SOURCE_OPTION, nargs='?', action='store', metavar='IMAGE',
default=None, type=str, help=(
            'The full path to the image file that we are about to extract '
            'files from; it should be a raw image or another image type '
            'that Plaso supports.'))
try:
options = argument_parser.parse_args(arguments)
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('')
self._output_writer.Write(argument_parser.format_help())
return False
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('')
self._output_writer.Write(argument_parser.format_usage())
return False
self._WaitUserWarning()
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True
def ParseOptions(self, options):
"""Parses the options and initializes the front-end.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
# The data location is required to list signatures.
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['data_location'])
self.show_troubleshooting = getattr(options, 'show_troubleshooting', False)
    # Check the list options first, otherwise required options will raise.
signature_identifiers = self.ParseStringOption(
options, 'signature_identifiers')
if signature_identifiers == 'list':
self.list_signature_identifiers = True
if self.list_signature_identifiers or self.show_troubleshooting:
return
self._ParseInformationalOptions(options)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._destination_path = self.ParseStringOption(
options, 'path', default_value='export')
if not self._data_location:
logger.warning('Unable to automatically determine data location.')
argument_helper_names = [
'artifact_definitions', 'process_resources', 'vfs_backend']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
if self._vfs_back_end == 'fsext':
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_EXT)
elif self._vfs_back_end == 'fshfs':
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_HFS)
elif self._vfs_back_end == 'fsntfs':
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_NTFS)
elif self._vfs_back_end == 'tsk':
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_GPT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION)
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
elif self._vfs_back_end == 'vsgpt':
dfvfs_definitions.PREFERRED_GPT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_GPT)
self._ParseFilterOptions(options)
include_duplicates = getattr(options, 'include_duplicates', False)
self._skip_duplicates = not include_duplicates
self._no_hashes = getattr(options, 'no_hashes', False)
self._EnforceProcessMemoryLimit(self._process_memory_limit)
def PrintFilterCollection(self):
"""Prints the filter collection."""
self._filter_collection.Print(self._output_writer)
def ProcessSources(self):
"""Processes the sources.
Raises:
SourceScannerError: if the source scanner could not find a supported
file system.
UserAbort: if the user initiated an abort.
"""
self.ScanSource(self._source_path)
self._output_writer.Write('Export started.\n')
if not os.path.isdir(self._destination_path):
os.makedirs(self._destination_path)
self._Extract(
self._source_path_specs, self._destination_path,
self._output_writer, self._artifact_filters, self._filter_file,
self._artifact_definitions_path, self._custom_artifacts_path,
skip_duplicates=self._skip_duplicates)
json_data = []
if not self._no_hashes:
with open(os.path.join(
self._destination_path, self._HASHES_FILENAME), 'w') as write_file:
for sha256, paths in self._paths_by_hash.items():
json_data.append({'sha256': sha256, 'paths': paths})
json.dump(json_data, write_file)
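        # The resulting file is a JSON list of objects, one per unique
        # digest, e.g. [{"sha256": "<hex digest>", "paths": ["a.txt"]}].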
self._output_writer.Write('Export completed.\n')
self._output_writer.Write('\n')
|
cgar/servo | refs/heads/master | tests/wpt/css-tests/tools/pytest/doc/en/example/assertion/failure_demo.py | 179 | from pytest import raises
import _pytest._code
import py
def otherfunc(a,b):
assert a==b
def somefunc(x,y):
otherfunc(x,y)
def otherfunc_multi(a,b):
assert (a ==
b)
def test_generative(param1, param2):
assert param1 * 2 < param2
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
metafunc.addcall(funcargs=dict(param1=3, param2=6))
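# addcall is a historical metafunc API (removed in later pytest versions);
# the call above parametrizes test_generative with param1=3 and param2=6, so
# the assertion 3 * 2 < 6 fails -- like every test in this demo module, the
# failure is intentional.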
class TestFailing(object):
def test_simple(self):
def f():
return 42
def g():
return 43
assert f() == g()
def test_simple_multiline(self):
otherfunc_multi(
42,
6*9)
def test_not(self):
def f():
return 42
assert not f()
class TestSpecialisedExplanations(object):
def test_eq_text(self):
assert 'spam' == 'eggs'
def test_eq_similar_text(self):
assert 'foo 1 bar' == 'foo 2 bar'
def test_eq_multiline_text(self):
assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
assert a == b
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
assert a == b
def test_eq_list(self):
assert [0, 1, 2] == [0, 1, 3]
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
assert a == b
def test_eq_dict(self):
assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
def test_eq_set(self):
assert set([0, 10, 11, 12]) == set([0, 20, 21])
def test_eq_longer_list(self):
assert [1,2] == [1,2,3]
def test_in_list(self):
assert 1 in [0, 2, 3, 4, 5]
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
assert 'foo' not in text
def test_not_in_text_single(self):
text = 'single foo line'
assert 'foo' not in text
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
assert 'foo' not in text
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
assert 'f'*70 not in text
def test_attribute():
class Foo(object):
b = 1
i = Foo()
assert i.b == 2
def test_attribute_instance():
class Foo(object):
b = 1
assert Foo().b == 2
def test_attribute_failure():
class Foo(object):
def _get_b(self):
raise Exception('Failed to get attrib')
b = property(_get_b)
i = Foo()
assert i.b == 2
def test_attribute_multiple():
class Foo(object):
b = 1
class Bar(object):
b = 2
assert Foo().b == Bar().b
def globf(x):
return x+1
class TestRaises:
def test_raises(self):
s = 'qwe'
raises(TypeError, "int(s)")
def test_raises_doesnt(self):
raises(IOError, "int('3')")
def test_raise(self):
raise ValueError("demo error")
def test_tupleerror(self):
a,b = [1]
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
a,b = l.pop()
def test_some_error(self):
if namenotexi:
pass
def func1(self):
assert 41 == 42
# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
module = py.std.imp.new_module(name)
code = _pytest._code.compile(src, name, 'exec')
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
module.foo()
class TestMoreErrors:
def test_complex_error(self):
def f():
return 44
def g():
return 43
somefunc(f(), g())
def test_z1_unpack_error(self):
l = []
a,b = l
def test_z2_type_error(self):
l = 3
a,b = l
def test_startswith(self):
s = "123"
g = "456"
assert s.startswith(g)
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
assert f().startswith(g())
def test_global_func(self):
assert isinstance(globf(42), float)
def test_instance(self):
self.x = 6*7
assert self.x != 42
def test_compare(self):
assert globf(10) < 5
def test_try_finally(self):
x = 1
try:
assert x == 0
finally:
x = 0
class TestCustomAssertMsg:
def test_single_line(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b"
def test_multiline(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b\n" \
"or does not appear to be b\none of those"
def test_custom_repr(self):
class JSON:
a = 1
def __repr__(self):
return "This is JSON\n{\n 'foo': 'bar'\n}"
a = JSON()
b = 2
assert a.a == b, a
|
Yelp/yelp_bytes | refs/heads/master | tests/yelp_bytes_test.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from yelp_bytes import to_bytes, to_utf8, to_native, from_bytes, from_utf8, unicode, PY2
# Define some interesting unicode inputs
class UNICODE:
ascii = 'A' # The most basic of unicode.
latin1 = ascii + 'ü' # U-umlaut. This is defined in latin1 but not ascii.
win1252 = latin1 + '€' # Euro sign. This is defined in windows-1252, but not latin1.
bmp = win1252 + 'Ł' # Polish crossed-L. This requires at least a two-byte encoding.
utf8 = bmp + '🐵' # Monkey-face emoji. This requires at least a three-byte encoding.
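# Each attribute needs a strictly larger character repertoire than the one
# before it: ascii fits in ASCII, latin1 needs Latin-1, win1252 needs
# windows-1252, bmp needs a multi-byte encoding and utf8 needs full Unicode
# coverage (summary of the comments above).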
def dunder_compat(cls):
if PY2:
if hasattr(cls, '__bytes__'):
cls.__str__ = cls.__bytes__
del cls.__bytes__
elif hasattr(cls, '__unicode__'):
cls.__str__ = cls.__unicode__
del cls.__unicode__
return cls
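# Rationale, as inferred from the code: under Python 2, bytes() is str() and
# dispatches to __str__, while unicode() dispatches to __unicode__, so the
# Python 3 style dunders used below are renamed when running on Python 2.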
@dunder_compat
class Unicodable:
"""unicode() is fine, but bytes() will barf"""
def __unicode__(self):
return UNICODE.utf8
unicodable = Unicodable()
@dunder_compat
class Utf8able:
"""bytes() and decode('UTF-8') is fine, but unicode() will barf"""
def __bytes__(self):
return UNICODE.utf8.encode('utf8')
utf8able = Utf8able()
@dunder_compat
class Win1252able:
"""bytes() is fine, but unicode() and decode('UTF-8') will barf"""
def __bytes__(self):
return UNICODE.utf8.encode('windows-1252', 'ignore')
win1252able = Win1252able()
class BytesLike:
"""looks a bit like python3 bytes, emulating a list of ints"""
def __iter__(self):
return iter(range(10))
byteslike = BytesLike()
bytesvalue = b''.join(
chr(b) if PY2 else bytes([b])
for b in byteslike
)
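# bytesvalue is b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t' on both Python 2
# and Python 3.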
both_from_funcs = pytest.mark.parametrize('testfunc', (from_bytes, from_utf8))
both_to_funcs = pytest.mark.parametrize('testfunc', (to_bytes, to_utf8))
@both_from_funcs
def test_with_unicode(testfunc):
# Unicode objects aren't touched.
assert UNICODE.utf8 is testfunc(UNICODE.utf8)
@both_from_funcs
def test_with_unicode_subclass(testfunc):
# Unicode subclasses (eg markupsafe) also go unmolested.
class MyText(unicode):
pass
mytext = MyText("abcdef")
assert mytext is testfunc(mytext)
@both_to_funcs
def test_with_bytes_subclass(testfunc):
# it would make sense for the same (above) to hold of a bytes subclass
class MyBytes(bytes):
pass
mybytes = MyBytes(b"abcdef")
assert mybytes is testfunc(mybytes)
@both_from_funcs
def test_with_utf8(testfunc):
utf8 = UNICODE.utf8.encode('utf8')
assert UNICODE.utf8 == testfunc(utf8)
def test_with_win1252():
win1252 = UNICODE.utf8.encode('windows-1252', 'ignore')
assert UNICODE.win1252.encode('windows-1252') == win1252
assert UNICODE.win1252 == from_bytes(win1252)
@both_from_funcs
def test_from_funcs_with_unicodable_object(testfunc):
assert UNICODE.utf8 == testfunc(unicodable)
@both_from_funcs
def test_from_funcs_with_utf8able_object(testfunc):
expected = UNICODE.utf8 if PY2 else repr(utf8able)
assert expected == testfunc(utf8able)
def test_from_bytes_with_win1252able_object():
expected = UNICODE.win1252 if PY2 else repr(win1252able)
assert expected == from_bytes(win1252able)
def test_from_utf8_with_win1252():
win1252 = UNICODE.utf8.encode('windows-1252', 'ignore')
with pytest.raises(UnicodeDecodeError):
from_utf8(win1252)
def test_from_utf8_with_win1252able_object():
if PY2:
with pytest.raises(UnicodeDecodeError):
from_utf8(win1252able)
else:
assert repr(win1252able) == from_utf8(win1252able)
@both_from_funcs
def test_from_funcs_with_byteslike_object(testfunc):
expected = repr(byteslike)
assert expected == testfunc(byteslike)
@both_to_funcs
def test_to_bytes_from_unicode(testfunc):
assert UNICODE.utf8.encode('utf8') == testfunc(UNICODE.utf8)
@both_to_funcs
def test_to_bytes_from_utf8(testfunc):
utf8 = UNICODE.utf8.encode('utf8')
assert utf8 == testfunc(utf8)
@both_to_funcs
def test_to_bytes_from_bad_utf8(testfunc):
# The to_bytes function doesn't attempt to auto-magically fix non-utf8 encodings.
win1252 = UNICODE.utf8.encode('windows-1252', 'ignore')
assert UNICODE.win1252.encode('windows-1252') == win1252
assert win1252 == testfunc(win1252)
@both_to_funcs
def test_to_funcs_with_unicodable_object(testfunc):
assert UNICODE.utf8.encode('UTF-8') == testfunc(unicodable)
@both_to_funcs
def test_to_funcs_with_utf8able_object(testfunc):
expected = UNICODE.utf8 if PY2 else repr(utf8able)
expected = expected.encode('UTF-8')
assert expected == testfunc(utf8able)
@both_to_funcs
def test_to_funcs_with_win1252able_object(testfunc):
expected = UNICODE.win1252 if PY2 else repr(win1252able)
expected = expected.encode('windows-1252')
assert expected == testfunc(win1252able)
@both_to_funcs
def test_to_funcs_with_byteslike_object(testfunc):
expected = repr(byteslike).encode('US-ASCII')
assert expected == testfunc(byteslike)
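# The round-trip tests below use the 'internet' codec registered by
# yelp_bytes; it appears to decode as UTF-8 with a windows-1252 fallback on
# errors (an assumption; the exact behavior is defined by yelp_bytes itself).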
@pytest.mark.parametrize('value', (
UNICODE.utf8,
unicodable,
utf8able,
win1252able,
byteslike,
))
def test_internet_roundtrip(value):
assert from_bytes(value) == to_bytes(value).decode('internet')
@pytest.mark.parametrize('value', (
UNICODE.utf8,
unicodable,
utf8able,
))
def test_utf8_roundtrip(value):
assert from_bytes(value, 'utf8') == to_bytes(value, 'utf8').decode('utf8')
@pytest.mark.parametrize('value', (
UNICODE.win1252,
win1252able,
))
def test_windows_roundtrip(value):
assert from_bytes(value, 'windows-1252') == to_bytes(value, 'windows-1252').decode('windows-1252')
@pytest.mark.parametrize('value', (
UNICODE.utf8,
utf8able,
win1252able,
byteslike,
))
def test_to_bytes_is_like_str_encode(value):
# pylint:disable=bare-except,broad-except,redefined-variable-type
try:
bytes_result = str(value) if PY2 else str(value).encode('US-ASCII')
except:
bytes_result = '(error)'
try:
to_bytes_result = to_bytes(value, 'US-ASCII')
except:
to_bytes_result = '(error)'
assert bytes_result == to_bytes_result
@pytest.mark.parametrize('value', (
UNICODE.latin1,
UNICODE.win1252,
UNICODE.bmp,
UNICODE.utf8,
))
def test_to_native_with_unicode_objects(value): # pragma: no cover
if PY2:
assert to_native(value) == value.encode('UTF-8')
else:
assert to_native(value) == value
@pytest.mark.parametrize('value', (
UNICODE.latin1.encode('latin1'),
UNICODE.win1252.encode('cp1252'),
UNICODE.bmp.encode('UTF-8'),
UNICODE.utf8.encode('UTF-8'),
))
def test_to_native_with_byte_string(value): # pragma: no cover
if PY2:
assert to_native(value) == value
else:
assert to_native(value) == from_bytes(value)
def test_to_native_unicodable():
expected = UNICODE.utf8.encode('UTF-8') if PY2 else UNICODE.utf8
assert to_native(unicodable) == expected
def test_to_native_utf8able():
expected = UNICODE.utf8.encode('UTF-8') if PY2 else repr(utf8able)
assert to_native(utf8able) == expected
def test_to_native_win1252able():
expected = UNICODE.utf8.encode('cp1252', 'ignore') if PY2 else repr(win1252able)
assert to_native(win1252able) == expected
|
bdh1011/wau | refs/heads/master | venv/lib/python2.7/site-packages/twisted/internet/test/process_connectionlost.py | 20 | import os, sys
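# Helper child process for Twisted's connection-lost tests: each line read
# from stdin names a file descriptor to close; a blank line ends the loop
# (descriptive summary of the code below).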
while True:
line = sys.stdin.readline().strip()
if not line:
break
os.close(int(line))
|