repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
Kyria/LazyBlacksmith | lbcmd/sde_import.py | 1 | 4863 | # -*- encoding: utf-8 -*-
import bz2
import os
import sqlite3
import requests
from flask_script import Command, Option
from lazyblacksmith.models import db
from lbcmd.importer import Importer
class SdeImport(Command):
"""
Manage SDE Data in lazyblacksmith.
If all flags are specified, "clear" action is done first.
"""
filename = 'sqlite-latest.sqlite'
filename_bzip = '%s.bz2' % filename
url = "https://www.fuzzwork.co.uk/dump/%s" % filename_bzip
option_list = (
Option(
'--database_name', '-n',
dest='database_name',
default='sqlite-latest.sqlite',
help=('The path to the EVE Online SDE Sqlite '
'(absolute path may be required)')
),
Option(
'--clear', '-c',
dest='clear',
action="store_true",
default=False,
help='Delete the content of all the SDE tables'
),
Option(
'--download', '-d',
dest='download',
action="store_true",
default=False,
help='Download the SDE before import'
),
Option(
'--url', '-u',
dest='url',
default='https://www.fuzzwork.co.uk/dump/sqlite-latest.sqlite.bz2',
help=('The URL to get the .bz2 export of EVE')
),
)
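# Typical invocations (illustrative sketch; the manage.py entry point and the
# registered command name depend on how LazyBlacksmith wires this Command into
# Flask-Script, so adjust to the project's actual manager script):
#   python manage.py sde_import --download            # fetch, decompress and import
#   python manage.py sde_import -n /path/to/sqlite-latest.sqlite   # import a local dump
#   python manage.py sde_import --clear               # only wipe the imported SDE tables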
def download(self, path, url, output_filename):
""" Download the file at the given url and put it in output_file """
res = requests.get(url, stream=True)
output = "%s/%s" % (path, output_filename)
if res.status_code != 200:
print("Cannot download the file.")
print(res.content)
return False
try:
total_size = 0
with open(output, "wb") as handle:
for data in res.iter_content(1024 * 1024 * 10):
print(
"\rDownloading file ... [%s] " % (
get_human_size(total_size)
),
end=""
)
handle.write(data)
total_size += len(data)
except Exception as err:
print("\rDownloading file ... [FAILED] ")
print(str(err))
return False
print("\rDownloading file ... [SUCCESS] ")
return True
def bunzip2(self, path, source_filename, dest_filename):
""" Bunzip the file provided """
source_file = "%s/%s" % (path, source_filename)
try:
print("Decompressing file ... ", end='')
with open(source_file, 'rb') as bz2file:
with open(dest_filename, 'wb') as unzipped_file:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda: bz2file.read(100 * 1024), b''):
unzipped_file.write(decompressor.decompress(data))
except Exception as err:
print("[FAILED]")
print(str(err))
return False
print("[SUCCESS]")
return True
# pylint: disable=method-hidden,arguments-differ
def run(self, database_name, clear, download, url):
# so we create in LazyBlacksmith folder, not in lbcmd
current_path = os.path.realpath(
'%s/../' % os.path.dirname(os.path.realpath(__file__))
)
tmp_path = '%s/tmp' % current_path
bzip_name = "%s.bz2" % database_name
if download:
# if we download the file, change the path
database_name = '%s/%s' % (tmp_path, database_name)
os.makedirs(tmp_path, exist_ok=True)
if self.download(tmp_path, url, bzip_name):
if self.bunzip2(tmp_path, bzip_name, database_name):
os.remove("%s/%s" % (tmp_path, bzip_name))
if clear:
importer = Importer(None, db.engine)
print("Starting SDE Data cleanup")
importer.delete_all()
print("\nCleanup : Done")
return
importer = Importer(self.create_sde_engine(database_name), db.engine)
# do import, as all steps have been verified :)
print("Starting SDE Import...")
importer.delete_all()
importer.import_all()
print("\nSDE Import : Done")
return
def create_sde_engine(self, database):
con = sqlite3.connect(database)
return con
def get_human_size(size, precision=2):
""" Display size in human readable str """
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size / 1024.0 # apply the division
return "%.*f%s" % (precision, size, suffixes[suffixIndex])
| bsd-3-clause | -8,072,828,805,342,243,000 | 2,947,260,592,896,219,600 | 32.308219 | 79 | 0.522517 | false |
stewartpark/django | django/db/backends/oracle/creation.py | 160 | 17256 | import sys
import time
from django.conf import settings
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.functional import cached_property
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
@cached_property
def _maindb_connection(self):
"""
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
"""
settings_dict = settings.DATABASES[self.connection.alias]
user = settings_dict.get('SAVED_USER') or settings_dict['USER']
password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
settings_dict = settings_dict.copy()
settings_dict.update(USER=user, PASSWORD=password)
DatabaseWrapper = type(self.connection)
return DatabaseWrapper(settings_dict, alias=self.connection.alias)
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
# If we want to keep the db, then we want to also keep the user.
if keepdb:
return
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self._maindb_connection.close() # done with main user -- test user and tablespaces created
self._switch_to_test_user(parameters)
return self.connection.settings_dict['NAME']
def _switch_to_test_user(self, parameters):
"""
Oracle doesn't have the concept of separate databases under the same user.
Thus, we use a separate user (see _create_test_db). This method is used
to switch to that user. We will need the main user again for clean-up when
we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
entries in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database: drop the test user and the test tablespaces
that were created for it, using the main (non-test) connection.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
self.connection.close()
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['user'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""",
]
# Ignore "tablespace already exists" error when keepdb is on.
acceptable_ora_err = 'ORA-01543' if keepdb else None
self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
def _create_test_user(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
# Ignore "user already exists" error when keepdb is on
acceptable_ora_err = 'ORA-01920' if keepdb else None
self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
# Most test-suites can be run without the create-view privilege. But some need it.
extra = "GRANT CREATE VIEW TO %(user)s"
success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031')
if not success and verbosity >= 2:
print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err):
"""
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
"""
try:
# Statement can fail when acceptable_ora_err is not None
allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0
self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail)
return True
except DatabaseError as err:
description = str(err)
if acceptable_ora_err is None or acceptable_ora_err not in description:
raise
return False
def _get_test_db_params(self):
return {
'dbname': self._test_database_name(),
'user': self._test_database_user(),
'password': self._test_database_passwd(),
'tblspace': self._test_database_tblspace(),
'tblspace_temp': self._test_database_tblspace_tmp(),
'datafile': self._test_database_tblspace_datafile(),
'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
'maxsize': self._test_database_tblspace_size(),
'maxsize_tmp': self._test_database_tblspace_tmp_size(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
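# Worked example of the fallback above (illustrative values): with
# settings_dict = {'NAME': 'xe', 'USER': 'scott', 'TEST': {}}, nothing is set
# under TEST, so the helpers below prepend TEST_DATABASE_PREFIX or fall back
# to the module defaults:
#   _test_database_name()   -> 'test_xe'
#   _test_database_user()   -> 'test_scott'
#   _test_database_passwd() -> 'Im_a_lumberjack'  (the module-level PASSWORD)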
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
return self._test_settings_get('PASSWORD', default=PASSWORD)
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='USER')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
def _test_database_tblspace_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace()
return self._test_settings_get('DATAFILE', default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
return self._test_settings_get('DATAFILE_TMP', default=tblspace)
def _test_database_tblspace_size(self):
return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a problem in this case because DB
names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| bsd-3-clause | -7,949,293,996,639,661,000 | -7,774,747,953,377,874,000 | 47.201117 | 114 | 0.581537 | false |
RichardLitt/wyrd-django-dev | tests/regressiontests/urlpatterns_reverse/views.py | 51 | 1244 | from django.http import HttpResponse
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import user_passes_test
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def erroneous_view(request):
import non_existent
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = "Can I be a view? Pleeeease?"
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
| bsd-3-clause | 3,352,432,947,273,516,500 | -6,624,504,250,120,505,000 | 27.930233 | 92 | 0.725884 | false |
easmetz/inasafe | safe/impact_functions/inundation/tsunami_raster_road/test/test_tsunami_raster_road.py | 2 | 6057 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Test for Tsunami Raster Road Impact Function.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
from collections import OrderedDict
from qgis.core import (
QgsFeatureRequest,
QgsField,
QgsRasterLayer,
QgsRectangle,
QgsVectorLayer
)
from PyQt4.QtCore import QVariant
from safe.test.utilities import get_qgis_app, standard_data_path
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
# noinspection PyProtectedMember
from safe.impact_functions.inundation.tsunami_raster_road\
.impact_function import (
TsunamiRasterRoadsFunction,
_raster_to_vector_cells,
_intersect_lines_with_vector_cells)
from safe.gis.qgis_vector_tools import create_layer
__author__ = 'etiennetrimaille'
__project_name__ = 'inasafe-dev'
__filename__ = 'test_tsunami_raster_road.py'
__date__ = '11/03/16'
__copyright__ = '[email protected]'
class TsunamiRasterRoadsFunctionTest(unittest.TestCase):
"""Test for Tsunami Raster Road Impact Function."""
def setUp(self):
registry = ImpactFunctionManager().registry
registry.clear()
registry.register(TsunamiRasterRoadsFunction)
def test_run(self):
"""Test the tsunami on roads IF"""
impact_function = TsunamiRasterRoadsFunction.instance()
hazard_path = standard_data_path('hazard', 'tsunami_wgs84.tif')
exposure_path = standard_data_path('exposure', 'roads.shp')
hazard_layer = QgsRasterLayer(hazard_path, 'Tsunami')
exposure_layer = QgsVectorLayer(exposure_path, 'Roads', 'ogr')
impact_function.hazard = hazard_layer
impact_function.exposure = exposure_layer
# Let's set the extent to the hazard extent
extent = hazard_layer.extent()
rect_extent = [
extent.xMinimum(), extent.yMaximum(),
extent.xMaximum(), extent.yMinimum()]
impact_function.requested_extent = rect_extent
impact_function.run()
impact_layer = impact_function.impact
# Extract calculated result
impact_data = impact_layer.get_data()
# 1 = inundated, 2 = wet, 3 = dry
expected_result = {
0: 193, # changed from 3606 in 3.4.1
1: 88,
2: 107,
3: 114,
4: 53
}
result = {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0
}
for feature in impact_data:
inundated_status = feature[impact_function.target_field]
result[inundated_status] += 1
self.assertDictEqual(expected_result, result)
def test_filter(self):
hazard_keywords = {
'layer_purpose': 'hazard',
'layer_mode': 'continuous',
'layer_geometry': 'raster',
'hazard': 'tsunami',
'hazard_category': 'single_event',
'continuous_hazard_unit': 'metres'
}
exposure_keywords = {
'layer_purpose': 'exposure',
'layer_mode': 'classified',
'layer_geometry': 'line',
'exposure': 'road'
}
impact_functions = ImpactFunctionManager().filter_by_keywords(
hazard_keywords, exposure_keywords)
message = 'There should be 1 impact function, but there are: %s' % \
len(impact_functions)
self.assertEqual(1, len(impact_functions), message)
retrieved_if = impact_functions[0].metadata().as_dict()['id']
expected = ImpactFunctionManager().get_function_id(
TsunamiRasterRoadsFunction)
message = 'Expecting %s, but getting %s instead' % (
expected, retrieved_if)
self.assertEqual(expected, retrieved_if, message)
def test_raster_to_vector_and_line_intersection(self):
"""Test the core part of the analysis.
1. Test creation of spatial index of flood cells
2. Test intersection of flood cells with roads layer
"""
raster_name = standard_data_path(
'hazard',
'tsunami_wgs84.tif')
exposure_name = standard_data_path(
'exposure',
'roads_osm_4326.shp')
raster = QgsRasterLayer(raster_name, 'Flood')
exposure = QgsVectorLayer(exposure_name, 'Exposure', 'ogr')
ranges = OrderedDict()
ranges[0] = [0, 1]
ranges[1] = [1, 2]
ranges[2] = [2, 100]
index, flood_cells_map = _raster_to_vector_cells(
raster, ranges, exposure.crs())
self.assertEqual(len(flood_cells_map), 4198)
rect_with_all_cells = raster.extent()
rect_with_4_cells = QgsRectangle(106.824, -6.177, 106.825, -6.179)
rect_with_0_cells = QgsRectangle(106.818, -6.168, 106.828, -6.175)
self.assertEqual(len(index.intersects(rect_with_all_cells)), 4198)
self.assertEqual(len(index.intersects(rect_with_4_cells)), 43)
self.assertEqual(len(index.intersects(rect_with_0_cells)), 504)
layer = create_layer(exposure)
new_field = QgsField('flooded', QVariant.Int)
layer.dataProvider().addAttributes([new_field])
request = QgsFeatureRequest()
_intersect_lines_with_vector_cells(
exposure, request, index, flood_cells_map, layer, 'flooded')
feature_count = layer.featureCount()
self.assertEqual(feature_count, 388)
flooded = 0
iterator = layer.getFeatures()
for feature in iterator:
attributes = feature.attributes()
if attributes[3] == 1:
flooded += 1
self.assertEqual(flooded, 40)
| gpl-3.0 | -8,285,317,785,272,208,000 | 4,754,152,627,955,156,000 | 33.611429 | 79 | 0.616642 | false |
mgadi/naemonbox | sources/psdash/pyzmq-13.1.0/zmq/sugar/tracker.py | 4 | 4012 | """Tracker for zero-copy messages with 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import time
try:
# below 3.3
from threading import _Event as Event
except (ImportError, AttributeError):
# python throws ImportError, cython throws AttributeError
from threading import Event
from zmq.error import NotDone
from .backend import Frame
class MessageTracker(object):
"""MessageTracker(*towatch)
A class for tracking if 0MQ is done using one or more messages.
When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread
sends the message at some later time. Often you want to know when 0MQ has
actually sent the message though. This is complicated by the fact that
a single 0MQ message can be sent multiple times using different sockets.
This class allows you to track all of the 0MQ usages of a message.
Parameters
----------
*towatch : tuple of Event, MessageTracker, Message instances.
This list of objects to track. This class can track the low-level
Events used by the Message class, other MessageTrackers or
actual Messages.
"""
events = None
peers = None
def __init__(self, *towatch):
"""MessageTracker(*towatch)
Create a message tracker to track a set of messages.
Parameters
----------
*towatch : tuple of Event, MessageTracker, Message instances.
This list of objects to track. This class can track the low-level
Events used by the Message class, other MessageTrackers or
actual Messages.
"""
self.events = set()
self.peers = set()
for obj in towatch:
if isinstance(obj, Event):
self.events.add(obj)
elif isinstance(obj, MessageTracker):
self.peers.add(obj)
elif isinstance(obj, Frame):
if not obj.tracker:
raise ValueError("Not a tracked message")
self.peers.add(obj.tracker)
else:
raise TypeError("Require Events or Message Frames, not %s"%type(obj))
@property
def done(self):
"""Is 0MQ completely done with the message(s) being tracked?"""
for evt in self.events:
if not evt.is_set():
return False
for pm in self.peers:
if not pm.done:
return False
return True
def wait(self, timeout=-1):
"""mt.wait(timeout=-1)
Wait for 0MQ to be done with the message or until `timeout`.
Parameters
----------
timeout : float [default: -1, wait forever]
Maximum time in (s) to wait before raising NotDone.
Returns
-------
None
if done before `timeout`
Raises
------
NotDone
if `timeout` reached before I am done.
"""
tic = time.time()
if timeout is False or timeout < 0:
remaining = 3600*24*7 # a week
else:
remaining = timeout
done = False
for evt in self.events:
if remaining < 0:
raise NotDone
evt.wait(timeout=remaining)
if not evt.is_set():
raise NotDone
toc = time.time()
remaining -= (toc-tic)
tic = toc
for peer in self.peers:
if remaining < 0:
raise NotDone
peer.wait(timeout=remaining)
toc = time.time()
remaining -= (toc-tic)
tic = toc
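# Minimal usage sketch (not exercised anywhere in this module): send a buffer
# without copying, then block until libzmq no longer references it. `sock` is
# assumed to be an already-connected zmq.Socket and `buf` a bytes object.
def _example_tracked_send(sock, buf, timeout=5):
    tracker = sock.send(buf, copy=False, track=True)
    tracker.wait(timeout=timeout)  # raises NotDone if still pending after `timeout`
    return tracker.done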
__all__ = ['MessageTracker']
| gpl-2.0 | 5,361,275,708,098,976,000 | 7,786,410,094,002,791,000 | 30.849206 | 85 | 0.552094 | false |
ozburo/youtube-dl | youtube_dl/extractor/franceculture.py | 4 | 2866 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
extract_attributes,
int_or_none,
)
class FranceCultureIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?franceculture\.fr/emissions/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.franceculture.fr/emissions/carnet-nomade/rendez-vous-au-pays-des-geeks',
'info_dict': {
'id': 'rendez-vous-au-pays-des-geeks',
'display_id': 'rendez-vous-au-pays-des-geeks',
'ext': 'mp3',
'title': 'Rendez-vous au pays des geeks',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140301',
'timestamp': 1393700400,
'vcodec': 'none',
}
}, {
# no thumbnail
'url': 'https://www.franceculture.fr/emissions/la-recherche-montre-en-main/la-recherche-montre-en-main-du-mercredi-10-octobre-2018',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_data = extract_attributes(self._search_regex(
r'''(?sx)
(?:
</h1>|
<div[^>]+class="[^"]*?(?:title-zone-diffusion|heading-zone-(?:wrapper|player-button))[^"]*?"[^>]*>
).*?
(<button[^>]+data-(?:url|asset-source)="[^"]+"[^>]+>)
''',
webpage, 'video data'))
video_url = video_data.get('data-url') or video_data['data-asset-source']
title = video_data.get('data-asset-title') or video_data.get('data-diffusion-title') or self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<div[^>]+class="intro"[^>]*>.*?<h2>(.+?)</h2>',
webpage, 'description', default=None)
thumbnail = self._search_regex(
r'(?s)<figure[^>]+itemtype="https://schema.org/ImageObject"[^>]*>.*?<img[^>]+(?:data-dejavu-)?src="([^"]+)"',
webpage, 'thumbnail', default=None)
uploader = self._html_search_regex(
r'(?s)<span class="author">(.*?)</span>',
webpage, 'uploader', default=None)
ext = determine_ext(video_url.lower())
return {
'id': display_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'ext': ext,
'vcodec': 'none' if ext == 'mp3' else None,
'uploader': uploader,
'timestamp': int_or_none(video_data.get('data-start-time')) or int_or_none(video_data.get('data-asset-created-date')),
'duration': int_or_none(video_data.get('data-duration')),
}
| unlicense | 3,834,886,174,283,233,300 | 1,897,053,961,137,235,200 | 38.260274 | 140 | 0.523378 | false |
AlekhyaMallina-Vedams/openstack-manuals | doc/contributor-guide/setup.py | 608 | 1045 | #!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
| apache-2.0 | 5,836,009,844,585,361,000 | 2,012,882,713,484,690,200 | 33.833333 | 69 | 0.748325 | false |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/painting/transformations.py | 3 | 8603 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QPointF, QSize, Qt
from PyQt5.QtGui import QBrush, QFont, QFontMetrics, QPainter, QPainterPath
from PyQt5.QtWidgets import QApplication, QComboBox, QGridLayout, QWidget
NoTransformation, Translate, Rotate, Scale = range(4)
class RenderArea(QWidget):
def __init__(self, parent=None):
super(RenderArea, self).__init__(parent)
newFont = self.font()
newFont.setPixelSize(12)
self.setFont(newFont)
fontMetrics = QFontMetrics(newFont)
self.xBoundingRect = fontMetrics.boundingRect("x")
self.yBoundingRect = fontMetrics.boundingRect("y")
self.shape = QPainterPath()
self.operations = []
def setOperations(self, operations):
self.operations = operations
self.update()
def setShape(self, shape):
self.shape = shape
self.update()
def minimumSizeHint(self):
return QSize(182, 182)
def sizeHint(self):
return QSize(232, 232)
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.translate(66, 66)
painter.save()
self.transformPainter(painter)
self.drawShape(painter)
painter.restore()
self.drawOutline(painter)
self.transformPainter(painter)
self.drawCoordinates(painter)
def drawCoordinates(self, painter):
painter.setPen(Qt.red)
painter.drawLine(0, 0, 50, 0)
painter.drawLine(48, -2, 50, 0)
painter.drawLine(48, 2, 50, 0)
painter.drawText(60 - self.xBoundingRect.width() / 2,
0 + self.xBoundingRect.height() / 2, "x")
painter.drawLine(0, 0, 0, 50)
painter.drawLine(-2, 48, 0, 50)
painter.drawLine(2, 48, 0, 50)
painter.drawText(0 - self.yBoundingRect.width() / 2,
60 + self.yBoundingRect.height() / 2, "y")
def drawOutline(self, painter):
painter.setPen(Qt.darkGreen)
painter.setPen(Qt.DashLine)
painter.setBrush(Qt.NoBrush)
painter.drawRect(0, 0, 100, 100)
def drawShape(self, painter):
painter.fillPath(self.shape, Qt.blue)
def transformPainter(self, painter):
for operation in self.operations:
if operation == Translate:
painter.translate(50, 50)
elif operation == Scale:
painter.scale(0.75, 0.75)
elif operation == Rotate:
painter.rotate(60)
class Window(QWidget):
operationTable = (NoTransformation, Rotate, Scale, Translate)
NumTransformedAreas = 3
def __init__(self):
super(Window, self).__init__()
self.originalRenderArea = RenderArea()
self.shapeComboBox = QComboBox()
self.shapeComboBox.addItem("Clock")
self.shapeComboBox.addItem("House")
self.shapeComboBox.addItem("Text")
self.shapeComboBox.addItem("Truck")
layout = QGridLayout()
layout.addWidget(self.originalRenderArea, 0, 0)
layout.addWidget(self.shapeComboBox, 1, 0)
self.transformedRenderAreas = list(range(Window.NumTransformedAreas))
self.operationComboBoxes = list(range(Window.NumTransformedAreas))
for i in range(Window.NumTransformedAreas):
self.transformedRenderAreas[i] = RenderArea()
self.operationComboBoxes[i] = QComboBox()
self.operationComboBoxes[i].addItem("No transformation")
self.operationComboBoxes[i].addItem(u"Rotate by 60\N{DEGREE SIGN}")
self.operationComboBoxes[i].addItem("Scale to 75%")
self.operationComboBoxes[i].addItem("Translate by (50, 50)")
self.operationComboBoxes[i].activated.connect(self.operationChanged)
layout.addWidget(self.transformedRenderAreas[i], 0, i + 1)
layout.addWidget(self.operationComboBoxes[i], 1, i + 1)
self.setLayout(layout)
self.setupShapes()
self.shapeSelected(0)
self.setWindowTitle("Transformations")
def setupShapes(self):
truck = QPainterPath()
truck.setFillRule(Qt.WindingFill)
truck.moveTo(0.0, 87.0)
truck.lineTo(0.0, 60.0)
truck.lineTo(10.0, 60.0)
truck.lineTo(35.0, 35.0)
truck.lineTo(100.0, 35.0)
truck.lineTo(100.0, 87.0)
truck.lineTo(0.0, 87.0)
truck.moveTo(17.0, 60.0)
truck.lineTo(55.0, 60.0)
truck.lineTo(55.0, 40.0)
truck.lineTo(37.0, 40.0)
truck.lineTo(17.0, 60.0)
truck.addEllipse(17.0, 75.0, 25.0, 25.0)
truck.addEllipse(63.0, 75.0, 25.0, 25.0)
clock = QPainterPath()
clock.addEllipse(-50.0, -50.0, 100.0, 100.0)
clock.addEllipse(-48.0, -48.0, 96.0, 96.0)
clock.moveTo(0.0, 0.0)
clock.lineTo(-2.0, -2.0)
clock.lineTo(0.0, -42.0)
clock.lineTo(2.0, -2.0)
clock.lineTo(0.0, 0.0)
clock.moveTo(0.0, 0.0)
clock.lineTo(2.732, -0.732)
clock.lineTo(24.495, 14.142)
clock.lineTo(0.732, 2.732)
clock.lineTo(0.0, 0.0)
house = QPainterPath()
house.moveTo(-45.0, -20.0)
house.lineTo(0.0, -45.0)
house.lineTo(45.0, -20.0)
house.lineTo(45.0, 45.0)
house.lineTo(-45.0, 45.0)
house.lineTo(-45.0, -20.0)
house.addRect(15.0, 5.0, 20.0, 35.0)
house.addRect(-35.0, -15.0, 25.0, 25.0)
text = QPainterPath()
font = QFont()
font.setPixelSize(50)
fontBoundingRect = QFontMetrics(font).boundingRect("Qt")
text.addText(-QPointF(fontBoundingRect.center()), font, "Qt")
self.shapes = (clock, house, text, truck)
self.shapeComboBox.activated.connect(self.shapeSelected)
def operationChanged(self):
operations = []
for i in range(Window.NumTransformedAreas):
index = self.operationComboBoxes[i].currentIndex()
operations.append(Window.operationTable[index])
self.transformedRenderAreas[i].setOperations(operations[:])
def shapeSelected(self, index):
shape = self.shapes[index]
self.originalRenderArea.setShape(shape)
for i in range(Window.NumTransformedAreas):
self.transformedRenderAreas[i].setShape(shape)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| gpl-2.0 | -7,742,386,364,784,204,000 | 6,743,507,239,267,725,000 | 33.550201 | 80 | 0.627456 | false |
facelessuser/Pywin32 | lib/x32/win32/lib/win32netcon.py | 9 | 20394 | # Generated by h2py from lmaccess.h
# Included from lmcons.h
CNLEN = 15
LM20_CNLEN = 15
DNLEN = CNLEN
LM20_DNLEN = LM20_CNLEN
UNCLEN = (CNLEN+2)
LM20_UNCLEN = (LM20_CNLEN+2)
NNLEN = 80
LM20_NNLEN = 12
RMLEN = (UNCLEN+1+NNLEN)
LM20_RMLEN = (LM20_UNCLEN+1+LM20_NNLEN)
SNLEN = 80
LM20_SNLEN = 15
STXTLEN = 256
LM20_STXTLEN = 63
PATHLEN = 256
LM20_PATHLEN = 256
DEVLEN = 80
LM20_DEVLEN = 8
EVLEN = 16
UNLEN = 256
LM20_UNLEN = 20
GNLEN = UNLEN
LM20_GNLEN = LM20_UNLEN
PWLEN = 256
LM20_PWLEN = 14
SHPWLEN = 8
CLTYPE_LEN = 12
MAXCOMMENTSZ = 256
LM20_MAXCOMMENTSZ = 48
QNLEN = NNLEN
LM20_QNLEN = LM20_NNLEN
ALERTSZ = 128
NETBIOS_NAME_LEN = 16
CRYPT_KEY_LEN = 7
CRYPT_TXT_LEN = 8
ENCRYPTED_PWLEN = 16
SESSION_PWLEN = 24
SESSION_CRYPT_KLEN = 21
PARMNUM_ALL = 0
PARM_ERROR_NONE = 0
PARMNUM_BASE_INFOLEVEL = 1000
NULL = 0
PLATFORM_ID_DOS = 300
PLATFORM_ID_OS2 = 400
PLATFORM_ID_NT = 500
PLATFORM_ID_OSF = 600
PLATFORM_ID_VMS = 700
MAX_LANMAN_MESSAGE_ID = 5799
UF_SCRIPT = 1
UF_ACCOUNTDISABLE = 2
UF_HOMEDIR_REQUIRED = 8
UF_LOCKOUT = 16
UF_PASSWD_NOTREQD = 32
UF_PASSWD_CANT_CHANGE = 64
UF_TEMP_DUPLICATE_ACCOUNT = 256
UF_NORMAL_ACCOUNT = 512
UF_INTERDOMAIN_TRUST_ACCOUNT = 2048
UF_WORKSTATION_TRUST_ACCOUNT = 4096
UF_SERVER_TRUST_ACCOUNT = 8192
UF_MACHINE_ACCOUNT_MASK = ( UF_INTERDOMAIN_TRUST_ACCOUNT | \
UF_WORKSTATION_TRUST_ACCOUNT | \
UF_SERVER_TRUST_ACCOUNT )
UF_ACCOUNT_TYPE_MASK = ( \
UF_TEMP_DUPLICATE_ACCOUNT | \
UF_NORMAL_ACCOUNT | \
UF_INTERDOMAIN_TRUST_ACCOUNT | \
UF_WORKSTATION_TRUST_ACCOUNT | \
UF_SERVER_TRUST_ACCOUNT \
)
UF_DONT_EXPIRE_PASSWD = 65536
UF_MNS_LOGON_ACCOUNT = 131072
UF_SETTABLE_BITS = ( \
UF_SCRIPT | \
UF_ACCOUNTDISABLE | \
UF_LOCKOUT | \
UF_HOMEDIR_REQUIRED | \
UF_PASSWD_NOTREQD | \
UF_PASSWD_CANT_CHANGE | \
UF_ACCOUNT_TYPE_MASK | \
UF_DONT_EXPIRE_PASSWD | \
UF_MNS_LOGON_ACCOUNT \
)
FILTER_TEMP_DUPLICATE_ACCOUNT = (1)
FILTER_NORMAL_ACCOUNT = (2)
FILTER_INTERDOMAIN_TRUST_ACCOUNT = (8)
FILTER_WORKSTATION_TRUST_ACCOUNT = (16)
FILTER_SERVER_TRUST_ACCOUNT = (32)
LG_INCLUDE_INDIRECT = (1)
AF_OP_PRINT = 1
AF_OP_COMM = 2
AF_OP_SERVER = 4
AF_OP_ACCOUNTS = 8
AF_SETTABLE_BITS = (AF_OP_PRINT | AF_OP_COMM | \
AF_OP_SERVER | AF_OP_ACCOUNTS)
UAS_ROLE_STANDALONE = 0
UAS_ROLE_MEMBER = 1
UAS_ROLE_BACKUP = 2
UAS_ROLE_PRIMARY = 3
USER_NAME_PARMNUM = 1
USER_PASSWORD_PARMNUM = 3
USER_PASSWORD_AGE_PARMNUM = 4
USER_PRIV_PARMNUM = 5
USER_HOME_DIR_PARMNUM = 6
USER_COMMENT_PARMNUM = 7
USER_FLAGS_PARMNUM = 8
USER_SCRIPT_PATH_PARMNUM = 9
USER_AUTH_FLAGS_PARMNUM = 10
USER_FULL_NAME_PARMNUM = 11
USER_USR_COMMENT_PARMNUM = 12
USER_PARMS_PARMNUM = 13
USER_WORKSTATIONS_PARMNUM = 14
USER_LAST_LOGON_PARMNUM = 15
USER_LAST_LOGOFF_PARMNUM = 16
USER_ACCT_EXPIRES_PARMNUM = 17
USER_MAX_STORAGE_PARMNUM = 18
USER_UNITS_PER_WEEK_PARMNUM = 19
USER_LOGON_HOURS_PARMNUM = 20
USER_PAD_PW_COUNT_PARMNUM = 21
USER_NUM_LOGONS_PARMNUM = 22
USER_LOGON_SERVER_PARMNUM = 23
USER_COUNTRY_CODE_PARMNUM = 24
USER_CODE_PAGE_PARMNUM = 25
USER_PRIMARY_GROUP_PARMNUM = 51
USER_PROFILE = 52
USER_PROFILE_PARMNUM = 52
USER_HOME_DIR_DRIVE_PARMNUM = 53
USER_NAME_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_NAME_PARMNUM)
USER_PASSWORD_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PASSWORD_PARMNUM)
USER_PASSWORD_AGE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PASSWORD_AGE_PARMNUM)
USER_PRIV_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PRIV_PARMNUM)
USER_HOME_DIR_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_HOME_DIR_PARMNUM)
USER_COMMENT_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_COMMENT_PARMNUM)
USER_FLAGS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_FLAGS_PARMNUM)
USER_SCRIPT_PATH_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_SCRIPT_PATH_PARMNUM)
USER_AUTH_FLAGS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_AUTH_FLAGS_PARMNUM)
USER_FULL_NAME_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_FULL_NAME_PARMNUM)
USER_USR_COMMENT_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_USR_COMMENT_PARMNUM)
USER_PARMS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PARMS_PARMNUM)
USER_WORKSTATIONS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_WORKSTATIONS_PARMNUM)
USER_LAST_LOGON_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_LAST_LOGON_PARMNUM)
USER_LAST_LOGOFF_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_LAST_LOGOFF_PARMNUM)
USER_ACCT_EXPIRES_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_ACCT_EXPIRES_PARMNUM)
USER_MAX_STORAGE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_MAX_STORAGE_PARMNUM)
USER_UNITS_PER_WEEK_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_UNITS_PER_WEEK_PARMNUM)
USER_LOGON_HOURS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_LOGON_HOURS_PARMNUM)
USER_PAD_PW_COUNT_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PAD_PW_COUNT_PARMNUM)
USER_NUM_LOGONS_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_NUM_LOGONS_PARMNUM)
USER_LOGON_SERVER_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_LOGON_SERVER_PARMNUM)
USER_COUNTRY_CODE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_COUNTRY_CODE_PARMNUM)
USER_CODE_PAGE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_CODE_PAGE_PARMNUM)
USER_PRIMARY_GROUP_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_PRIMARY_GROUP_PARMNUM)
USER_HOME_DIR_DRIVE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + USER_HOME_DIR_DRIVE_PARMNUM)
NULL_USERSETINFO_PASSWD = " "
UNITS_PER_DAY = 24
UNITS_PER_WEEK = UNITS_PER_DAY * 7
USER_PRIV_MASK = 3
USER_PRIV_GUEST = 0
USER_PRIV_USER = 1
USER_PRIV_ADMIN = 2
MAX_PASSWD_LEN = PWLEN
DEF_MIN_PWLEN = 6
DEF_PWUNIQUENESS = 5
DEF_MAX_PWHIST = 8
DEF_MAX_BADPW = 0
VALIDATED_LOGON = 0
PASSWORD_EXPIRED = 2
NON_VALIDATED_LOGON = 3
VALID_LOGOFF = 1
MODALS_MIN_PASSWD_LEN_PARMNUM = 1
MODALS_MAX_PASSWD_AGE_PARMNUM = 2
MODALS_MIN_PASSWD_AGE_PARMNUM = 3
MODALS_FORCE_LOGOFF_PARMNUM = 4
MODALS_PASSWD_HIST_LEN_PARMNUM = 5
MODALS_ROLE_PARMNUM = 6
MODALS_PRIMARY_PARMNUM = 7
MODALS_DOMAIN_NAME_PARMNUM = 8
MODALS_DOMAIN_ID_PARMNUM = 9
MODALS_LOCKOUT_DURATION_PARMNUM = 10
MODALS_LOCKOUT_OBSERVATION_WINDOW_PARMNUM = 11
MODALS_LOCKOUT_THRESHOLD_PARMNUM = 12
MODALS_MIN_PASSWD_LEN_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_MIN_PASSWD_LEN_PARMNUM)
MODALS_MAX_PASSWD_AGE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_MAX_PASSWD_AGE_PARMNUM)
MODALS_MIN_PASSWD_AGE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_MIN_PASSWD_AGE_PARMNUM)
MODALS_FORCE_LOGOFF_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_FORCE_LOGOFF_PARMNUM)
MODALS_PASSWD_HIST_LEN_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_PASSWD_HIST_LEN_PARMNUM)
MODALS_ROLE_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_ROLE_PARMNUM)
MODALS_PRIMARY_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_PRIMARY_PARMNUM)
MODALS_DOMAIN_NAME_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_DOMAIN_NAME_PARMNUM)
MODALS_DOMAIN_ID_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + MODALS_DOMAIN_ID_PARMNUM)
GROUPIDMASK = 32768
GROUP_ALL_PARMNUM = 0
GROUP_NAME_PARMNUM = 1
GROUP_COMMENT_PARMNUM = 2
GROUP_ATTRIBUTES_PARMNUM = 3
GROUP_ALL_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + GROUP_ALL_PARMNUM)
GROUP_NAME_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + GROUP_NAME_PARMNUM)
GROUP_COMMENT_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + GROUP_COMMENT_PARMNUM)
GROUP_ATTRIBUTES_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + GROUP_ATTRIBUTES_PARMNUM)
LOCALGROUP_NAME_PARMNUM = 1
LOCALGROUP_COMMENT_PARMNUM = 2
MAXPERMENTRIES = 64
ACCESS_NONE = 0
ACCESS_READ = 1
ACCESS_WRITE = 2
ACCESS_CREATE = 4
ACCESS_EXEC = 8
ACCESS_DELETE = 16
ACCESS_ATRIB = 32
ACCESS_PERM = 64
ACCESS_GROUP = 32768
ACCESS_AUDIT = 1
ACCESS_SUCCESS_OPEN = 16
ACCESS_SUCCESS_WRITE = 32
ACCESS_SUCCESS_DELETE = 64
ACCESS_SUCCESS_ACL = 128
ACCESS_SUCCESS_MASK = 240
ACCESS_FAIL_OPEN = 256
ACCESS_FAIL_WRITE = 512
ACCESS_FAIL_DELETE = 1024
ACCESS_FAIL_ACL = 2048
ACCESS_FAIL_MASK = 3840
ACCESS_FAIL_SHIFT = 4
ACCESS_RESOURCE_NAME_PARMNUM = 1
ACCESS_ATTR_PARMNUM = 2
ACCESS_COUNT_PARMNUM = 3
ACCESS_ACCESS_LIST_PARMNUM = 4
ACCESS_RESOURCE_NAME_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + ACCESS_RESOURCE_NAME_PARMNUM)
ACCESS_ATTR_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + ACCESS_ATTR_PARMNUM)
ACCESS_COUNT_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + ACCESS_COUNT_PARMNUM)
ACCESS_ACCESS_LIST_INFOLEVEL = \
(PARMNUM_BASE_INFOLEVEL + ACCESS_ACCESS_LIST_PARMNUM)
ACCESS_LETTERS = "RWCXDAP "
NETLOGON_CONTROL_QUERY = 1
NETLOGON_CONTROL_REPLICATE = 2
NETLOGON_CONTROL_SYNCHRONIZE = 3
NETLOGON_CONTROL_PDC_REPLICATE = 4
NETLOGON_CONTROL_REDISCOVER = 5
NETLOGON_CONTROL_TC_QUERY = 6
NETLOGON_CONTROL_TRANSPORT_NOTIFY = 7
NETLOGON_CONTROL_FIND_USER = 8
NETLOGON_CONTROL_UNLOAD_NETLOGON_DLL = 65531
NETLOGON_CONTROL_BACKUP_CHANGE_LOG = 65532
NETLOGON_CONTROL_TRUNCATE_LOG = 65533
NETLOGON_CONTROL_SET_DBFLAG = 65534
NETLOGON_CONTROL_BREAKPOINT = 65535
NETLOGON_REPLICATION_NEEDED = 1
NETLOGON_REPLICATION_IN_PROGRESS = 2
NETLOGON_FULL_SYNC_REPLICATION = 4
NETLOGON_REDO_NEEDED = 8
######################
# Manual stuff
TEXT=lambda x:x
MAX_PREFERRED_LENGTH = -1
PARM_ERROR_UNKNOWN = -1
MESSAGE_FILENAME = TEXT("NETMSG")
OS2MSG_FILENAME = TEXT("BASE")
HELP_MSG_FILENAME = TEXT("NETH")
BACKUP_MSG_FILENAME = TEXT("BAK.MSG")
TIMEQ_FOREVER = -1
USER_MAXSTORAGE_UNLIMITED = -1
USER_NO_LOGOFF = -1
DEF_MAX_PWAGE = TIMEQ_FOREVER
DEF_MIN_PWAGE = 0
DEF_FORCE_LOGOFF = -1
ONE_DAY = 1*24*3600
GROUP_SPECIALGRP_USERS = "USERS"
GROUP_SPECIALGRP_ADMINS = "ADMINS"
GROUP_SPECIALGRP_GUESTS = "GUESTS"
GROUP_SPECIALGRP_LOCAL = "LOCAL"
ACCESS_ALL = ( ACCESS_READ | ACCESS_WRITE | ACCESS_CREATE | ACCESS_EXEC | ACCESS_DELETE | ACCESS_ATRIB | ACCESS_PERM )
# From lmserver.h
SV_PLATFORM_ID_OS2 = 400
SV_PLATFORM_ID_NT = 500
MAJOR_VERSION_MASK = 15
SV_TYPE_WORKSTATION = 1
SV_TYPE_SERVER = 2
SV_TYPE_SQLSERVER = 4
SV_TYPE_DOMAIN_CTRL = 8
SV_TYPE_DOMAIN_BAKCTRL = 16
SV_TYPE_TIME_SOURCE = 32
SV_TYPE_AFP = 64
SV_TYPE_NOVELL = 128
SV_TYPE_DOMAIN_MEMBER = 256
SV_TYPE_PRINTQ_SERVER = 512
SV_TYPE_DIALIN_SERVER = 1024
SV_TYPE_XENIX_SERVER = 2048
SV_TYPE_SERVER_UNIX = SV_TYPE_XENIX_SERVER
SV_TYPE_NT = 4096
SV_TYPE_WFW = 8192
SV_TYPE_SERVER_MFPN = 16384
SV_TYPE_SERVER_NT = 32768
SV_TYPE_POTENTIAL_BROWSER = 65536
SV_TYPE_BACKUP_BROWSER = 131072
SV_TYPE_MASTER_BROWSER = 262144
SV_TYPE_DOMAIN_MASTER = 524288
SV_TYPE_SERVER_OSF = 1048576
SV_TYPE_SERVER_VMS = 2097152
SV_TYPE_WINDOWS = 4194304
SV_TYPE_DFS = 8388608
SV_TYPE_CLUSTER_NT = 16777216
SV_TYPE_DCE = 268435456
SV_TYPE_ALTERNATE_XPORT = 536870912
SV_TYPE_LOCAL_LIST_ONLY = 1073741824
SV_TYPE_DOMAIN_ENUM = -2147483648
SV_TYPE_ALL = -1
SV_NODISC = -1
SV_USERSECURITY = 1
SV_SHARESECURITY = 0
SV_HIDDEN = 1
SV_VISIBLE = 0
SV_PLATFORM_ID_PARMNUM = 101
SV_NAME_PARMNUM = 102
SV_VERSION_MAJOR_PARMNUM = 103
SV_VERSION_MINOR_PARMNUM = 104
SV_TYPE_PARMNUM = 105
SV_COMMENT_PARMNUM = 5
SV_USERS_PARMNUM = 107
SV_DISC_PARMNUM = 10
SV_HIDDEN_PARMNUM = 16
SV_ANNOUNCE_PARMNUM = 17
SV_ANNDELTA_PARMNUM = 18
SV_USERPATH_PARMNUM = 112
SV_ULIST_MTIME_PARMNUM = 401
SV_GLIST_MTIME_PARMNUM = 402
SV_ALIST_MTIME_PARMNUM = 403
SV_ALERTS_PARMNUM = 11
SV_SECURITY_PARMNUM = 405
SV_NUMADMIN_PARMNUM = 406
SV_LANMASK_PARMNUM = 407
SV_GUESTACC_PARMNUM = 408
SV_CHDEVQ_PARMNUM = 410
SV_CHDEVJOBS_PARMNUM = 411
SV_CONNECTIONS_PARMNUM = 412
SV_SHARES_PARMNUM = 413
SV_OPENFILES_PARMNUM = 414
SV_SESSREQS_PARMNUM = 417
SV_ACTIVELOCKS_PARMNUM = 419
SV_NUMREQBUF_PARMNUM = 420
SV_NUMBIGBUF_PARMNUM = 422
SV_NUMFILETASKS_PARMNUM = 423
SV_ALERTSCHED_PARMNUM = 37
SV_ERRORALERT_PARMNUM = 38
SV_LOGONALERT_PARMNUM = 39
SV_ACCESSALERT_PARMNUM = 40
SV_DISKALERT_PARMNUM = 41
SV_NETIOALERT_PARMNUM = 42
SV_MAXAUDITSZ_PARMNUM = 43
SV_SRVHEURISTICS_PARMNUM = 431
SV_SESSOPENS_PARMNUM = 501
SV_SESSVCS_PARMNUM = 502
SV_OPENSEARCH_PARMNUM = 503
SV_SIZREQBUF_PARMNUM = 504
SV_INITWORKITEMS_PARMNUM = 505
SV_MAXWORKITEMS_PARMNUM = 506
SV_RAWWORKITEMS_PARMNUM = 507
SV_IRPSTACKSIZE_PARMNUM = 508
SV_MAXRAWBUFLEN_PARMNUM = 509
SV_SESSUSERS_PARMNUM = 510
SV_SESSCONNS_PARMNUM = 511
SV_MAXNONPAGEDMEMORYUSAGE_PARMNUM = 512
SV_MAXPAGEDMEMORYUSAGE_PARMNUM = 513
SV_ENABLESOFTCOMPAT_PARMNUM = 514
SV_ENABLEFORCEDLOGOFF_PARMNUM = 515
SV_TIMESOURCE_PARMNUM = 516
SV_ACCEPTDOWNLEVELAPIS_PARMNUM = 517
SV_LMANNOUNCE_PARMNUM = 518
SV_DOMAIN_PARMNUM = 519
SV_MAXCOPYREADLEN_PARMNUM = 520
SV_MAXCOPYWRITELEN_PARMNUM = 521
SV_MINKEEPSEARCH_PARMNUM = 522
SV_MAXKEEPSEARCH_PARMNUM = 523
SV_MINKEEPCOMPLSEARCH_PARMNUM = 524
SV_MAXKEEPCOMPLSEARCH_PARMNUM = 525
SV_THREADCOUNTADD_PARMNUM = 526
SV_NUMBLOCKTHREADS_PARMNUM = 527
SV_SCAVTIMEOUT_PARMNUM = 528
SV_MINRCVQUEUE_PARMNUM = 529
SV_MINFREEWORKITEMS_PARMNUM = 530
SV_XACTMEMSIZE_PARMNUM = 531
SV_THREADPRIORITY_PARMNUM = 532
SV_MAXMPXCT_PARMNUM = 533
SV_OPLOCKBREAKWAIT_PARMNUM = 534
SV_OPLOCKBREAKRESPONSEWAIT_PARMNUM = 535
SV_ENABLEOPLOCKS_PARMNUM = 536
SV_ENABLEOPLOCKFORCECLOSE_PARMNUM = 537
SV_ENABLEFCBOPENS_PARMNUM = 538
SV_ENABLERAW_PARMNUM = 539
SV_ENABLESHAREDNETDRIVES_PARMNUM = 540
SV_MINFREECONNECTIONS_PARMNUM = 541
SV_MAXFREECONNECTIONS_PARMNUM = 542
SV_INITSESSTABLE_PARMNUM = 543
SV_INITCONNTABLE_PARMNUM = 544
SV_INITFILETABLE_PARMNUM = 545
SV_INITSEARCHTABLE_PARMNUM = 546
SV_ALERTSCHEDULE_PARMNUM = 547
SV_ERRORTHRESHOLD_PARMNUM = 548
SV_NETWORKERRORTHRESHOLD_PARMNUM = 549
SV_DISKSPACETHRESHOLD_PARMNUM = 550
SV_MAXLINKDELAY_PARMNUM = 552
SV_MINLINKTHROUGHPUT_PARMNUM = 553
SV_LINKINFOVALIDTIME_PARMNUM = 554
SV_SCAVQOSINFOUPDATETIME_PARMNUM = 555
SV_MAXWORKITEMIDLETIME_PARMNUM = 556
SV_MAXRAWWORKITEMS_PARMNUM = 557
SV_PRODUCTTYPE_PARMNUM = 560
SV_SERVERSIZE_PARMNUM = 561
SV_CONNECTIONLESSAUTODISC_PARMNUM = 562
SV_SHARINGVIOLATIONRETRIES_PARMNUM = 563
SV_SHARINGVIOLATIONDELAY_PARMNUM = 564
SV_MAXGLOBALOPENSEARCH_PARMNUM = 565
SV_REMOVEDUPLICATESEARCHES_PARMNUM = 566
SV_LOCKVIOLATIONRETRIES_PARMNUM = 567
SV_LOCKVIOLATIONOFFSET_PARMNUM = 568
SV_LOCKVIOLATIONDELAY_PARMNUM = 569
SV_MDLREADSWITCHOVER_PARMNUM = 570
SV_CACHEDOPENLIMIT_PARMNUM = 571
SV_CRITICALTHREADS_PARMNUM = 572
SV_RESTRICTNULLSESSACCESS_PARMNUM = 573
SV_ENABLEWFW311DIRECTIPX_PARMNUM = 574
SV_OTHERQUEUEAFFINITY_PARMNUM = 575
SV_QUEUESAMPLESECS_PARMNUM = 576
SV_BALANCECOUNT_PARMNUM = 577
SV_PREFERREDAFFINITY_PARMNUM = 578
SV_MAXFREERFCBS_PARMNUM = 579
SV_MAXFREEMFCBS_PARMNUM = 580
SV_MAXFREELFCBS_PARMNUM = 581
SV_MAXFREEPAGEDPOOLCHUNKS_PARMNUM = 582
SV_MINPAGEDPOOLCHUNKSIZE_PARMNUM = 583
SV_MAXPAGEDPOOLCHUNKSIZE_PARMNUM = 584
SV_SENDSFROMPREFERREDPROCESSOR_PARMNUM = 585
SV_MAXTHREADSPERQUEUE_PARMNUM = 586
SV_CACHEDDIRECTORYLIMIT_PARMNUM = 587
SV_MAXCOPYLENGTH_PARMNUM = 588
SV_ENABLEBULKTRANSFER_PARMNUM = 589
SV_ENABLECOMPRESSION_PARMNUM = 590
SV_AUTOSHAREWKS_PARMNUM = 591
SV_AUTOSHARESERVER_PARMNUM = 592
SV_ENABLESECURITYSIGNATURE_PARMNUM = 593
SV_REQUIRESECURITYSIGNATURE_PARMNUM = 594
SV_MINCLIENTBUFFERSIZE_PARMNUM = 595
SV_CONNECTIONNOSESSIONSTIMEOUT_PARMNUM = 596
SVI1_NUM_ELEMENTS = 5
SVI2_NUM_ELEMENTS = 40
SVI3_NUM_ELEMENTS = 44
SW_AUTOPROF_LOAD_MASK = 1
SW_AUTOPROF_SAVE_MASK = 2
SV_MAX_SRV_HEUR_LEN = 32
SV_USERS_PER_LICENSE = 5
SVTI2_REMAP_PIPE_NAMES = 2
# Generated by h2py from lmshare.h
SHARE_NETNAME_PARMNUM = 1
SHARE_TYPE_PARMNUM = 3
SHARE_REMARK_PARMNUM = 4
SHARE_PERMISSIONS_PARMNUM = 5
SHARE_MAX_USES_PARMNUM = 6
SHARE_CURRENT_USES_PARMNUM = 7
SHARE_PATH_PARMNUM = 8
SHARE_PASSWD_PARMNUM = 9
SHARE_FILE_SD_PARMNUM = 501
SHI1_NUM_ELEMENTS = 4
SHI2_NUM_ELEMENTS = 10
STYPE_DISKTREE = 0
STYPE_PRINTQ = 1
STYPE_DEVICE = 2
STYPE_IPC = 3
STYPE_SPECIAL = -2147483648
SHI1005_FLAGS_DFS = 1
SHI1005_FLAGS_DFS_ROOT = 2
COW_PERMACHINE = 4
COW_PERUSER = 8
CSC_CACHEABLE = 16
CSC_NOFLOWOPS = 32
CSC_AUTO_INWARD = 64
CSC_AUTO_OUTWARD = 128
SHI1005_VALID_FLAGS_SET = ( CSC_CACHEABLE | \
CSC_NOFLOWOPS | \
CSC_AUTO_INWARD | \
CSC_AUTO_OUTWARD| \
COW_PERMACHINE | \
COW_PERUSER )
SHI1007_VALID_FLAGS_SET = SHI1005_VALID_FLAGS_SET
SESS_GUEST = 1
SESS_NOENCRYPTION = 2
SESI1_NUM_ELEMENTS = 8
SESI2_NUM_ELEMENTS = 9
PERM_FILE_READ = 1
PERM_FILE_WRITE = 2
PERM_FILE_CREATE = 4
# Generated by h2py from d:\mssdk\include\winnetwk.h
WNNC_NET_MSNET = 65536
WNNC_NET_LANMAN = 131072
WNNC_NET_NETWARE = 196608
WNNC_NET_VINES = 262144
WNNC_NET_10NET = 327680
WNNC_NET_LOCUS = 393216
WNNC_NET_SUN_PC_NFS = 458752
WNNC_NET_LANSTEP = 524288
WNNC_NET_9TILES = 589824
WNNC_NET_LANTASTIC = 655360
WNNC_NET_AS400 = 720896
WNNC_NET_FTP_NFS = 786432
WNNC_NET_PATHWORKS = 851968
WNNC_NET_LIFENET = 917504
WNNC_NET_POWERLAN = 983040
WNNC_NET_BWNFS = 1048576
WNNC_NET_COGENT = 1114112
WNNC_NET_FARALLON = 1179648
WNNC_NET_APPLETALK = 1245184
WNNC_NET_INTERGRAPH = 1310720
WNNC_NET_SYMFONET = 1376256
WNNC_NET_CLEARCASE = 1441792
WNNC_NET_FRONTIER = 1507328
WNNC_NET_BMC = 1572864
WNNC_NET_DCE = 1638400
WNNC_NET_DECORB = 2097152
WNNC_NET_PROTSTOR = 2162688
WNNC_NET_FJ_REDIR = 2228224
WNNC_NET_DISTINCT = 2293760
WNNC_NET_TWINS = 2359296
WNNC_NET_RDR2SAMPLE = 2424832
RESOURCE_CONNECTED = 1
RESOURCE_GLOBALNET = 2
RESOURCE_REMEMBERED = 3
RESOURCE_RECENT = 4
RESOURCE_CONTEXT = 5
RESOURCETYPE_ANY = 0
RESOURCETYPE_DISK = 1
RESOURCETYPE_PRINT = 2
RESOURCETYPE_RESERVED = 8
RESOURCETYPE_UNKNOWN = -1
RESOURCEUSAGE_CONNECTABLE = 1
RESOURCEUSAGE_CONTAINER = 2
RESOURCEUSAGE_NOLOCALDEVICE = 4
RESOURCEUSAGE_SIBLING = 8
RESOURCEUSAGE_ATTACHED = 16
RESOURCEUSAGE_ALL = (RESOURCEUSAGE_CONNECTABLE | RESOURCEUSAGE_CONTAINER | RESOURCEUSAGE_ATTACHED)
RESOURCEUSAGE_RESERVED = -2147483648
RESOURCEDISPLAYTYPE_GENERIC = 0
RESOURCEDISPLAYTYPE_DOMAIN = 1
RESOURCEDISPLAYTYPE_SERVER = 2
RESOURCEDISPLAYTYPE_SHARE = 3
RESOURCEDISPLAYTYPE_FILE = 4
RESOURCEDISPLAYTYPE_GROUP = 5
RESOURCEDISPLAYTYPE_NETWORK = 6
RESOURCEDISPLAYTYPE_ROOT = 7
RESOURCEDISPLAYTYPE_SHAREADMIN = 8
RESOURCEDISPLAYTYPE_DIRECTORY = 9
RESOURCEDISPLAYTYPE_TREE = 10
RESOURCEDISPLAYTYPE_NDSCONTAINER = 11
NETPROPERTY_PERSISTENT = 1
CONNECT_UPDATE_PROFILE = 1
CONNECT_UPDATE_RECENT = 2
CONNECT_TEMPORARY = 4
CONNECT_INTERACTIVE = 8
CONNECT_PROMPT = 16
CONNECT_NEED_DRIVE = 32
CONNECT_REFCOUNT = 64
CONNECT_REDIRECT = 128
CONNECT_LOCALDRIVE = 256
CONNECT_CURRENT_MEDIA = 512
CONNECT_DEFERRED = 1024
CONNECT_RESERVED = -16777216
CONNDLG_RO_PATH = 1
CONNDLG_CONN_POINT = 2
CONNDLG_USE_MRU = 4
CONNDLG_HIDE_BOX = 8
CONNDLG_PERSIST = 16
CONNDLG_NOT_PERSIST = 32
DISC_UPDATE_PROFILE = 1
DISC_NO_FORCE = 64
UNIVERSAL_NAME_INFO_LEVEL = 1
REMOTE_NAME_INFO_LEVEL = 2
WNFMT_MULTILINE = 1
WNFMT_ABBREVIATED = 2
WNFMT_INENUM = 16
WNFMT_CONNECTION = 32
NETINFO_DLL16 = 1
NETINFO_DISKRED = 4
NETINFO_PRINTERRED = 8
RP_LOGON = 1
RP_INIFILE = 2
PP_DISPLAYERRORS = 1
WNCON_FORNETCARD = 1
WNCON_NOTROUTED = 2
WNCON_SLOWLINK = 4
WNCON_DYNAMIC = 8
## NETSETUP_NAME_TYPE, used with NetValidateName
NetSetupUnknown = 0
NetSetupMachine = 1
NetSetupWorkgroup = 2
NetSetupDomain = 3
NetSetupNonExistentDomain = 4
NetSetupDnsMachine = 5
## NETSETUP_JOIN_STATUS, use with NetGetJoinInformation
NetSetupUnknownStatus = 0
NetSetupUnjoined = 1
NetSetupWorkgroupName = 2
NetSetupDomainName = 3
NetValidateAuthentication = 1
NetValidatePasswordChange = 2
NetValidatePasswordReset = 3
| bsd-3-clause | 4,351,569,894,582,248,400 | -6,242,493,315,199,887,000 | 29.135878 | 118 | 0.700892 | false |
CospanDesign/sdio-device | cocotb/test_dut.py | 2 | 1502 | # Simple tests for an adder module
import os
import sys
import cocotb
import logging
from cocotb.result import TestFailure
from nysa.host.sim.sim_host import NysaSim
from cocotb.clock import Clock
import time
from array import array as Array
from dut_driver import wb_sdio_deviceDriver
SIM_CONFIG = "sim_config.json"
CLK_PERIOD = 10
MODULE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, "rtl")
MODULE_PATH = os.path.abspath(MODULE_PATH)
def setup_dut(dut):
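# kick off a free-running clock on the DUT for the duration of the test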
cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
@cocotb.coroutine
def wait_ready(nysa, dut):
#while not dut.hd_ready.value.get_value():
# yield(nysa.wait_clocks(1))
#yield(nysa.wait_clocks(100))
pass
@cocotb.test(skip = False)
def first_test(dut):
"""
Description:
Very Basic Functionality
Startup Nysa
Test ID: 0
Expected Results:
Write to all registers
"""
dut.test_id = 0
print "module path: %s" % MODULE_PATH
nysa = NysaSim(dut, SIM_CONFIG, CLK_PERIOD, user_paths = [MODULE_PATH])
setup_dut(dut)
yield(nysa.reset())
nysa.read_sdb()
yield (nysa.wait_clocks(10))
nysa.pretty_print_sdb()
driver = wb_sdio_deviceDriver(nysa, nysa.find_device(wb_sdio_deviceDriver)[0])
print "here!"
yield cocotb.external(driver.set_control)(0x01)
yield (nysa.wait_clocks(100))
v = yield cocotb.external(driver.get_control)()
dut.log.info("V: %d" % v)
dut.log.info("DUT Opened!")
dut.log.info("Ready")
| mit | -4,900,391,596,408,713,000 | 3,660,551,283,475,415,600 | 22.46875 | 82 | 0.669774 | false |
OpenUpgrade-dev/OpenUpgrade | addons/account_check_writing/account.py | 379 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
_inherit = "account.journal"
_columns = {
'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check this if you use a preformatted sheet for checks'),
}
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'check_layout': fields.selection([
('top', 'Check on Top'),
('middle', 'Check in middle'),
('bottom', 'Check on bottom'),
],"Check Layout",
help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
}
_defaults = {
'check_layout' : lambda *a: 'top',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,309,384,604,013,474,300 | -5,594,387,569,427,196,000 | 41.333333 | 231 | 0.613189 | false |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_custom_inverse_solver.py | 1 | 6931 | # -*- coding: utf-8 -*-
"""
================================================
Source localization with a custom inverse solver
================================================
The objective of this example is to show how to plug a custom inverse solver
in MNE in order to facilitate empirical comparison with the methods MNE already
implements (wMNE, dSPM, sLORETA, LCMV, (TF-)MxNE etc.).
This script is educational and shall be used for methods
evaluations and new developments. It is not meant to be an example
of good practice to analyse your data.
The example makes use of 2 functions ``apply_solver`` and ``solver``
so changes can be limited to the ``solver`` function (which only takes three
parameters: the whitened data, the gain matrix, and the number of orientations)
in order to try out another inverse algorithm.
"""
import numpy as np
from scipy import linalg
import mne
from mne.datasets import sample
from mne.viz import plot_sparse_source_estimates
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
subjects_dir = data_path + '/subjects'
condition = 'Left Auditory'
# Read noise covariance matrix
noise_cov = mne.read_cov(cov_fname)
# Handling average file
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked.crop(tmin=0.04, tmax=0.18)
evoked = evoked.pick_types(eeg=False, meg=True)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Auxiliary function to run the solver
def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):
"""Function to call a custom solver on evoked data
This function does all the necessary computation:
- to select the channels in the forward given the available ones in
the data
- to take into account the noise covariance and do the spatial whitening
- to apply loose orientation constraint as MNE solvers
- to apply a weighting of the columns of the forward operator as in the
weighted Minimum Norm formulation in order to limit the problem
of depth bias.
Parameters
----------
solver : callable
The solver takes 3 parameters: data M, gain matrix G, number of
dipoles orientations per location (1 or 3). A solver shall return
2 variables: X which contains the time series of the active dipoles
and an active set which is a boolean mask to specify what dipoles are
present in X.
evoked : instance of mne.Evoked
The evoked data
forward : instance of Forward
The forward solution.
noise_cov : instance of Covariance
The noise covariance.
loose : float in [0, 1] | 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
depth : None | float in [0, 1]
Depth weighting coefficients. If None, no depth weighting is performed.
Returns
-------
stc : instance of SourceEstimate
The source estimates.
"""
# Import the necessary private functions
from mne.inverse_sparse.mxne_inverse import \
(_prepare_gain, _check_loose_forward, is_fixed_orient,
_reapply_source_weighting, _make_sparse_stc)
all_ch_names = evoked.ch_names
loose, forward = _check_loose_forward(loose, forward)
# put the forward solution in fixed orientation if it's not already
if loose == 0. and not is_fixed_orient(forward):
forward = mne.convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)
# Handle depth weighting and whitening (here is no weights)
gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked.info, noise_cov, pca=False, depth=depth,
loose=loose, weights=None, weights_min=None)
# Select channels of interest
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = evoked.data[sel]
# Whiten data
M = np.dot(whitener, M)
n_orient = 1 if is_fixed_orient(forward) else 3
X, active_set = solver(M, gain, n_orient)
X = _reapply_source_weighting(X, source_weighting, active_set, n_orient)
stc = _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],
tstep=1. / evoked.info['sfreq'])
return stc
###############################################################################
# Define your solver
def solver(M, G, n_orient):
"""Dummy solver
It just runs L2 penalized regression and keeps the 10 strongest locations.
Parameters
----------
M : array, shape (n_channels, n_times)
The whitened data.
G : array, shape (n_channels, n_dipoles)
The gain matrix a.k.a. the forward operator. The number of locations
is n_dipoles / n_orient. n_orient will be 1 for a fixed orientation
constraint or 3 when using a free orientation model.
n_orient : int
Can be 1 or 3 depending if one works with fixed or free orientations.
If n_orient is 3, then ``G[:, 2::3]`` corresponds to the dipoles that
are normal to the cortex.
Returns
-------
X : array, (n_active_dipoles, n_times)
The time series of the dipoles in the active set.
active_set : array (n_dipoles)
Array of bool. Entry j is True if dipole j is in the active set.
We have ``X_full[active_set] == X`` where X_full is the full X matrix
such that ``M = G X_full``.
"""
K = linalg.solve(np.dot(G, G.T) + 1e15 * np.eye(G.shape[0]), G).T
K /= np.linalg.norm(K, axis=1)[:, None]
X = np.dot(K, M)
indices = np.argsort(np.sum(X ** 2, axis=1))[-10:]
active_set = np.zeros(G.shape[1], dtype=bool)
for idx in indices:
idx -= idx % n_orient
active_set[idx:idx + n_orient] = True
X = X[active_set]
return X, active_set
###############################################################################
# Apply your custom solver
# loose, depth = 0.2, 0.8 # corresponds to loose orientation
loose, depth = 1., 0. # corresponds to free orientation
stc = apply_solver(solver, evoked, forward, noise_cov, loose, depth)
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1)
| bsd-3-clause | -2,025,042,694,595,866,400 | -3,318,563,779,637,440,000 | 37.505556 | 79 | 0.633386 | false |
jordiblasco/easybuild-easyblocks | easybuild/easyblocks/t/trinity.py | 10 | 11628 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Trinity, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_Trinity(EasyBlock):
"""Support for building/installing Trinity."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for Trinity."""
EasyBlock.__init__(self, *args, **kwargs)
self.build_in_installdir = True
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Trinity."""
extra_vars = {
'withsampledata': [False, "Include sample data", CUSTOM],
'bwapluginver': [None, "BWA plugin version", CUSTOM],
'RSEMmod': [False, "Enable RSEMmod", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
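# In an easyconfig file these parameters are plain assignments, e.g.
# (illustrative values only): withsampledata = False, bwapluginver = '0.5.9', RSEMmod = True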
def butterfly(self):
"""Install procedure for Butterfly."""
self.log.info("Begin Butterfly")
dst = os.path.join(self.cfg['start_dir'], 'Butterfly', 'src')
try:
os.chdir(dst)
except OSError, err:
raise EasyBuildError("Butterfly: failed to change to dst dir %s: %s", dst, err)
cmd = "ant"
run_cmd(cmd)
self.log.info("End Butterfly")
def chrysalis(self, run=True):
"""Install procedure for Chrysalis."""
make_flags = "COMPILER='%s' CPLUSPLUS='%s' CC='%s' " % (os.getenv('CXX'),
os.getenv('CXX'),
os.getenv('CC'))
make_flags += "OMP_FLAGS='%s' OMP_LINK='%s' " % (self.toolchain.get_flag('openmp'),
os.getenv('LIBS'))
make_flags += "OPTIM='-O1' SYS_OPT='-O2 %s' " % self.toolchain.get_flag('optarch')
make_flags += "OPEN_MP=yes UNSUPPORTED=yes DEBUG=no QUIET=yes"
if run:
self.log.info("Begin Chrysalis")
dst = os.path.join(self.cfg['start_dir'], 'Chrysalis')
try:
os.chdir(dst)
except OSError, err:
raise EasyBuildError("Chrysalis: failed to change to dst dir %s: %s", dst, err)
run_cmd("make clean")
run_cmd("make %s" % make_flags)
self.log.info("End Chrysalis")
else:
return make_flags
def inchworm(self, run=True):
"""Install procedure for Inchworm."""
make_flags = 'CXXFLAGS="%s %s"' % (os.getenv('CXXFLAGS'), self.toolchain.get_flag('openmp'))
if run:
self.log.info("Begin Inchworm")
dst = os.path.join(self.cfg['start_dir'], 'Inchworm')
try:
os.chdir(dst)
except OSError, err:
raise EasyBuildError("Inchworm: failed to change to dst dir %s: %s", dst, err)
run_cmd('./configure --prefix=%s' % dst)
run_cmd("make install %s" % make_flags)
self.log.info("End Inchworm")
else:
return make_flags
def jellyfish(self):
"""use a seperate jellyfish source if it exists, otherwise, just install the bundled jellyfish"""
self.log.debug("begin jellyfish")
self.log.debug("startdir: %s", self.cfg['start_dir'])
cwd = os.getcwd()
glob_pat = os.path.join(self.cfg['start_dir'], "..", "jellyfish-*")
jellyfishdirs = glob.glob(glob_pat)
self.log.debug("glob pattern '%s' yields %s" % (glob_pat, jellyfishdirs))
if len(jellyfishdirs) == 1 and os.path.isdir(jellyfishdirs[0]):
jellyfishdir = jellyfishdirs[0]
# if there is a jellyfish directory
self.log.info("detected jellyfish directory %s, so using this source", jellyfishdir)
orig_jellyfishdir = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'jellyfish')
try:
# remove original symlink
os.unlink(orig_jellyfishdir)
except OSError, err:
self.log.warning("jellyfish plugin: failed to remove dir %s: %s" % (orig_jellyfishdir, err))
try:
# create new one
os.symlink(jellyfishdir, orig_jellyfishdir)
os.chdir(orig_jellyfishdir)
except OSError, err:
raise EasyBuildError("jellyfish plugin: failed to change dir %s: %s", orig_jellyfishdir, err)
run_cmd('./configure --prefix=%s' % orig_jellyfishdir)
cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (os.getenv('CC'), os.getenv('CXX'), os.getenv('CFLAGS'))
run_cmd(cmd)
# the install step runs the jellyfish script; this is a wrapper that will compile .lib/jellyfish
run_cmd("bin/jellyfish cite")
# return to original dir
try:
os.chdir(cwd)
except OSError:
raise EasyBuildError("jellyfish: Could not return to original dir %s", cwd)
elif jellyfishdirs:
raise EasyBuildError("Found multiple 'jellyfish-*' directories: %s", jellyfishdirs)
else:
self.log.info("no seperate source found for jellyfish, letting Makefile build shipped version")
self.log.debug("end jellyfish")
def kmer(self):
"""Install procedure for kmer (Meryl)."""
self.log.info("Begin Meryl")
dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'kmer')
try:
os.chdir(dst)
except OSError, err:
raise EasyBuildError("Meryl: failed to change to dst dir %s: %s", dst, err)
cmd = "./configure.sh"
run_cmd(cmd)
cmd = 'make -j 1 CCDEP="%s -MM -MG" CXXDEP="%s -MM -MG"' % (os.getenv('CC'), os.getenv('CXX'))
run_cmd(cmd)
cmd = 'make install'
run_cmd(cmd)
self.log.info("End Meryl")
def trinityplugin(self, plugindir, cc=None):
"""Install procedure for Trinity plugins."""
self.log.info("Begin %s plugin" % plugindir)
dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', plugindir)
try:
os.chdir(dst)
except OSError, err:
raise EasyBuildError("%s plugin: failed to change to dst dir %s: %s", plugindir, dst, err)
if not cc:
cc = os.getenv('CC')
cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (cc, os.getenv('CXX'), os.getenv('CFLAGS'))
run_cmd(cmd)
self.log.info("End %s plugin" % plugindir)
def configure_step(self):
"""No configuration for Trinity."""
pass
def build_step(self):
"""No building for Trinity."""
pass
def install_step(self):
"""Custom install procedure for Trinity."""
if LooseVersion(self.version) < LooseVersion('2012-10-05'):
self.inchworm()
self.chrysalis()
self.kmer()
self.butterfly()
bwapluginver = self.cfg['bwapluginver']
if bwapluginver:
self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)
if self.cfg['RSEMmod']:
self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))
else:
self.jellyfish()
inchworm_flags = self.inchworm(run=False)
chrysalis_flags = self.chrysalis(run=False)
cc = os.getenv('CC')
cxx = os.getenv('CXX')
lib_flags = ""
for lib in ['ncurses', 'zlib']:
libroot = get_software_root(lib)
if libroot:
lib_flags += " -L%s/lib" % libroot
fn = "Makefile"
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\s*=\s*).*$', r'\1%s' % inchworm_flags, line)
line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\s*=\s*).*$', r'\1%s' % chrysalis_flags, line)
line = re.sub(r'(/rsem && \$\(MAKE\))\s*$',
r'\1 CC=%s CXX="%s %s" CFLAGS_EXTRA="%s"\n' % (cc, cxx, lib_flags, lib_flags), line)
line = re.sub(r'(/fastool && \$\(MAKE\))\s*$',
r'\1 CC="%s -std=c99" CFLAGS="%s ${CFLAGS}"\n' % (cc, lib_flags), line)
sys.stdout.write(line)
trinity_compiler = None
comp_fam = self.toolchain.comp_family()
if comp_fam in [toolchain.INTELCOMP]:
trinity_compiler = "intel"
elif comp_fam in [toolchain.GCC]:
trinity_compiler = "gcc"
else:
raise EasyBuildError("Don't know how to set TRINITY_COMPILER for %s compiler", comp_fam)
cmd = "make TRINITY_COMPILER=%s" % trinity_compiler
run_cmd(cmd)
# butterfly is not included in standard build
self.butterfly()
# remove sample data if desired
if not self.cfg['withsampledata']:
try:
shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))
except OSError, err:
raise EasyBuildError("Failed to remove sample data: %s", err)
def sanity_check_step(self):
"""Custom sanity check for Trinity."""
path = 'trinityrnaseq_r%s' % self.version
# these lists are definitely non-exhaustive, but better than nothing
custom_paths = {
'files': [os.path.join(path, x) for x in ['Inchworm/bin/inchworm', 'Chrysalis/Chrysalis']],
'dirs': [os.path.join(path, x) for x in ['Butterfly/src/bin', 'util']]
}
super(EB_Trinity, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom tweaks for PATH variable for Trinity."""
guesses = super(EB_Trinity, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.basename(self.cfg['start_dir'].strip('/'))],
})
return guesses
| gpl-2.0 | -8,922,438,933,954,533,000 | -8,817,540,851,031,798,000 | 35.681388 | 114 | 0.574991 | false |
ntoll/yotta | yotta/test/components.py | 4 | 2617 | #! /usr/bin/env python2.7
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
import unittest
import os
import shutil
import errno
import logging
import tempfile
from collections import OrderedDict
from yotta.lib import access
from yotta.lib import component
from yotta.lib.pool import pool
from yotta.lib.fsutils import mkDirP, rmRf
test_json = '''{
"name": "something",
"version": "0.0.7",
"description": "some description.",
"private": false,
"homepage": "https://github.com/somewhere/something",
"bugs": {
"url": "about:blank",
"email": "[email protected]"
},
"author": "James Crosby <[email protected]>",
"licenses": [
{
"type": "Copyright (C) 2013 ARM Limited, all rights reserved.",
"url": "about:blank"
}
],
"dependencies": {
"toolchain": "ARM-RD/toolchain",
"libc": "ARM-RD/libc",
"libobjc2": "ARM-RD/libobjc2 @>0.0.7",
"yottos-platform": "ARM-RD/yottos-platform @0.0.3",
"emlib": "ARM-RD/emlib",
"nsobject": "ARM-RD/nsobject",
"nslog": "ARM-RD/nslog",
"nsassert": "ARM-RD/nsassert",
"thisdoesnotexist": "ARM-RD/thisdoesnotexist"
},
"testDependencies": {
"atestdep": "~0.2.3"
},
"targetDependencies": {
"sometarget": {
"atargetdep": "~1.3.4"
}
},
"testTargetDependencies": {
"sometarget": {
"anothertargetdep": "~1.3.4"
},
"someothertarget": {
"adifferenttargetdep": "~1.3.4"
}
}
}
'''
deps_in_order = [
'toolchain', 'libc', 'libobjc2', 'yottos-platform', 'emlib',
'nsobject', 'nslog', 'nsassert', 'thisdoesnotexist'
]
test_deps_in_order = deps_in_order + ['atestdep']
logging.basicConfig(
level=logging.ERROR
)
class ComponentTestCase(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
rmRf(self.test_dir)
def test_component_init(self):
# test things about components that don't (and shouldn't) require
# hitting the network
with open(os.path.join(self.test_dir, 'module.json'), 'w') as f:
f.write(test_json)
c = component.Component(self.test_dir)
self.assertTrue(c)
self.assertEqual(c.getName(), 'something')
self.assertEqual(str(c.getVersion()), '0.0.7')
deps = c.getDependencies()
self.assertEqual(list(deps.keys()), deps_in_order)
test_deps = c.getDependencies(test=True)
self.assertEqual(list(test_deps.keys()), test_deps_in_order)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -205,239,206,544,286,370 | -9,027,210,823,700,759,000 | 23.231481 | 73 | 0.618647 | false |
nullx002/pychess | testing/frc_castling.py | 20 | 1497 | from __future__ import print_function
import unittest
from pychess.Utils.const import *
from pychess.Utils.lutils.leval import LBoard
from pychess.Utils.lutils.lmove import FLAG
from pychess.Utils.lutils.lmovegen import genCastles, newMove
# TODO: add more test data
data = (
("r3k2r/8/8/8/8/8/8/R3K2R w AH - 0 1", [(E1, G1, KING_CASTLE), (E1, C1, QUEEN_CASTLE)]),
("r3k2r/8/8/8/8/8/8/R3K2R b ah - 0 1", [(E8, G8, KING_CASTLE), (E8, C8, QUEEN_CASTLE)]),
("1br3kr/2p5/8/8/8/8/8/1BR3KR w CH - 0 2", [(G1, G1, KING_CASTLE), (G1, C1, QUEEN_CASTLE)]),
("1br3kr/2p5/8/8/8/8/8/1BR3KR b ch - 0 2", [(G8, G8, KING_CASTLE), (G8, C8, QUEEN_CASTLE)]),
("2r1k2r/8/8/8/8/8/8/2R1K2R w H - 0 1", [(E1, G1, KING_CASTLE)]),
("2r1k2r/8/8/8/8/8/8/2R1K2R b h - 0 1", [(E8, G8, KING_CASTLE)]),
("3rk1qr/8/8/8/8/8/8/3RK1QR w - - 0 1", []),
("3rk1qr/8/8/8/8/8/8/3RK1QR b - - 0 1", []),
)
class FRCCastlingTestCase(unittest.TestCase):
def testFRCCastling(self):
"""Testing FRC castling movegen"""
print()
for fen, castles in data:
print(fen)
board = LBoard(FISCHERRANDOMCHESS)
board.applyFen(fen)
#print board
moves = [move for move in genCastles(board)]
self.assertEqual(len(moves), len(castles))
for i, castle in enumerate(castles):
kfrom, kto, flag = castle
self.assertEqual(moves[i], newMove(kfrom, kto, flag))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,709,113,272,480,847,000 | -7,409,567,986,508,739,000 | 36.425 | 93 | 0.595858 | false |
kvar/ansible | lib/ansible/modules/files/unarchive.py | 3 | 36099 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2013, Dylan Martin <[email protected]>
# Copyright: (c) 2015, Toshio Kuratomi <[email protected]>
# Copyright: (c) 2016, Dag Wieers <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: unarchive
version_added: '1.4'
short_description: Unpacks an archive after (optionally) copying it from the local machine.
description:
- The C(unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
- By default, it will copy the source file from the local system to the target before unpacking.
- Set C(remote_src=yes) to unpack an archive which already exists on the target.
- If checksum validation is desired, use M(get_url) or M(uri) instead to fetch the file and set C(remote_src=yes).
- For Windows targets, use the M(win_unzip) module instead.
options:
src:
description:
- If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
target server to existing archive file to unpack.
- If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
simple cases, for full download support use the M(get_url) module.
type: path
required: true
dest:
description:
- Remote absolute path where the archive should be unpacked.
type: path
required: true
copy:
description:
- If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.
- This option has been deprecated in favor of C(remote_src).
- This option is mutually exclusive with C(remote_src).
type: bool
default: yes
creates:
description:
- If the specified absolute path (file or directory) already exists, this step will B(not) be run.
type: path
version_added: "1.6"
list_files:
description:
- If set to True, return the list of files that are contained in the tarball.
type: bool
default: no
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
type: list
version_added: "2.1"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
type: bool
default: no
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
- Each space-separated command-line option should be a new element of the array. See examples.
- Command-line options with multiple elements must use multiple lines in the array, one for each element.
type: list
default: ""
version_added: "2.1"
remote_src:
description:
- Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller.
- This option is mutually exclusive with C(copy).
type: bool
default: no
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https URL as the source of the file.
- This should only be set to C(no) on personally controlled sites using a self-signed certificate.
- Prior to 2.2 the code worked as if this was set to C(yes).
type: bool
default: yes
version_added: "2.2"
extends_documentation_fragment:
- decrypt
- files
todo:
- Re-implement tar support using native tarfile module.
- Re-implement zip support using native zipfile module.
notes:
- Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
- Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar).
- Does not handle I(.gz) files, I(.bz2) files or I(.xz) files that do not contain a I(.tar) archive.
- Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not
supported, it will always unpack the archive.
- Existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction.
- Existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not.
seealso:
- module: archive
- module: iso_extract
- module: win_unzip
author: Michael DeHaan
'''
EXAMPLES = r'''
- name: Extract foo.tgz into /var/lib/foo
unarchive:
src: foo.tgz
dest: /var/lib/foo
- name: Unarchive a file that is already on the remote machine
unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file that needs to be downloaded (added in 2.0)
unarchive:
src: https://example.com/example.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file with extra options
unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
extra_opts:
- --transform
- s/^xxx/yyy/
'''
import binascii
import codecs
import datetime
import fnmatch
import grp
import os
import platform
import pwd
import re
import stat
import time
import traceback
from zipfile import ZipFile, BadZipfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_file
from ansible.module_utils._text import to_bytes, to_native, to_text
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
INVALID_OWNER_RE = re.compile(r': Invalid owner')
INVALID_GROUP_RE = re.compile(r': Invalid group')
def crc32(path):
''' Return a CRC32 checksum of a file '''
with open(path, 'rb') as f:
file_content = f.read()
return binascii.crc32(file_content) & 0xffffffff
def shell_escape(string):
''' Quote meta-characters in the args for the unix shell '''
return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
class UnarchiveError(Exception):
pass
class ZipArchive(object):
def __init__(self, src, b_dest, file_args, module):
self.src = src
self.b_dest = b_dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
self.excludes = module.params['exclude']
self.includes = []
self.cmd_path = self.module.get_bin_path('unzip')
self.zipinfocmd_path = self.module.get_bin_path('zipinfo')
self._files_in_archive = []
self._infodict = dict()
def _permstr_to_octal(self, modestr, umask):
''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
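# e.g. _permstr_to_octal('rw-r--r--', 0) returns 0o644; a non-zero umask clears the masked bits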
revstr = modestr[::-1]
mode = 0
for j in range(0, 3):
for i in range(0, 3):
if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
mode += 2 ** (i + 3 * j)
# The unzip utility does not support setting the stST bits
# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
# mode += 2 ** (9 + j)
return (mode & ~umask)
def _legacy_file_list(self, force_refresh=False):
unzip_bin = self.module.get_bin_path('unzip')
if not unzip_bin:
raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
if rc:
raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
for line in out.splitlines()[3:-2]:
fields = line.split(None, 7)
self._files_in_archive.append(fields[7])
self._infodict[fields[7]] = int(fields[6])
def _crc32(self, path):
if self._infodict:
return self._infodict[path]
try:
archive = ZipFile(self.src)
except BadZipfile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for item in archive.infolist():
self._infodict[item.filename] = int(item.CRC)
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
return self._infodict[path]
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
self._files_in_archive = []
try:
archive = ZipFile(self.src)
except BadZipfile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list(force_refresh)
else:
raise
else:
try:
for member in archive.namelist():
exclude_flag = False
if self.excludes:
for exclude in self.excludes:
if fnmatch.fnmatch(member, exclude):
exclude_flag = True
break
if not exclude_flag:
self._files_in_archive.append(to_native(member))
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
archive.close()
return self._files_in_archive
def is_unarchived(self):
# BSD unzip doesn't support zipinfo listings with timestamp.
cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
if self.excludes:
cmd.extend(['-x', ] + self.excludes)
rc, out, err = self.module.run_command(cmd)
old_out = out
diff = ''
out = ''
if rc == 0:
unarchived = True
else:
unarchived = False
# Get some information related to user/group ownership
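# (reads the current process umask by setting it to 0 and immediately restoring it)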
umask = os.umask(0)
os.umask(umask)
systemtype = platform.system()
# Get current user and group information
groups = os.getgroups()
run_uid = os.getuid()
run_gid = os.getgid()
try:
run_owner = pwd.getpwuid(run_uid).pw_name
except (TypeError, KeyError):
run_owner = run_uid
try:
run_group = grp.getgrgid(run_gid).gr_name
except (KeyError, ValueError, OverflowError):
run_group = run_gid
# Get future user ownership
fut_owner = fut_uid = None
if self.file_args['owner']:
try:
tpw = pwd.getpwnam(self.file_args['owner'])
except KeyError:
try:
tpw = pwd.getpwuid(self.file_args['owner'])
except (TypeError, KeyError):
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
else:
try:
fut_owner = run_owner
except Exception:
pass
fut_uid = run_uid
# Get future group ownership
fut_group = fut_gid = None
if self.file_args['group']:
try:
tgr = grp.getgrnam(self.file_args['group'])
except (ValueError, KeyError):
try:
tgr = grp.getgrgid(self.file_args['group'])
except (KeyError, ValueError, OverflowError):
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
fut_gid = tgr.gr_gid
else:
try:
fut_group = run_group
except Exception:
pass
fut_gid = run_gid
for line in old_out.splitlines():
change = False
pcs = line.split(None, 7)
if len(pcs) != 8:
# Too few fields... probably a piece of the header or footer
continue
# Check first and seventh field in order to skip header/footer
if len(pcs[0]) != 7 and len(pcs[0]) != 10:
continue
if len(pcs[6]) != 15:
continue
# Possible entries:
# -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
# -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
# -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
# --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
permstr = pcs[0][1:]
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
out += 'Path %s is excluded on request\n' % path
continue
# Itemized change requires L for symlink
if path[-1] == '/':
if ztype != 'd':
err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
ftype = 'd'
elif ztype == 'l':
ftype = 'L'
elif ztype == '-':
ftype = 'f'
elif ztype == '?':
ftype = 'f'
# Some files may be storing FAT permissions, not Unix permissions
# For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
# This permission will then be modified by the system UMask.
# BSD always applies the Umask, even to Unix permissions.
# For Unix style permissions on Linux or Mac, we want to use them directly.
# So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
if len(permstr) == 6:
if path[-1] == '/':
permstr = 'rwxrwxrwx'
elif permstr == 'rwx---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
file_umask = umask
elif 'bsd' in systemtype.lower():
file_umask = umask
else:
file_umask = 0
# Test string conformity
if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
# DEBUG
# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
try:
st = os.lstat(b_dest)
except Exception:
change = True
self.includes.append(path)
err += 'Path %s is missing\n' % path
diff += '>%s++++++.?? %s\n' % (ftype, path)
continue
# Compare file types
if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
change = True
self.includes.append(path)
err += 'File %s already exists, but not as a directory\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'f' and not stat.S_ISREG(st.st_mode):
change = True
unarchived = False
self.includes.append(path)
err += 'Directory %s already exists, but not as a regular file\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
change = True
self.includes.append(path)
err += 'Directory %s already exists, but not as a symlink\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
itemized = list('.%s.......??' % ftype)
# Note: this timestamp calculation has a rounding error
# somewhere... unzip and this timestamp can be one second off
# When that happens, we report a change and re-unzip the file
dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
timestamp = time.mktime(dt_object.timetuple())
# Compare file timestamps
if stat.S_ISREG(st.st_mode):
if self.module.params['keep_newer']:
if timestamp > st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s is older, replacing file\n' % path
itemized[4] = 't'
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
itemized[4] = 't'
# Compare file sizes
if stat.S_ISREG(st.st_mode) and size != st.st_size:
change = True
err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
itemized[3] = 's'
# Compare file checksums
if stat.S_ISREG(st.st_mode):
crc = crc32(b_dest)
if crc != self._crc32(path):
change = True
err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
itemized[2] = 'c'
# Compare file permissions
# Do not handle permissions of symlinks
if ftype != 'L':
# Use the new mode provided with the action, if there is one
if self.file_args['mode']:
if isinstance(self.file_args['mode'], int):
mode = self.file_args['mode']
else:
try:
mode = int(self.file_args['mode'], 8)
except Exception as e:
try:
mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
except ValueError as e:
self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
else:
mode = self._permstr_to_octal(permstr, file_umask)
if mode != stat.S_IMODE(st.st_mode):
change = True
itemized[5] = 'p'
err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
# Compare file user ownership
owner = uid = None
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except (TypeError, KeyError):
uid = st.st_uid
# If we are not root and requested owner is not our user, fail
if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
if owner and owner != fut_owner:
change = True
err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
itemized[6] = 'o'
elif uid and uid != fut_uid:
change = True
err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
itemized[6] = 'o'
# Compare file group ownership
group = gid = None
try:
group = grp.getgrgid(st.st_gid).gr_name
except (KeyError, ValueError, OverflowError):
gid = st.st_gid
if run_uid != 0 and fut_gid not in groups:
raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
if group and group != fut_group:
change = True
err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
itemized[6] = 'g'
elif gid and gid != fut_gid:
change = True
err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
itemized[6] = 'g'
# Register changed files and finalize diff output
if change:
if path not in self.includes:
self.includes.append(path)
diff += '%s %s\n' % (''.join(itemized), path)
if self.includes:
unarchived = False
# DEBUG
# out = old_out + out
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def unarchive(self):
cmd = [self.cmd_path, '-o']
if self.opts:
cmd.extend(self.opts)
cmd.append(self.src)
# NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
# if self.includes:
# NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
# cmd.extend(map(shell_escape, self.includes))
if self.excludes:
cmd.extend(['-x'] + self.excludes)
cmd.extend(['-d', self.b_dest])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Command "unzip" not found.'
cmd = [self.cmd_path, '-l', self.src]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return True, None
return False, 'Command "%s" could not handle archive.' % self.cmd_path
class TgzArchive(object):
def __init__(self, src, b_dest, file_args, module):
self.src = src
self.b_dest = b_dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
# Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
self.cmd_path = self.module.get_bin_path('gtar', None)
if not self.cmd_path:
# Fallback to tar
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = '-z'
self._files_in_archive = []
if self.cmd_path:
self.tar_type = self._get_tar_type()
else:
self.tar_type = None
def _get_tar_type(self):
cmd = [self.cmd_path, '--version']
(rc, out, err) = self.module.run_command(cmd)
tar_type = None
if out.startswith('bsdtar'):
tar_type = 'bsd'
elif out.startswith('tar') and 'GNU' in out:
tar_type = 'gnu'
return tar_type
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
cmd = [self.cmd_path, '--list', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
# Compensate for locale-related problems in gtar output (octal unicode representation) #11348
# filename = filename.decode('string_escape')
filename = to_native(codecs.escape_decode(filename)[0])
# We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
# they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
# Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
if filename.startswith('/'):
filename = filename[1:]
exclude_flag = False
if self.excludes:
for exclude in self.excludes:
if fnmatch.fnmatch(filename, exclude):
exclude_flag = True
break
if not exclude_flag:
self._files_in_archive.append(to_native(filename))
return self._files_in_archive
def is_unarchived(self):
cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# Check whether the differences are in something that we're
# setting anyway
# What is different
unarchived = True
old_out = out
out = ''
run_uid = os.getuid()
# When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
# Only way to be sure is to check request with what is on disk (as we do for zip)
# Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
for line in old_out.splitlines() + err.splitlines():
# FIXME: Remove the bogus lines from error-output as well !
# Ignore bogus errors on empty filenames (when using --split-component)
if EMPTY_FILE_RE.search(line):
continue
if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
out += line + '\n'
if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
out += line + '\n'
if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
out += line + '\n'
if MOD_TIME_DIFF_RE.search(line):
out += line + '\n'
if MISSING_FILE_RE.search(line):
out += line + '\n'
if INVALID_OWNER_RE.search(line):
out += line + '\n'
if INVALID_GROUP_RE.search(line):
out += line + '\n'
if out:
unarchived = False
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend(['--show-transformed-names'] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Commands "gtar" and "tar" not found.'
if self.tar_type != 'gnu':
return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
try:
if self.files_in_archive:
return True, None
except UnarchiveError:
return False, 'Command "%s" could not handle archive.' % self.cmd_path
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False, 'Command "%s" found no files in archive.' % self.cmd_path
# Class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarArchive, self).__init__(src, b_dest, file_args, module)
# argument to tar
self.zipflag = ''
# Class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
self.zipflag = '-j'
# Class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
self.zipflag = '-J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
(can_handle, reason) = obj.can_handle_archive()
if can_handle:
return obj
reasons.add(reason)
reason_msg = ' '.join(reasons)
module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(type='path', required=True),
dest=dict(type='path', required=True),
remote_src=dict(type='bool', default=False),
creates=dict(type='path'),
list_files=dict(type='bool', default=False),
keep_newer=dict(type='bool', default=False),
exclude=dict(type='list', default=[]),
extra_opts=dict(type='list', default=[]),
validate_certs=dict(type='bool', default=True),
),
add_file_common_args=True,
# check-mode only works for zip files, we cover that later
supports_check_mode=True,
)
src = module.params['src']
dest = module.params['dest']
b_dest = to_bytes(dest, errors='surrogate_or_strict')
remote_src = module.params['remote_src']
file_args = module.load_file_common_arguments(module.params)
# did tar file arrive?
if not os.path.exists(src):
if not remote_src:
module.fail_json(msg="Source '%s' failed to transfer" % src)
# If remote_src=true, and src= contains ://, try and download the file to a temp directory.
elif '://' in src:
src = fetch_file(module, src)
else:
module.fail_json(msg="Source '%s' does not exist" % src)
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
# skip working with 0 size archives
try:
if os.path.getsize(src) == 0:
module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
except Exception as e:
module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
# is dest OK to receive tar file?
if not os.path.isdir(b_dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest)
handler = pick_handler(src, b_dest, file_args, module)
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack?
check_results = handler.is_unarchived()
# DEBUG
# res_args['check_results'] = check_results
if module.check_mode:
res_args['changed'] = not check_results['unarchived']
elif check_results['unarchived']:
res_args['changed'] = False
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
else:
res_args['changed'] = True
# Get diff if required
if check_results.get('diff', False):
res_args['diff'] = {'prepared': check_results['diff']}
# Run only if we found differences (idempotence) or diff was missing
if res_args.get('diff', True) and not module.check_mode:
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
except (IOError, OSError) as e:
module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
if module.params['list_files']:
res_args['files'] = handler.files_in_archive
module.exit_json(**res_args)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,903,804,755,654,625,000 | 5,215,892,809,624,215,000 | 38.538883 | 158 | 0.55863 | false |
Qalthos/ansible | lib/ansible/modules/network/fortios/fortios_endpoint_control_forticlient_registration_sync.py | 24 | 8122 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the library uses python logging; you can get the logs if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_forticlient_registration_sync
short_description: Configure FortiClient registration synchronization settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure the endpoint_control feature and forticlient_registration_sync category.
Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
endpoint_control_forticlient_registration_sync:
description:
- Configure FortiClient registration synchronization settings.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
peer-ip:
description:
- IP address of the peer FortiGate for endpoint license synchronization.
peer-name:
description:
- Peer name.
required: true
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure FortiClient registration synchronization settings.
fortios_endpoint_control_forticlient_registration_sync:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
endpoint_control_forticlient_registration_sync:
state: "present"
peer-ip: "<your_own_value>"
peer-name: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_endpoint_control_forticlient_registration_sync_data(json):
option_list = ['peer-ip', 'peer-name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def endpoint_control_forticlient_registration_sync(data, fos):
vdom = data['vdom']
endpoint_control_forticlient_registration_sync_data = data['endpoint_control_forticlient_registration_sync']
filtered_data = filter_endpoint_control_forticlient_registration_sync_data(endpoint_control_forticlient_registration_sync_data)
if endpoint_control_forticlient_registration_sync_data['state'] == "present":
return fos.set('endpoint-control',
'forticlient-registration-sync',
data=filtered_data,
vdom=vdom)
elif endpoint_control_forticlient_registration_sync_data['state'] == "absent":
return fos.delete('endpoint-control',
'forticlient-registration-sync',
mkey=filtered_data['peer-name'],
vdom=vdom)
def fortios_endpoint_control(data, fos):
login(data)
methodlist = ['endpoint_control_forticlient_registration_sync']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"endpoint_control_forticlient_registration_sync": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"peer-ip": {"required": False, "type": "str"},
"peer-name": {"required": True, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,957,046,313,504,925,000 | -5,327,039,359,596,753,000 | 29.419476 | 131 | 0.631125 | false |
sounak98/coala-bears | tests/java/CheckstyleBearTest.py | 2 | 3245 | import os
import pathlib
from tempfile import NamedTemporaryFile
from queue import Queue
from bears.java import CheckstyleBear
from tests.BearTestHelper import generate_skip_decorator
from tests.LocalBearTestHelper import LocalBearTestHelper
from coalib.settings.Section import Section
from coalib.settings.Setting import path, Setting
@generate_skip_decorator(CheckstyleBear.CheckstyleBear)
class CheckstyleBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('test section')
self.uut = CheckstyleBear.CheckstyleBear(self.section, Queue())
test_files = os.path.join(os.path.dirname(__file__), 'test_files')
self.good_file = os.path.join(test_files, 'CheckstyleGood.java')
self.bad_file = os.path.join(test_files, 'CheckstyleBad.java')
self.empty_config = os.path.join(test_files,
'checkstyle_empty_config.xml')
def test_run(self):
self.check_validity(self.uut, [], self.good_file)
self.check_validity(self.uut, [], self.bad_file, valid=False)
def test_style_google(self):
self.section['checkstyle_configs'] = 'google'
self.check_validity(self.uut, [], self.good_file)
def test_style_sun(self):
self.section['checkstyle_configs'] = 'sun'
self.check_validity(self.uut, [], self.good_file)
def test_style_android(self):
self.section['checkstyle_configs'] = 'android-check-easy'
self.check_validity(self.uut, [], self.good_file)
self.section['checkstyle_configs'] = 'android-check-hard'
self.check_validity(self.uut, [], self.good_file)
def test_style_geosoft(self):
self.section['checkstyle_configs'] = 'geosoft'
self.check_validity(self.uut, [], self.good_file)
def test_config_failure_use_spaces(self):
self.section['checkstyle_configs'] = 'google'
self.section.append(Setting('use_spaces', False))
with self.assertRaises(AssertionError):
self.check_validity(self.uut, [], self.good_file)
def test_config_failure_indent_size(self):
self.section['checkstyle_configs'] = 'google'
self.section.append(Setting('indent_size', 3))
with self.assertRaises(AssertionError):
self.check_validity(self.uut, [], self.good_file)
def test_with_custom_configfile(self):
self.section['checkstyle_configs'] = self.empty_config
self.check_validity(self.uut, [], self.good_file)
self.check_validity(self.uut, [], self.bad_file)
def known_checkstyle_test(monkeypatch):
monkeypatch.setattr(CheckstyleBear, 'known_checkstyles', {'such': 'style'})
assert CheckstyleBear.known_checkstyle_or_path('such') == 'such'
def known_path_test(monkeypatch):
monkeypatch.setattr(CheckstyleBear, 'known_checkstyles', {'such': 'style'})
with NamedTemporaryFile() as coafile, NamedTemporaryFile() as style_file:
coafile_path = pathlib.Path(coafile.name)
style_path = pathlib.Path(style_file.name)
setting = Setting(
'style_path', style_path.name, origin=str(coafile_path))
assert (
CheckstyleBear.known_checkstyle_or_path(setting) == str(style_path)
)
| agpl-3.0 | 6,066,281,109,665,293,000 | 2,115,193,065,472,150,000 | 39.5625 | 79 | 0.672727 | false |
jlemanbeto/Server | dependencies64/freetype/src/tools/docmaker/tohtml.py | 78 | 18715 | # ToHTML (c) 2002, 2003, 2005, 2006, 2007, 2008
# David Turner <[email protected]>
from sources import *
from content import *
from formatter import *
import time
# The following defines the HTML header used by all generated pages.
html_header_1 = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>\
"""
html_header_2 = """\
API Reference</title>
<style type="text/css">
body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
color: #000000;
background: #FFFFFF; }
p { text-align: justify; }
h1 { text-align: center; }
li { text-align: justify; }
td { padding: 0 0.5em 0 0.5em; }
td.left { padding: 0 0.5em 0 0.5em;
text-align: left; }
a:link { color: #0000EF; }
a:visited { color: #51188E; }
a:hover { color: #FF0000; }
span.keyword { font-family: monospace;
text-align: left;
white-space: pre;
color: darkblue; }
pre.colored { color: blue; }
ul.empty { list-style-type: none; }
</style>
</head>
<body>
"""
html_header_3 = """
<table align=center><tr><td><font size=-1>[<a href="\
"""
html_header_3i = """
<table align=center><tr><td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_4 = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_5 = """\
">TOC</a>]</font></td></tr></table>
<center><h1>\
"""
html_header_5t = """\
">Index</a>]</font></td>
<td width="100%"></td></tr></table>
<center><h1>\
"""
html_header_6 = """\
API Reference</h1></center>
"""
# The HTML footer used by all generated pages.
html_footer = """\
</body>
</html>\
"""
# The header and footer used for each section.
section_title_header = "<center><h1>"
section_title_footer = "</h1></center>"
# The header and footer used for code segments.
code_header = '<pre class="colored">'
code_footer = '</pre>'
# Paragraph header and footer.
para_header = "<p>"
para_footer = "</p>"
# Block header and footer.
block_header = '<table align=center width="75%"><tr><td>'
block_footer_start = """\
</td></tr></table>
<hr width="75%">
<table align=center width="75%"><tr><td><font size=-2>[<a href="\
"""
block_footer_middle = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-2>[<a href="\
"""
block_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# Description header/footer.
description_header = '<table align=center width="87%"><tr><td>'
description_footer = "</td></tr></table><br>"
# Marker header/inter/footer combination.
marker_header = '<table align=center width="87%" cellpadding=5><tr bgcolor="#EEEEFF"><td><em><b>'
marker_inter = "</b></em></td></tr><tr><td>"
marker_footer = "</td></tr></table>"
# Header location header/footer.
header_location_header = '<table align=center width="87%"><tr><td>'
header_location_footer = "</td></tr></table><br>"
# Source code extracts header/footer.
source_header = '<table align=center width="87%"><tr bgcolor="#D6E8FF"><td><pre>\n'
source_footer = "\n</pre></table><br>"
# Chapter header/inter/footer.
chapter_header = '<br><table align=center width="75%"><tr><td><h2>'
chapter_inter = '</h2><ul class="empty"><li>'
chapter_footer = '</li></ul></td></tr></table>'
# Index footer.
index_footer_start = """\
<hr>
<table><tr><td width="100%"></td>
<td><font size=-2>[<a href="\
"""
index_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# TOC footer.
toc_footer_start = """\
<hr>
<table><tr><td><font size=-2>[<a href="\
"""
toc_footer_end = """\
">Index</a>]</font></td>
<td width="100%"></td>
</tr></table>
"""
# source language keyword coloration/styling
keyword_prefix = '<span class="keyword">'
keyword_suffix = '</span>'
section_synopsis_header = '<h2>Synopsis</h2>'
section_synopsis_footer = ''
# Translate a single line of source to HTML. This will convert
# a "<" into "&lt;", ">" into "&gt;", etc.
def html_quote( line ):
    result = string.replace( line, "&", "&amp;" )
    result = string.replace( result, "<", "&lt;" )
    result = string.replace( result, ">", "&gt;" )
return result
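# Quick illustration of the escaping above (added for clarity, not part of the
# original docmaker source):
#
#   html_quote( 'a < b & c > d' )  ->  'a &lt; b &amp; c &gt; d'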
# same as 'html_quote', but ignores left and right brackets
def html_quote0( line ):
    return string.replace( line, "&", "&amp;" )
def dump_html_code( lines, prefix = "" ):
# clean the last empty lines
    l = len( lines )
    while l > 0 and string.strip( lines[l - 1] ) == "":
l = l - 1
# The code footer should be directly appended to the last code
# line to avoid an additional blank line.
print prefix + code_header,
    for line in lines[0 : l + 1]:
print '\n' + prefix + html_quote( line ),
print prefix + code_footer,
class HtmlFormatter( Formatter ):
def __init__( self, processor, project_title, file_prefix ):
Formatter.__init__( self, processor )
global html_header_1, html_header_2, html_header_3
global html_header_4, html_header_5, html_footer
if file_prefix:
file_prefix = file_prefix + "-"
else:
file_prefix = ""
self.headers = processor.headers
self.project_title = project_title
self.file_prefix = file_prefix
self.html_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_4 + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_index_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3i + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_toc_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_5t + project_title + \
html_header_6
self.html_footer = "<center><font size=""-2"">generated on " + \
time.asctime( time.localtime( time.time() ) ) + \
"</font></center>" + html_footer
self.columns = 3
def make_section_url( self, section ):
return self.file_prefix + section.name + ".html"
def make_block_url( self, block ):
return self.make_section_url( block.section ) + "#" + block.name
def make_html_words( self, words ):
""" convert a series of simple words into some HTML text """
line = ""
if words:
line = html_quote( words[0] )
for w in words[1:]:
line = line + " " + html_quote( w )
return line
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
except:
# we detected a cross-reference to an unknown item
sys.stderr.write( \
"WARNING: undefined cross reference '" + name + "'.\n" )
return '?' + name + '?' + rest
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<i>' + name + '</i>' + rest
m = re_bold.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<b>' + name + '</b>' + rest
return html_quote( word )
def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
# convert `...' quotations into real left and right single quotes
line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \
                       r'\1&lsquo;\2&rsquo;\3', \
line )
# convert tilde into non-breakable space
        line = string.replace( line, "~", "&nbsp;" )
return para_header + line + para_footer
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
line = line + html_quote( l ) + '\n'
return line + code_footer
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
def print_html_items( self, items ):
print self.make_html_items( items )
def print_html_field( self, field ):
if field.name:
print "<table><tr valign=top><td><b>" + field.name + "</b></td><td>"
print self.make_html_items( field.items )
if field.name:
print "</td></tr></table>"
def html_source_quote( self, line, block_name = None ):
result = ""
while line:
m = re_source_crossref.match( line )
if m:
name = m.group( 2 )
prefix = html_quote( m.group( 1 ) )
length = len( m.group( 0 ) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
elif re_source_keywords.match( name ):
# this is a C keyword
result = result + prefix + keyword_prefix + name + keyword_suffix
elif self.identifiers.has_key( name ):
# this is a known identifier
block = self.identifiers[name]
result = result + prefix + '<a href="' + \
self.make_block_url( block ) + '">' + name + '</a>'
else:
result = result + html_quote( line[:length] )
line = line[length:]
else:
result = result + html_quote( line )
line = []
return result
def print_html_field_list( self, fields ):
print "<p></p>"
print "<table cellpadding=3 border=0>"
for field in fields:
if len( field.name ) > 22:
print "<tr valign=top><td colspan=0><b>" + field.name + "</b></td></tr>"
print "<tr valign=top><td></td><td>"
else:
print "<tr valign=top><td><b>" + field.name + "</b></td><td>"
self.print_html_items( field.items )
print "</td></tr>"
print "</table>"
def print_html_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# we begin a new series of field or value definitions, we
# will record them in the 'table_fields' list before outputting
# all of them as a single table
#
table_fields.append( field )
else:
if table_fields:
self.print_html_field_list( table_fields )
table_fields = []
self.print_html_items( field.items )
if table_fields:
self.print_html_field_list( table_fields )
#
# Formatting the index
#
def index_enter( self ):
print self.html_index_header
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[name]
url = self.make_block_url( block )
self.index_items[name] = url
def index_exit( self ):
# block_index already contains the sorted list of index names
count = len( self.block_index )
rows = ( count + self.columns - 1 ) / self.columns
print "<table align=center border=0 cellpadding=0 cellspacing=0>"
for r in range( rows ):
line = "<tr>"
for c in range( self.columns ):
i = r + c * rows
if i < count:
bname = self.block_index[r + c * rows]
url = self.index_items[bname]
line = line + '<td><a href="' + url + '">' + bname + '</a></td>'
else:
line = line + '<td></td>'
line = line + "</tr>"
print line
print "</table>"
print index_footer_start + \
self.file_prefix + "toc.html" + \
index_footer_end
print self.html_footer
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of content
#
def toc_enter( self ):
print self.html_toc_header
print "<center><h1>Table of Contents</h1></center>"
def toc_chapter_enter( self, chapter ):
print chapter_header + string.join( chapter.title ) + chapter_inter
print "<table cellpadding=5>"
def toc_section_enter( self, section ):
print '<tr valign=top><td class="left">'
print '<a href="' + self.make_section_url( section ) + '">' + \
section.title + '</a></td><td>'
print self.make_html_para( section.abstract )
def toc_section_exit( self, section ):
print "</td></tr>"
def toc_chapter_exit( self, chapter ):
print "</table>"
print chapter_footer
def toc_index( self, index_filename ):
print chapter_header + \
'<a href="' + index_filename + '">Global Index</a>' + \
chapter_inter + chapter_footer
def toc_exit( self ):
print toc_footer_start + \
self.file_prefix + "index.html" + \
toc_footer_end
print self.html_footer
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename == None:
toc_filename = self.file_prefix + "toc.html"
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# Formatting sections
#
def section_enter( self, section ):
print self.html_header
print section_title_header
print section.title
print section_title_footer
maxwidth = 0
for b in section.blocks.values():
if len( b.name ) > maxwidth:
maxwidth = len( b.name )
width = 70 # XXX magic number
if maxwidth <> 0:
# print section synopsis
print section_synopsis_header
print "<table align=center cellspacing=5 cellpadding=0 border=0>"
columns = width / maxwidth
if columns < 1:
columns = 1
count = len( section.block_names )
rows = ( count + columns - 1 ) / columns
for r in range( rows ):
line = "<tr>"
for c in range( columns ):
i = r + c * rows
line = line + '<td></td><td>'
if i < count:
name = section.block_names[i]
line = line + '<a href="#' + name + '">' + name + '</a>'
line = line + '</td>'
line = line + "</tr>"
print line
print "</table><br><br>"
print section_synopsis_footer
print description_header
print self.make_html_items( section.description )
print description_footer
def block_enter( self, block ):
print block_header
# place html anchor if needed
if block.name:
print '<h4><a name="' + block.name + '">' + block.name + '</a></h4>'
# dump the block C source lines now
if block.code:
header = ''
for f in self.headers.keys():
if block.source.filename.find( f ) >= 0:
header = self.headers[f] + ' (' + f + ')'
break;
# if not header:
# sys.stderr.write( \
# 'WARNING: No header macro for ' + block.source.filename + '.\n' )
if header:
print header_location_header
print 'Defined in ' + header + '.'
print header_location_footer
print source_header
for l in block.code:
print self.html_source_quote( l, block.name )
print source_footer
def markup_enter( self, markup, block ):
if markup.tag == "description":
print description_header
else:
print marker_header + markup.tag + marker_inter
self.print_html_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print description_footer
else:
print marker_footer
def block_exit( self, block ):
print block_footer_start + self.file_prefix + "index.html" + \
block_footer_middle + self.file_prefix + "toc.html" + \
block_footer_end
def section_exit( self, section ):
print html_footer
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section, self.file_prefix + section.name + '.html' )
# eof
| gpl-3.0 | 3,583,151,670,371,591,000 | -2,298,193,848,668,547,000 | 30.559865 | 97 | 0.508042 | false |
jay-tyler/ansible | lib/ansible/plugins/lookup/file.py | 103 | 2060 | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
basedir = self.get_basedir(variables)
for term in terms:
self._display.debug("File lookup term: %s" % term)
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the
# basedir of the current file, use dwim_relative to look in the
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
lookupfile = self._loader.path_dwim_relative(basedir, 'files', term)
self._display.vvvv("File lookup using %s as file" % lookupfile)
try:
if lookupfile:
contents, show_data = self._loader._get_file_contents(lookupfile)
ret.append(contents.rstrip())
else:
raise AnsibleParserError()
except AnsibleParserError:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
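# Illustrative playbook usage of this lookup plugin (standard Ansible lookup
# syntax, shown here as a hint; not part of the original file):
#
#   - debug: msg="file content is {{ lookup('file', 'foo.txt') }}"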
| gpl-3.0 | 3,040,761,457,064,755,700 | -2,199,409,440,755,426,800 | 37.148148 | 85 | 0.660194 | false |
PADAS/django-raster | raster/formulas.py | 1 | 9578 | import numpy
from pyparsing import CaselessLiteral, Combine, Forward, Literal, Optional, Word, ZeroOrMore, alphas, nums
from django.contrib.gis.gdal import GDALRaster
from .const import ALGEBRA_PIXEL_TYPE_GDAL, ALGEBRA_PIXEL_TYPE_NUMPY
class FormulaParser(object):
"""
Deconstruct mathematical algebra expressions and convert those into
    callable functions.
Adopted from: http://pyparsing.wikispaces.com/file/view/fourFn.py
"""
expr_stack = []
# The data dictionary holds the values on which to evaluate the formula
data = {}
# Map operator symbols to arithmetic operations in numpy
opn = {
"+": numpy.add,
"-": numpy.subtract,
"*": numpy.multiply,
"/": numpy.divide,
"^": numpy.power,
"==": numpy.equal,
"!=": numpy.not_equal,
">": numpy.greater,
">=": numpy.greater_equal,
"<": numpy.less,
"<=": numpy.less_equal,
"|": numpy.logical_or,
"&": numpy.logical_and
}
# Map function names to python functions
fn = {
"sin": numpy.sin,
"cos": numpy.cos,
"tan": numpy.tan,
"log": numpy.log,
"exp": numpy.exp,
"abs": numpy.abs,
"int": numpy.int,
"round": numpy.round,
"sign": numpy.sign,
}
def __init__(self):
"""
Setup the Backus Normal Form (BNF) parser logic.
"""
        self.dtype = ALGEBRA_PIXEL_TYPE_NUMPY
point = Literal(".")
e = CaselessLiteral("E")
fnumber = Combine(
Word("+-" + nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-" + nums, nums))
)
ident = Word(alphas, alphas + nums + "_$")
# Operators
plus = Literal("+")
minus = Literal("-")
mult = Literal("*")
div = Literal("/")
eq = Literal("==")
neq = Literal("!=")
lt = Literal("<")
le = Literal("<=")
gt = Literal(">")
ge = Literal(">=")
ior = Literal("|")
iand = Literal("&")
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
addop = plus | minus | eq
multop = mult | div | eq | neq | ge | le | gt | lt | ior | iand # Order matters here due to "<=" being caught by "<"
expop = Literal("^")
pi = CaselessLiteral("PI")
# Letters for variables
aa = CaselessLiteral("a")
bb = CaselessLiteral("b")
cc = CaselessLiteral("c")
dd = CaselessLiteral("d")
ee = CaselessLiteral("e")
ff = CaselessLiteral("f")
gg = CaselessLiteral("g")
hh = CaselessLiteral("h")
ii = CaselessLiteral("i")
jj = CaselessLiteral("j")
kk = CaselessLiteral("k")
ll = CaselessLiteral("l")
mm = CaselessLiteral("m")
nn = CaselessLiteral("n")
oo = CaselessLiteral("o")
pp = CaselessLiteral("p")
qq = CaselessLiteral("q")
rr = CaselessLiteral("r")
ss = CaselessLiteral("s")
tt = CaselessLiteral("t")
uu = CaselessLiteral("u")
vv = CaselessLiteral("v")
ww = CaselessLiteral("w")
xx = CaselessLiteral("x")
yy = CaselessLiteral("y")
zz = CaselessLiteral("z")
bnf = Forward()
atom = (
Optional('-') + Optional("!") + (
pi | e | fnumber | ident + lpar + bnf + rpar | # pi needs to be before the letters for it to be found
aa | bb | cc | dd | ee | ff | gg | hh | ii | jj | kk | ll | mm |
nn | oo | pp | qq | rr | ss | tt | uu | vv | ww | xx | yy | zz
).setParseAction(self.push_first) | (lpar + bnf.suppress() + rpar)
).setParseAction(self.push_unary_operator)
# By defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...",
        # we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + ZeroOrMore((expop + factor).setParseAction(self.push_first))
term = factor + ZeroOrMore((multop + factor).setParseAction(self.push_first))
bnf << term + ZeroOrMore((addop + term).setParseAction(self.push_first))
self.bnf = bnf
def push_first(self, strg, loc, toks):
self.expr_stack.append(toks[0])
def push_unary_operator(self, strg, loc, toks):
"""
Sets custom flag for unary operators.
"""
if toks:
if toks[0] == '-':
self.expr_stack.append('unary -')
elif toks[0] == '!':
self.expr_stack.append('unary !')
def evaluate_stack(self, stack):
"""
Evaluate a stack element.
"""
# Get operator element
op = stack.pop()
# Evaluate unary operators
if op == 'unary -':
return -self.evaluate_stack(stack)
if op == 'unary !':
return numpy.logical_not(self.evaluate_stack(stack))
# Evaluate binary operators
if op in ["+", "-", "*", "/", "^", ">", "<", "==", "!=", "<=", ">=", "|", "&", "!"]:
op2 = self.evaluate_stack(stack)
op1 = self.evaluate_stack(stack)
return self.opn[op](op1, op2)
elif op == "PI":
return numpy.pi
elif op == "E":
return numpy.e
elif op in self.fn:
return self.fn[op](self.evaluate_stack(stack))
elif op[0].isalpha() and len(op[0]) == 1 and op[0] in self.data:
return self.data[op[0]]
elif op[0].isalpha() and len(op[0]) == 1:
raise Exception('Found an undeclared variable in formula.')
else:
# If numeric, convert to numpy float
return numpy.array(op, dtype=self.dtype)
def parse_formula(self, formula):
"""
Parse a string formula into a BNF expression.
"""
# Clean formula before parsing
formula = self.clean_formula(formula)
# Reset expression stack
self.expr_stack = []
# Use bnf to parse the string
self.bnf.parseString(formula)
def clean_formula(self, formula):
"""
Remove any white space and line breaks from formula.
"""
return formula.replace(' ', '').replace('\n', '').replace('\r', '')
def evaluate(self, data=None):
"""
Evaluate the input data using the current formula expression stack.
"""
# Make sure a formula has been parsed before evaluating
if self.expr_stack == []:
raise Exception('Please specify a formula to evaluate.')
# Update dataset
if data:
self.data = data
# Evaluate stack on data
self.result = self.evaluate_stack(self.expr_stack)
return self.result
def evaluate_formula(self, formula, data={}, dtype=ALGEBRA_PIXEL_TYPE_NUMPY):
"""
Helper function to set formula and evaluate in one call.
"""
self.dtype = dtype
self.parse_formula(formula)
return self.evaluate(data)
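# Illustrative usage sketch (not part of the original module); the sample
# arrays and the formula string are made up, but the calls follow the methods
# defined above:
#
#   parser = FormulaParser()
#   data = {'a': numpy.array([1, 2, 3]), 'b': numpy.array([4, 5, 6])}
#   parser.evaluate_formula('a * b + 1', data)  # elementwise -> [5, 11, 19]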
class RasterAlgebraParser(FormulaParser):
"""
Compute raster algebra expressions using the FormulaParser class.
"""
def evaluate_raster_algebra(self, data, formula, check_aligned=False):
"""
Evaluate a raster algebra expression on a set of rasters. All input
rasters need to be strictly aligned (same size, geotransform and srid).
The input raster list will be zipped into a dictionary using the input
names. The resulting dictionary will be used as input data for formula
evaluation. If the check_aligned flag is set, the input rasters are
compared to make sure they are aligned.
"""
# Check that all input rasters are aligned
if check_aligned:
self.check_aligned(list(data.values()))
# Construct list of numpy arrays holding raster pixel data
data_arrays = {
key: numpy.ma.masked_values(rast.bands[0].data().ravel(), rast.bands[0].nodata_value)
for key, rast in data.items()
}
# Evaluate formula on raster data
result = self.evaluate_formula(formula, data_arrays)
# Reference first original raster for constructing result
orig = list(data.values())[0]
orig_band = orig.bands[0]
# Convert to default number type
result = result.astype(ALGEBRA_PIXEL_TYPE_NUMPY)
# Return GDALRaster holding results
return GDALRaster({
'datatype': ALGEBRA_PIXEL_TYPE_GDAL,
'driver': 'MEM',
'width': orig.width,
'height': orig.height,
'nr_of_bands': 1,
'srid': orig.srs.srid,
'origin': orig.origin,
'scale': orig.scale,
'skew': orig.skew,
'bands': [{
'nodata_value': orig_band.nodata_value,
'data': result
}],
})
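    # Illustrative call (added for clarity, not part of the original module);
    # rast_a and rast_b stand for two aligned GDALRaster objects:
    #
    #   parser = RasterAlgebraParser()
    #   result = parser.evaluate_raster_algebra(
    #       {'x': rast_a, 'y': rast_b}, 'x - y', check_aligned=True)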
def check_aligned(self, rasters):
"""
Assert that all input rasters are properly aligned.
"""
if not len(set([x.srs.srid for x in rasters])) == 1:
            raise Exception('Raster alignment check failed: SRIDs not all the same')
gt = rasters[0].geotransform
if any([gt != rast.geotransform for rast in rasters[1:]]):
            raise Exception('Raster alignment check failed: geotransform arrays are not all the same')
| bsd-3-clause | -3,072,883,379,120,000,500 | 541,839,742,566,409,600 | 32.256944 | 125 | 0.54479 | false |
yqm/sl4a | python/src/Lib/test/test_binop.py | 87 | 10683 | """Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
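# For example, gcd(10, 15) returns 5; with negative arguments the sign follows
# Python's modulo, e.g. gcd(-10, 2) returns -2 (see the tests further down).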
def isint(x):
"""Test whether an object is an instance of int or long."""
return isinstance(x, int) or isinstance(x, long)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, long, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
    """Test whether an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of longs."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0L, den=1L):
"""Constructor: Rat([num[, den]]).
The arguments must be ints or longs, and default to (0, 1)."""
if not isint(num):
raise TypeError, "Rat numerator must be int or long (%r)" % num
if not isint(den):
raise TypeError, "Rat denominator must be int or long (%r)" % den
# But the zero is always on
if den == 0:
raise ZeroDivisionError, "zero denominator"
g = gcd(den, num)
self.__num = long(num//g)
self.__den = long(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to an string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError, ("%s too large to convert to int" %
repr(self))
raise ValueError, "can't convert %s to int" % repr(self)
def __long__(self):
"""Convert a Rat to an long; self.den must be 1."""
if self.__den == 1:
return long(self.__num)
raise ValueError, "can't convert %s to long" % repr(self)
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
__rdiv__ = __rtruediv__
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
def __ne__(self, other):
"""Compare two Rats for inequality."""
return not self == other
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assert_(gcd(i, j) > 0)
self.assert_(gcd(-i, j) < 0)
self.assert_(gcd(i, -j) > 0)
self.assert_(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_future_div(self):
exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
test_support.run_unittest(RatTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 | -7,141,920,711,998,236,000 | -4,133,313,226,704,015,000 | 31.570122 | 80 | 0.52972 | false |
UdK-VPT/Open_eQuarter | mole3x/extensions/eval3/oeq_SQTP_Window.py | 2 | 1393 | # -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole3 import oeq_global
from mole3.project import config
from mole3.extensions import OeQExtension
from mole3.stat_corr import contemporary_base_uvalue_by_building_age_lookup
def calculation(self=None, parameters={},feature = None):
from scipy.constants import golden
from math import floor, ceil
from qgis.PyQt.QtCore import QVariant
wn_sqtp = NULL
if not oeq_global.isnull([parameters['WN_UP'],parameters['HHRS']]):
        wn_sqtp = float(parameters['WN_UP']) * float(parameters['HHRS']) / 1000
return {'WN_SQTP': {'type': QVariant.Double, 'value': wn_sqtp}}
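# Worked example with made-up inputs (added for clarity, not part of the
# original extension): WN_UP = 2.8 W/(m2K) and HHRS = 66000 Kh give
# WN_SQTP = 2.8 * 66000 / 1000 = 184.8 (kWh per m2 of window area and year).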
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='Present Spec. Transm. Heat Loss',
extension_name='Window SpecTransm (SQT, Present)',
layer_name= 'SQT Window Present',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='WN_SQTP',
source_type='none',
par_in=['WN_UP','HHRS'],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['WN_SQTP'],
    description="Calculate the present Transmission Heat Loss of the Building's Windows per m2",
evaluation_method=calculation)
extension.registerExtension(default=True)
| gpl-2.0 | -8,361,876,539,971,601,000 | 3,816,732,064,923,491,000 | 34.717949 | 101 | 0.7028 | false |
rismalrv/edx-platform | common/djangoapps/student/migrations/0040_auto__del_field_usersignupsource_user_id__add_field_usersignupsource_u.py | 114 | 12942 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'UserSignupSource.user_id'
db.delete_column('student_usersignupsource', 'user_id_id')
# Adding field 'UserSignupSource.user'
db.add_column('student_usersignupsource', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UserSignupSource.user_id'
raise RuntimeError("Cannot reverse this migration. 'UserSignupSource.user_id' and its values cannot be restored.")
# Deleting field 'UserSignupSource.user'
db.delete_column('student_usersignupsource', 'user_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 | 2,776,787,818,509,968,400 | -7,136,278,514,772,362,000 | 74.244186 | 182 | 0.549606 | false |
bspink/django | django/contrib/redirects/middleware.py | 383 | 1810 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
class RedirectFallbackMiddleware(object):
# Defined as class-level attributes to be subclassing-friendly.
response_gone_class = http.HttpResponseGone
response_redirect_class = http.HttpResponsePermanentRedirect
def __init__(self):
if not apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured(
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
def process_response(self, request, response):
# No need to check for a redirect for non-404 responses.
if response.status_code != 404:
return response
full_path = request.get_full_path()
current_site = get_current_site(request)
r = None
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if r is None and settings.APPEND_SLASH and not request.path.endswith('/'):
try:
r = Redirect.objects.get(
site=current_site,
old_path=request.get_full_path(force_append_slash=True),
)
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == '':
return self.response_gone_class()
return self.response_redirect_class(r.new_path)
# No redirect was found. Return the response.
return response
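    # Illustrative note (not part of the original module): with APPEND_SLASH
    # enabled, a 404 for "/old-page" is also matched against a Redirect whose
    # old_path is "/old-page/"; a matching Redirect with an empty new_path
    # returns "410 Gone" instead of redirecting.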
| bsd-3-clause | 7,919,025,418,061,302,000 | -4,789,742,566,280,044,000 | 34.490196 | 82 | 0.632597 | false |
ycaihua/kbengine | kbe/src/lib/python/Modules/_ctypes/libffi/generate-darwin-source-and-headers.py | 87 | 6613 | #!/usr/bin/env python
import subprocess
import os
import errno
import collections
import glob
import argparse
class Platform(object):
pass
class simulator_platform(Platform):
directory = 'darwin_ios'
sdk = 'iphonesimulator'
arch = 'i386'
triple = 'i386-apple-darwin11'
version_min = '-miphoneos-version-min=5.1.1'
prefix = "#ifdef __i386__\n\n"
suffix = "\n\n#endif"
src_dir = 'x86'
src_files = ['darwin.S', 'win32.S', 'ffi.c']
class simulator64_platform(Platform):
directory = 'darwin_ios'
sdk = 'iphonesimulator'
arch = 'x86_64'
triple = 'x86_64-apple-darwin13'
version_min = '-miphoneos-version-min=7.0'
prefix = "#ifdef __x86_64__\n\n"
suffix = "\n\n#endif"
src_dir = 'x86'
src_files = ['darwin64.S', 'ffi64.c']
class device_platform(Platform):
directory = 'darwin_ios'
sdk = 'iphoneos'
arch = 'armv7'
triple = 'arm-apple-darwin11'
version_min = '-miphoneos-version-min=5.1.1'
prefix = "#ifdef __arm__\n\n"
suffix = "\n\n#endif"
src_dir = 'arm'
src_files = ['sysv.S', 'trampoline.S', 'ffi.c']
class device64_platform(Platform):
directory = 'darwin_ios'
sdk = 'iphoneos'
arch = 'arm64'
triple = 'aarch64-apple-darwin13'
version_min = '-miphoneos-version-min=7.0'
prefix = "#ifdef __arm64__\n\n"
suffix = "\n\n#endif"
src_dir = 'aarch64'
src_files = ['sysv.S', 'ffi.c']
class desktop32_platform(Platform):
directory = 'darwin_osx'
sdk = 'macosx'
arch = 'i386'
triple = 'i386-apple-darwin10'
version_min = '-mmacosx-version-min=10.6'
src_dir = 'x86'
src_files = ['darwin.S', 'win32.S', 'ffi.c']
prefix = "#ifdef __i386__\n\n"
suffix = "\n\n#endif"
class desktop64_platform(Platform):
directory = 'darwin_osx'
sdk = 'macosx'
arch = 'x86_64'
triple = 'x86_64-apple-darwin10'
version_min = '-mmacosx-version-min=10.6'
prefix = "#ifdef __x86_64__\n\n"
suffix = "\n\n#endif"
src_dir = 'x86'
src_files = ['darwin64.S', 'ffi64.c']
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
mkdir_p(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
def list_files(src_dir, pattern=None, filelist=None):
if pattern: filelist = glob.iglob(os.path.join(src_dir, pattern))
for file in filelist:
yield os.path.basename(file)
def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None):
for filename in list_files(src_dir, pattern=pattern, filelist=filelist):
move_file(src_dir, dst_dir, filename, file_suffix=file_suffix, prefix=prefix, suffix=suffix)
def copy_src_platform_files(platform):
src_dir = os.path.join('src', platform.src_dir)
dst_dir = os.path.join(platform.directory, 'src', platform.src_dir)
copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix)
def build_target(platform, platform_headers):
def xcrun_cmd(cmd):
return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch)
tag='%s-%s' % (platform.sdk, platform.arch)
build_dir = 'build_%s' % tag
mkdir_p(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='%s' % (platform.version_min))
working_dir = os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
finally:
os.chdir(working_dir)
for src_dir in [build_dir, os.path.join(build_dir, 'include')]:
copy_files(src_dir,
os.path.join(platform.directory, 'include'),
pattern='*.h',
file_suffix=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
for filename in list_files(src_dir, pattern='*.h'):
platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix))
def make_tramp():
with open('src/arm/trampoline.S', 'w') as tramp_out:
p = subprocess.Popen(['bash', 'src/arm/gentramp.sh'], stdout=tramp_out)
p.wait()
def generate_source_and_headers(generate_osx=True, generate_ios=True):
copy_files('src', 'darwin_common/src', pattern='*.c')
copy_files('include', 'darwin_common/include', pattern='*.h')
if generate_ios:
make_tramp()
copy_src_platform_files(simulator_platform)
copy_src_platform_files(simulator64_platform)
copy_src_platform_files(device_platform)
copy_src_platform_files(device64_platform)
if generate_osx:
copy_src_platform_files(desktop32_platform)
copy_src_platform_files(desktop64_platform)
platform_headers = collections.defaultdict(set)
if generate_ios:
build_target(simulator_platform, platform_headers)
build_target(simulator64_platform, platform_headers)
build_target(device_platform, platform_headers)
build_target(device64_platform, platform_headers)
if generate_osx:
build_target(desktop32_platform, platform_headers)
build_target(desktop64_platform, platform_headers)
mkdir_p('darwin_common/include')
for header_name, tag_tuples in platform_headers.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('darwin_common/include', header_name), 'w') as header:
for tag_tuple in tag_tuples:
header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--only-ios', action='store_true', default=False)
parser.add_argument('--only-osx', action='store_true', default=False)
args = parser.parse_args()
generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx)
| lgpl-3.0 | 7,376,732,570,238,732,000 | 583,277,881,042,090,000 | 30.641148 | 136 | 0.622864 | false |
WillianPaiva/ycmd | cpp/ycm/tests/gmock/gtest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
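# Illustrative shape of the return value (test names are made up): running two
# iterations might yield
#   [['FooTest.A', 'FooTest.B'], ['FooTest.B', 'FooTest.A']]
# i.e. one inner list of 'TestCaseName.TestName' strings per '----' separator
# found in the program output.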
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
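# Example (assumed input, not from a real run):
#   GetTestCases(['FooTest.A', 'FooTest.B', 'BarTest.A']) -> ['FooTest', 'BarTest']
# Each test case name is kept once, in order of first appearance.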
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 | 4,381,925,805,291,872,000 | -1,937,334,017,489,161,700 | 37.612308 | 79 | 0.674396 | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Preferences/ConfigurationPages/CooperationPage.py | 2 | 4178 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the Cooperation configuration page.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, QRegExp
from PyQt5.QtGui import QRegExpValidator, QValidator
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_CooperationPage import Ui_CooperationPage
import Preferences
class CooperationPage(ConfigurationPageBase, Ui_CooperationPage):
"""
Class implementing the Cooperation configuration page.
"""
def __init__(self):
"""
Constructor
"""
super(CooperationPage, self).__init__()
self.setupUi(self)
self.setObjectName("CooperationPage")
self.__bannedUserValidator = QRegExpValidator(
QRegExp("[a-zA-Z0-9.-]+@"
"(?:(?:2(?:[0-4][0-9]|5[0-5])|[01]?[0-9]{1,2})\.){3}"
"(?:2(?:[0-4][0-9]|5[0-5])|[01]?[0-9]{1,2})"),
self.bannedUserEdit)
self.bannedUserEdit.setValidator(self.__bannedUserValidator)
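        # Accepted entries have the form "user@IPv4-address", e.g. the
        # (hypothetical) "[email protected]"; on_bannedUserEdit_textChanged
        # below only enables the add button once the input satisfies this
        # validator.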
# set initial values
self.autostartCheckBox.setChecked(
Preferences.getCooperation("AutoStartServer"))
self.otherPortsCheckBox.setChecked(
Preferences.getCooperation("TryOtherPorts"))
self.serverPortSpin.setValue(
Preferences.getCooperation("ServerPort"))
self.portToTrySpin.setValue(
Preferences.getCooperation("MaxPortsToTry"))
self.autoAcceptCheckBox.setChecked(
Preferences.getCooperation("AutoAcceptConnections"))
self.bannedUsersList.addItems(sorted(
Preferences.getCooperation("BannedUsers")))
def save(self):
"""
Public slot to save the Cooperation configuration.
"""
Preferences.setCooperation(
"AutoStartServer",
self.autostartCheckBox.isChecked())
Preferences.setCooperation(
"TryOtherPorts",
self.otherPortsCheckBox.isChecked())
Preferences.setCooperation(
"AutoAcceptConnections",
self.autoAcceptCheckBox.isChecked())
Preferences.setCooperation(
"ServerPort",
self.serverPortSpin.value())
Preferences.setCooperation(
"MaxPortsToTry",
self.portToTrySpin.value())
bannedUsers = []
for row in range(self.bannedUsersList.count()):
bannedUsers.append(self.bannedUsersList.item(row).text())
Preferences.setCooperation("BannedUsers", bannedUsers)
@pyqtSlot()
def on_bannedUsersList_itemSelectionChanged(self):
"""
Private slot to react on changes of selected banned users.
"""
self.deleteBannedUsersButton.setEnabled(
len(self.bannedUsersList.selectedItems()) > 0)
@pyqtSlot(str)
def on_bannedUserEdit_textChanged(self, txt):
"""
Private slot to handle the user entering a banned user.
@param txt text entered by the user (string)
"""
self.addBannedUserButton.setEnabled(
self.__bannedUserValidator.validate(txt, len(txt))[0] ==
QValidator.Acceptable)
@pyqtSlot()
def on_deleteBannedUsersButton_clicked(self):
"""
Private slot to remove the selected users from the list of
banned users.
"""
for itm in self.bannedUsersList.selectedItems():
row = self.bannedUsersList.row(itm)
itm = self.bannedUsersList.takeItem(row)
del itm
@pyqtSlot()
def on_addBannedUserButton_clicked(self):
"""
Private slot to add a user to the list of banned users.
"""
self.bannedUsersList.addItem(self.bannedUserEdit.text())
self.bannedUserEdit.clear()
def create(dlg):
"""
Module function to create the configuration page.
@param dlg reference to the configuration dialog
@return reference to the instantiated page (ConfigurationPageBase)
"""
page = CooperationPage()
return page
| gpl-3.0 | -3,359,914,705,360,581,600 | 5,271,579,399,648,206,000 | 31.897638 | 73 | 0.621589 | false |
roopali8/tempest | tempest/api/identity/admin/v3/test_projects.py | 1 | 7635 | # Copyright 2013 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
from tempest.common import credentials
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
@test.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
def test_project_create_with_description(self):
# Create project with a description
project_name = data_utils.rand_name('project')
project_desc = data_utils.rand_name('desc')
project = self.client.create_project(
project_name, description=project_desc)['project']
self.data.projects.append(project)
project_id = project['id']
desc1 = project['description']
self.assertEqual(desc1, project_desc, 'Description should have '
'been sent in response for create')
body = self.client.get_project(project_id)['project']
desc2 = body['description']
self.assertEqual(desc2, project_desc, 'Description does not appear'
'to be set')
@test.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
def test_project_create_with_domain(self):
# Create project with a domain
self.data.setup_test_domain()
project_name = data_utils.rand_name('project')
project = self.client.create_project(
project_name, domain_id=self.data.domain['id'])['project']
self.data.projects.append(project)
project_id = project['id']
self.assertEqual(project_name, project['name'])
self.assertEqual(self.data.domain['id'], project['domain_id'])
body = self.client.get_project(project_id)['project']
self.assertEqual(project_name, body['name'])
self.assertEqual(self.data.domain['id'], body['domain_id'])
@test.idempotent_id('1f66dc76-50cc-4741-a200-af984509e480')
def test_project_create_enabled(self):
# Create a project that is enabled
project_name = data_utils.rand_name('project')
project = self.client.create_project(
project_name, enabled=True)['project']
self.data.projects.append(project)
project_id = project['id']
en1 = project['enabled']
self.assertTrue(en1, 'Enable should be True in response')
body = self.client.get_project(project_id)['project']
en2 = body['enabled']
self.assertTrue(en2, 'Enable should be True in lookup')
@test.idempotent_id('78f96a9c-e0e0-4ee6-a3ba-fbf6dfd03207')
def test_project_create_not_enabled(self):
# Create a project that is not enabled
project_name = data_utils.rand_name('project')
project = self.client.create_project(
project_name, enabled=False)['project']
self.data.projects.append(project)
en1 = project['enabled']
self.assertEqual('false', str(en1).lower(),
'Enable should be False in response')
body = self.client.get_project(project['id'])['project']
en2 = body['enabled']
self.assertEqual('false', str(en2).lower(),
'Enable should be False in lookup')
@test.idempotent_id('f608f368-048c-496b-ad63-d286c26dab6b')
def test_project_update_name(self):
# Update name attribute of a project
p_name1 = data_utils.rand_name('project')
project = self.client.create_project(p_name1)['project']
self.data.projects.append(project)
resp1_name = project['name']
p_name2 = data_utils.rand_name('project2')
body = self.client.update_project(project['id'],
name=p_name2)['project']
resp2_name = body['name']
self.assertNotEqual(resp1_name, resp2_name)
body = self.client.get_project(project['id'])['project']
resp3_name = body['name']
self.assertNotEqual(resp1_name, resp3_name)
self.assertEqual(p_name1, resp1_name)
self.assertEqual(resp2_name, resp3_name)
@test.idempotent_id('f138b715-255e-4a7d-871d-351e1ef2e153')
def test_project_update_desc(self):
# Update description attribute of a project
p_name = data_utils.rand_name('project')
p_desc = data_utils.rand_name('desc')
project = self.client.create_project(
p_name, description=p_desc)['project']
self.data.projects.append(project)
resp1_desc = project['description']
p_desc2 = data_utils.rand_name('desc2')
body = self.client.update_project(
project['id'], description=p_desc2)['project']
resp2_desc = body['description']
self.assertNotEqual(resp1_desc, resp2_desc)
body = self.client.get_project(project['id'])['project']
resp3_desc = body['description']
self.assertNotEqual(resp1_desc, resp3_desc)
self.assertEqual(p_desc, resp1_desc)
self.assertEqual(resp2_desc, resp3_desc)
@test.idempotent_id('b6b25683-c97f-474d-a595-55d410b68100')
def test_project_update_enable(self):
# Update the enabled attribute of a project
p_name = data_utils.rand_name('project')
p_en = False
project = self.client.create_project(p_name, enabled=p_en)['project']
self.data.projects.append(project)
resp1_en = project['enabled']
p_en2 = True
body = self.client.update_project(
project['id'], enabled=p_en2)['project']
resp2_en = body['enabled']
self.assertNotEqual(resp1_en, resp2_en)
body = self.client.get_project(project['id'])['project']
resp3_en = body['enabled']
self.assertNotEqual(resp1_en, resp3_en)
self.assertEqual('false', str(resp1_en).lower())
self.assertEqual(resp2_en, resp3_en)
@test.idempotent_id('59398d4a-5dc5-4f86-9a4c-c26cc804d6c6')
def test_associate_user_to_project(self):
# Associate a user to a project
# Create a Project
p_name = data_utils.rand_name('project')
project = self.client.create_project(p_name)['project']
self.data.projects.append(project)
# Create a User
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = credentials.get_policy_password()
user = self.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=project['id'])['user']
# Delete the User at the end of this method
self.addCleanup(self.client.delete_user, user['id'])
# Get User To validate the user details
new_user_get = self.client.get_user(user['id'])['user']
# Assert response body of GET
self.assertEqual(u_name, new_user_get['name'])
self.assertEqual(u_desc, new_user_get['description'])
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
| apache-2.0 | -1,347,232,809,274,979,000 | 5,249,860,111,065,586,000 | 41.653631 | 78 | 0.634578 | false |
nyalldawson/QGIS | tests/src/python/test_qgsdatabaseschemamodel.py | 32 | 10288 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDatabaseSchemaModel
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '07/03/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import (
QgsDatabaseSchemaModel,
QgsProviderRegistry,
)
from qgis.PyQt.QtCore import (
QCoreApplication,
QModelIndex,
Qt
)
from qgis.testing import unittest, start_app
class TestPyQgsDatabaseSchemaModel(unittest.TestCase):
# Provider test cases must define the string URI for the test
uri = ''
# Provider test cases must define the provider name (e.g. "postgres" or "ogr")
providerKey = 'postgres'
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain(cls.__name__)
QCoreApplication.setApplicationName(cls.__name__)
start_app()
cls.postgres_conn = "service='qgis_test'"
if 'QGIS_PGTEST_DB' in os.environ:
cls.postgres_conn = os.environ['QGIS_PGTEST_DB']
cls.uri = cls.postgres_conn + ' sslmode=disable'
def testModel(self):
conn = QgsProviderRegistry.instance().providerMetadata('postgres').createConnection(self.uri, {})
self.assertTrue(conn)
model = QgsDatabaseSchemaModel(conn)
self.assertGreaterEqual(model.rowCount(), 3)
old_count = model.rowCount()
self.assertEqual(model.columnCount(), 1)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertEqual(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), Qt.ToolTipRole), 'qgis_test')
self.assertIsNone(model.data(model.index(model.rowCount(), 0, QModelIndex()), Qt.DisplayRole))
model.refresh()
self.assertEqual(model.rowCount(), old_count)
conn.createSchema('myNewSchema')
self.assertEqual(model.rowCount(), old_count)
model.refresh()
self.assertEqual(model.rowCount(), old_count + 1)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertIn('myNewSchema', schemas)
conn.createSchema('myNewSchema2')
conn.createSchema('myNewSchema3')
model.refresh()
self.assertEqual(model.rowCount(), old_count + 3)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertIn('myNewSchema', schemas)
self.assertIn('myNewSchema2', schemas)
self.assertIn('myNewSchema3', schemas)
conn.createSchema('myNewSchema4')
conn.dropSchema('myNewSchema2')
conn.dropSchema('myNewSchema')
model.refresh()
self.assertEqual(model.rowCount(), old_count + 2)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertNotIn('myNewSchema', schemas)
self.assertNotIn('myNewSchema2', schemas)
self.assertIn('myNewSchema3', schemas)
self.assertIn('myNewSchema4', schemas)
conn.dropSchema('myNewSchema3')
conn.dropSchema('myNewSchema4')
model.refresh()
self.assertEqual(model.rowCount(), old_count)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertNotIn('myNewSchema3', schemas)
self.assertNotIn('myNewSchema4', schemas)
def test_model_allow_empty(self):
"""Test model with empty entry"""
conn = QgsProviderRegistry.instance().providerMetadata('postgres').createConnection(self.uri, {})
self.assertTrue(conn)
model = QgsDatabaseSchemaModel(conn)
self.assertGreaterEqual(model.rowCount(), 3)
old_count = model.rowCount()
model.setAllowEmptySchema(True)
self.assertEqual(model.rowCount(), old_count + 1)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertIsNone(model.data(model.index(model.rowCount(), 0, QModelIndex()), Qt.DisplayRole))
model.refresh()
self.assertEqual(model.rowCount(), old_count + 1)
conn.createSchema('myNewSchema')
self.assertEqual(model.rowCount(), old_count + 1)
model.refresh()
self.assertEqual(model.rowCount(), old_count + 2)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertIn('myNewSchema', schemas)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
model.setAllowEmptySchema(False)
self.assertEqual(model.rowCount(), old_count + 1)
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
model.setAllowEmptySchema(True)
self.assertEqual(model.rowCount(), old_count + 2)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
conn.createSchema('myNewSchema2')
conn.createSchema('myNewSchema3')
model.refresh()
self.assertEqual(model.rowCount(), old_count + 4)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertIn('myNewSchema', schemas)
self.assertIn('myNewSchema2', schemas)
self.assertIn('myNewSchema3', schemas)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
conn.createSchema('myNewSchema4')
conn.dropSchema('myNewSchema2')
conn.dropSchema('myNewSchema')
model.refresh()
self.assertEqual(model.rowCount(), old_count + 3)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertNotIn('myNewSchema', schemas)
self.assertNotIn('myNewSchema2', schemas)
self.assertIn('myNewSchema3', schemas)
self.assertIn('myNewSchema4', schemas)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
conn.dropSchema('myNewSchema3')
conn.dropSchema('myNewSchema4')
model.refresh()
self.assertEqual(model.rowCount(), old_count + 1)
schemas = [model.data(model.index(r, 0, QModelIndex()), Qt.DisplayRole) for r in range(model.rowCount())]
self.assertIn('public', schemas)
self.assertIn('CamelCaseSchema', schemas)
self.assertIn('qgis_test', schemas)
self.assertNotIn('myNewSchema3', schemas)
self.assertNotIn('myNewSchema4', schemas)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
self.assertFalse(model.data(model.index(schemas.index('qgis_test'), 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
model.setAllowEmptySchema(False)
self.assertEqual(model.rowCount(), old_count)
self.assertTrue(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), QgsDatabaseSchemaModel.RoleEmpty))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,452,526,985,374,872,000 | -7,361,347,693,352,860,000 | 47.758294 | 129 | 0.672726 | false |
mueller-lab/PyFRAP | pyfrp/modules/pyfrp_optimization_module.py | 2 | 6867 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Optimization module for PyFRAP toolbox.
Currently contains all functions necessary to transform a constrained FRAP optimization problem into
an unconstrained one, making it suitable for the Nelder-Mead optimization algorithm.
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#PyFRAP
import pyfrp_fit_module
from pyfrp_term_module import *
#===========================================================================================================================================================================
#Module Functions
#===========================================================================================================================================================================
def constrObjFunc(x,fit,debug,ax,returnFit):
"""Objective function when using Constrained Nelder-Mead.
Calls :py:func:`pyfrp.modules.pyfrp_optimization_module.xTransform` to transform x into
constrained version, then uses :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPObjFunc` to
find SSD.
Args:
		x (list): Input vector, consisting of [D,(prod),(degr)].
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
debug (bool): Display debugging output and plots.
ax (matplotlib.axes): Axes to display plots in.
returnFit (bool): Return fit instead of SSD.
Returns:
		float: SSD of fit, unless ``returnFit==True``, in which case the fit itself is returned.
"""
LBs, UBs = buildBoundLists(fit)
x=xTransform(x,LBs,UBs)
ssd=pyfrp_fit_module.FRAPObjFunc(x,fit,debug,ax,returnFit)
return ssd
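#Rough usage sketch (the optimizer call and variable names below are assumed,
#they are not part of this module): pyfrp_fit_module would typically minimize
#this objective with a Nelder-Mead search in the unconstrained coordinates, e.g.
#	x0u=transformX0(x0,LBs,UBs)
#	res=scipy.optimize.fmin(constrObjFunc,x0u,args=(fit,False,None,False))
#	xOpt=xTransform(res,LBs,UBs)
#so each candidate vector is mapped back into the bounded region before the SSD
#is evaluated.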
def xTransform(x,LB,UB):
"""Transforms ``x`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
.. note:: Will add tiny offset to LB(D), to avoid singularities.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
		x (list): Input vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
#Make sure everything is float
x=np.asarray(x,dtype=np.float64)
LB=np.asarray(LB,dtype=np.float64)
UB=np.asarray(UB,dtype=np.float64)
#Check if LB_D==0, then add a little noise to it so we do not end up with xtrans[D]==0 and later have singularities when scaling tvec
if LB[0]==0:
LB[0]=1E-10
#Determine number of parameters to be fitted
nparams=len(x)
#Make empty vector
xtrans = np.zeros(np.shape(x))
# k allows some variables to be fixed, thus dropped from the
# optimization.
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
xtrans[i]=UB[i]-x[k]**2
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
xtrans[i]=LB[i]+x[k]**2
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
xtrans[i] = (np.sin(x[k])+1.)/2.*(UB[i] - LB[i]) + LB[i]
xtrans[i] = max([LB[i],min([UB[i],xtrans[i]])])
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
xtrans[i] = x[k]
k=k+1
	#Note: The original file has another case here for fixed variables, but since that decision is already made earlier, when frap_fitting is called, we don't need it here.
return xtrans
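#Minimal illustration (numbers are assumed): with LB=[0,None] and UB=[400,None],
#a raw vector x=[np.pi/2,3.] maps to approximately xtrans=[400.,3.]; the sine
#transform pins the bounded first entry to its upper bound, while the unbounded
#second entry is passed through unchanged.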
def transformX0(x0,LB,UB):
"""Transforms ``x0`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
		x0 (list): Input initial vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
x0u = list(x0)
nparams=len(x0)
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
if UB[i]<=x0[i]:
x0u[k]=0
else:
				x0u[k]=np.sqrt(UB[i]-x0[i])
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
if LB[i]>=x0[i]:
x0u[k]=0
else:
x0u[k]=np.sqrt(x0[i]-LB[i])
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
if UB[i]<=x0[i]:
x0u[k]=np.pi/2
elif LB[i]>=x0[i]:
x0u[k]=-np.pi/2
else:
x0u[k] = 2*(x0[i] - LB[i])/(UB[i]-LB[i]) - 1;
				#shift by 2*pi to avoid problems at zero in fminsearch; otherwise the initial simplex is vanishingly small
x0u[k] = 2*np.pi+np.arcsin(max([-1,min(1,x0u[k])]));
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
			x0u[k] = x0[i]
k=k+1
return x0u
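#Quick consistency check (values are assumed): transformX0([100.],[0.],[400.])
#returns an unconstrained coordinate u such that xTransform(u,[0.],[400.]) is
#close to [100.], i.e. the initial guess gets expressed in the coordinates the
#Nelder-Mead search actually operates on.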
def buildBoundLists(fit):
"""Builds list of lower bounds and upper bounds.
Args:
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
Returns:
tuple: Tuple containing:
* LBs (list): List of lower bounds.
* UBs (list): List of upper bounds.
"""
LBs=[fit.LBD]+int(fit.fitProd)*[fit.LBProd]+int(fit.fitDegr)*[fit.LBDegr]+len(fit.ROIsFitted)*[fit.LBEqu]
UBs=[fit.UBD]+int(fit.fitProd)*[fit.UBProd]+int(fit.fitDegr)*[fit.UBDegr]+len(fit.ROIsFitted)*[fit.UBEqu]
return LBs,UBs | gpl-3.0 | -1,887,226,682,630,352,400 | -7,227,019,923,828,543,000 | 29.524444 | 172 | 0.549294 | false |
danielnyga/pracmln | python3/pracmln/mln/learning/softeval.py | 2 | 1371 | # -*- coding: utf-8 -*-
#
# Markov Logic Networks
#
# (C) 2006-2010 by Dominik Jain ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def truthDegreeGivenSoftEvidence(gf, worldValues, mln):
return mln.getTruthDegreeGivenSoftEvidence(gf, worldValues)
def noisyOr(worldValues, disj, mln):
return mln._noisyOr(worldValues, disj)
| bsd-2-clause | -8,158,170,218,181,661,000 | 6,809,023,253,119,286,000 | 40.545455 | 72 | 0.764406 | false |
Netflix/sketchy | setup-nflx.py | 1 | 1460 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='sketchy',
version='1.1.1',
long_description=read('README.md'),
packages=['sketchy'],
include_package_data=True,
zip_safe=False,
install_requires=[
'boto==2.49.0',
'Flask==0.10.1',
'Flask-SQLAlchemy==1.0',
'MySQL-python==1.2.5',
'Flask-Script==2.0.5',
'SQLAlchemy==0.9.7',
'Flask-RESTful==0.2.12',
#'requests==2.3.0',
'gunicorn==19.1.0',
'tldextract==1.4',
'supervisor==3.1.0',
'celery==3.1.13',
'redis==2.10.1',
'lxml==3.3.5',
'subprocess32==3.2.6',
'netaddr==0.7.18'
]
)
| apache-2.0 | -4,539,716,592,943,482,000 | 7,536,823,165,157,385,000 | 30.73913 | 78 | 0.573973 | false |
stannynuytkens/youtube-dl | youtube_dl/extractor/dvtv.py | 8 | 6807 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
js_to_json,
mimetype2ext,
unescapeHTML,
)
class DVTVIE(InfoExtractor):
IE_NAME = 'dvtv'
IE_DESC = 'http://video.aktualne.cz/'
_VALID_URL = r'https?://video\.aktualne\.cz/(?:[^/]+/)+r~(?P<id>[0-9a-f]{32})'
_TESTS = [{
'url': 'http://video.aktualne.cz/dvtv/vondra-o-ceskem-stoleti-pri-pohledu-na-havla-mi-bylo-trapne/r~e5efe9ca855511e4833a0025900fea04/',
'md5': '67cb83e4a955d36e1b5d31993134a0c2',
'info_dict': {
'id': 'dc0768de855511e49e4b0025900fea04',
'ext': 'mp4',
'title': 'Vondra o Českém století: Při pohledu na Havla mi bylo trapně',
'duration': 1484,
}
}, {
'url': 'http://video.aktualne.cz/dvtv/dvtv-16-12-2014-utok-talibanu-boj-o-kliniku-uprchlici/r~973eb3bc854e11e498be002590604f2e/',
'info_dict': {
'title': r're:^DVTV 16\. 12\. 2014: útok Talibanu, boj o kliniku, uprchlíci',
'id': '973eb3bc854e11e498be002590604f2e',
},
'playlist': [{
'md5': 'da7ca6be4935532241fa9520b3ad91e4',
'info_dict': {
'id': 'b0b40906854d11e4bdad0025900fea04',
'ext': 'mp4',
'title': 'Drtinová Veselovský TV 16. 12. 2014: Témata dne',
'description': 'md5:0916925dea8e30fe84222582280b47a0',
'timestamp': 1418760010,
'upload_date': '20141216',
}
}, {
'md5': '5f7652a08b05009c1292317b449ffea2',
'info_dict': {
'id': '420ad9ec854a11e4bdad0025900fea04',
'ext': 'mp4',
'title': 'Školní masakr možná změní boj s Talibanem, říká novinářka',
'description': 'md5:ff2f9f6de73c73d7cef4f756c1c1af42',
'timestamp': 1418760010,
'upload_date': '20141216',
}
}, {
'md5': '498eb9dfa97169f409126c617e2a3d64',
'info_dict': {
'id': '95d35580846a11e4b6d20025900fea04',
'ext': 'mp4',
'title': 'Boj o kliniku: Veřejný zájem, nebo právo na majetek?',
'description': 'md5:889fe610a70fee5511dc3326a089188e',
'timestamp': 1418760010,
'upload_date': '20141216',
}
}, {
'md5': 'b8dc6b744844032dab6ba3781a7274b9',
'info_dict': {
'id': '6fe14d66853511e4833a0025900fea04',
'ext': 'mp4',
'title': 'Pánek: Odmítání syrských uprchlíků je ostudou české vlády',
'description': 'md5:544f86de6d20c4815bea11bf2ac3004f',
'timestamp': 1418760010,
'upload_date': '20141216',
}
}],
}, {
'url': 'https://video.aktualne.cz/dvtv/zeman-si-jen-leci-mindraky-sobotku-nenavidi-a-babis-se-mu-te/r~960cdb3a365a11e7a83b0025900fea04/',
'md5': 'f8efe9656017da948369aa099788c8ea',
'info_dict': {
'id': '3c496fec365911e7a6500025900fea04',
'ext': 'mp4',
'title': 'Zeman si jen léčí mindráky, Sobotku nenávidí a Babiš se mu teď hodí, tvrdí Kmenta',
'duration': 1103,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://video.aktualne.cz/v-cechach-poprve-zazni-zelenkova-zrestaurovana-mse/r~45b4b00483ec11e4883b002590604f2e/',
'only_matching': True,
}]
def _parse_video_metadata(self, js, video_id, live_js=None):
data = self._parse_json(js, video_id, transform_source=js_to_json)
if live_js:
data.update(self._parse_json(
live_js, video_id, transform_source=js_to_json))
title = unescapeHTML(data['title'])
formats = []
for video in data['sources']:
video_url = video.get('file')
if not video_url:
continue
video_type = video.get('type')
ext = determine_ext(video_url, mimetype2ext(video_type))
if video_type == 'application/vnd.apple.mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif video_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
video_url, video_id, mpd_id='dash', fatal=False))
else:
label = video.get('label')
height = self._search_regex(
r'^(\d+)[pP]', label or '', 'height', default=None)
format_id = ['http']
for f in (ext, label):
if f:
format_id.append(f)
formats.append({
'url': video_url,
'format_id': '-'.join(format_id),
'height': int_or_none(height),
})
self._sort_formats(formats)
return {
'id': data.get('mediaid') or video_id,
'title': title,
'description': data.get('description'),
'thumbnail': data.get('image'),
'duration': int_or_none(data.get('duration')),
'timestamp': int_or_none(data.get('pubtime')),
'formats': formats
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# live content
live_item = self._search_regex(
r'(?s)embedData[0-9a-f]{32}\.asset\.liveStarter\s*=\s*(\{.+?\});',
webpage, 'video', default=None)
# single video
item = self._search_regex(
r'(?s)embedData[0-9a-f]{32}\[["\']asset["\']\]\s*=\s*(\{.+?\});',
webpage, 'video', default=None)
if item:
return self._parse_video_metadata(item, video_id, live_item)
# playlist
items = re.findall(
r"(?s)BBX\.context\.assets\['[0-9a-f]{32}'\]\.push\(({.+?})\);",
webpage)
if not items:
items = re.findall(r'(?s)var\s+asset\s*=\s*({.+?});\n', webpage)
if items:
return {
'_type': 'playlist',
'id': video_id,
'title': self._og_search_title(webpage),
'entries': [self._parse_video_metadata(i, video_id) for i in items]
}
raise ExtractorError('Could not find neither video nor playlist')
| unlicense | -2,302,263,648,260,617,000 | 5,875,425,463,674,214,000 | 37.420455 | 145 | 0.517746 | false |
ecino/compassion-modules | mobile_app_connector/models/res_users.py | 3 | 1638 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Quentin Gigon <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from odoo import models, api
from odoo.http import request
from ..mappings.compassion_login_mapping import MobileLoginMapping
logger = logging.getLogger(__name__)
class CompassionLogin(models.Model):
_inherit = 'res.users'
@api.model
def mobile_login(self, **other_params):
"""
Mobile app method:
        Log in a given user.
:param view: login view
:param username: the username of the user
:param password: the password of the user
:param other_params: all request parameters
:return: JSON filled with user's info
"""
username = self._get_required_param('username', other_params)
password = self._get_required_param('password', other_params)
uid = request.session.authenticate(
request.session.db, username, password)
if uid is not False:
self.save_session(request.cr, uid, request.context)
user = self.env['res.users'].browse(uid)
mapping = MobileLoginMapping(self.env)
result = mapping.get_connect_data(user)
return result
def _get_required_param(self, key, params):
if key not in params:
raise ValueError('Required parameter {}'.format(key))
return params[key]
| agpl-3.0 | 2,338,654,677,122,538,500 | -4,293,123,685,590,796,000 | 31.76 | 78 | 0.578144 | false |
aptivate/invite-registration | registration/views.py | 1 | 3680 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.views import password_change
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import FormView, RedirectView
from .models import User
from .forms import ContactPasswordResetForm
########################################################################
# Account activation and password reset
########################################################################
class ResetPassword(FormView):
from_address = settings.EMAIL_BOT_ADDRESS
email_template = 'registration/email/password_reset_request.email'
template_name = 'registration/password_reset.html'
form_class = ContactPasswordResetForm
def get_subject(self):
return "{0}: password recovery".format(settings.SITE_NAME)
def form_valid(self, form):
opts = {
'use_https': self.request.is_secure(),
'from_email': self.from_address,
'email_template_name': self.email_template,
'subject': self.get_subject(),
'request': self.request,
}
form.save(**opts)
messages.success(
self.request, ('Reset password email was sent to this '
'contact. Please check your mailbox for further '
'instructions.'))
return HttpResponseRedirect(reverse('login'))
def form_invalid(self, form):
messages.error(self.request, ('Email could not be sent. Check if '
'provided email is correct.'))
return self.render_to_response(self.get_context_data(form=form))
def change_password(request):
return password_change(request,
post_change_redirect=reverse(settings.LOGIN_REDIRECT_URL))
class ActivationEmailsView(RedirectView):
from_address = settings.EMAIL_BOT_ADDRESS
email_template = 'registration/email/activation_body.email'
hide_messages = False
def get_subject(self):
raise NotImplementedError
def get(self, request, *args, **kwargs):
self.hide_messages = bool(request.REQUEST.get("hide_messages", False))
self.send_emails(request, **kwargs)
return super(ActivationEmailsView, self).get(request, *args, **kwargs)
class SendActivationEmailView(ActivationEmailsView):
reverse_name = "password_reset"
def get_redirect_url(self, **kwargs):
return reverse(self.reverse_name)
def get_subject(self):
return "Please activate your {0} account".format(settings.SITE_NAME)
def send_email(self, request, pk):
obj = get_object_or_404(User, pk=pk)
form = ContactPasswordResetForm({'email': obj.business_email})
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'from_email': self.from_address,
'email_template_name': self.email_template,
'subject': self.get_subject(),
'request': request,
}
form.save(**opts)
if not self.hide_messages:
messages.success(request,
'Activation email was sent to this contact.')
else:
if not self.hide_messages:
messages.error(request,
'Email could not be sent. \
Check if business email is correct.')
def send_emails(self, request, **kwargs):
self.pk = int(kwargs['pk'])
self.send_email(request, self.pk)
| gpl-3.0 | 7,322,045,118,898,958,000 | 8,745,726,546,008,058,000 | 36.938144 | 85 | 0.595924 | false |
xrmx/django | django/contrib/gis/db/backends/spatialite/operations.py | 257 | 11441 | """
SQL functions reference lists:
http://www.gaia-gis.it/spatialite-2.4.0/spatialite-sql-2.4.html
http://www.gaia-gis.it/spatialite-3.0.0-BETA/spatialite-sql-3.0.0.html
http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
import sys
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = SpatiaLiteAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'Area'
centroid = 'Centroid'
collect = 'Collect'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
extent = 'Extent'
intersection = 'Intersection'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
gis_operators = {
'equals': SpatialOperator(func='Equals'),
'disjoint': SpatialOperator(func='Disjoint'),
'touches': SpatialOperator(func='Touches'),
'crosses': SpatialOperator(func='Crosses'),
'within': SpatialOperator(func='Within'),
'overlaps': SpatialOperator(func='Overlaps'),
'contains': SpatialOperator(func='Contains'),
'intersects': SpatialOperator(func='Intersects'),
'relate': SpatialOperator(func='Relate'),
# Returns true if B's bounding box completely contains A's bounding box.
'contained': SpatialOperator(func='MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains': SpatialOperator(func='MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps': SpatialOperator(func='MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as': SpatialOperator(func='Equals'),
'exact': SpatialOperator(func='Equals'),
'distance_gt': SpatialOperator(func='Distance', op='>'),
'distance_gte': SpatialOperator(func='Distance', op='>='),
'distance_lt': SpatialOperator(func='Distance', op='<'),
'distance_lte': SpatialOperator(func='Distance', op='<='),
}
@cached_property
def function_names(self):
return {
'Length': 'ST_Length',
'Reverse': 'ST_Reverse',
'Scale': 'ScaleCoords',
'Translate': 'ST_Translate' if self.spatial_version >= (3, 1, 0) else 'ShiftCoords',
'Union': 'ST_Union',
}
@cached_property
def unsupported_functions(self):
unsupported = {'BoundingCircle', 'ForceRHR', 'GeoHash', 'MemSize'}
if self.spatial_version < (3, 1, 0):
unsupported.add('SnapToGrid')
if self.spatial_version < (4, 0, 0):
unsupported.update({'Perimeter', 'Reverse'})
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as msg:
new_msg = (
'Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
if version < (2, 4, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
'2.4.0 and above')
return version
@property
def _version_greater_2_4_0_rc4(self):
if self.spatial_version >= (2, 4, 1):
return True
else:
# Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
# RC2 (shipped in popular Debian/Ubuntu packages) and RC4
# report version as '2.4.0', so we fall back to feature detection
try:
self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
except DatabaseError:
return False
return True
@cached_property
def disallowed_aggregates(self):
disallowed = (aggregates.Extent3D, aggregates.MakeLine)
if self.spatial_version < (3, 0, 0):
disallowed += (aggregates.Collect, aggregates.Extent)
return disallowed
@cached_property
def gml(self):
return 'AsGML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def kml(self):
return 'AsKML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def geojson(self):
return 'AsGeoJSON' if self.spatial_version >= (3, 0, 0) else None
def convert_extent(self, box, srid):
"""
Convert the polygon data received from Spatialite to min/max values.
"""
if box is None:
return None
shell = Geometry(box, srid).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
        Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
                raise ValueError('SpatiaLite does not support distance queries on '
                                 'geometry fields with a geodetic coordinate system. '
                                 'Distance objects are not supported; use a numeric '
                                 'value of your distance in degrees instead.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
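    # Illustrative expansion of the placeholders built above (hypothetical
    # SRIDs): a geometry value carrying srid=3857 bound to a field with
    # srid=4326 produces "Transform(GeomFromText(%s,3857), 4326)", while a
    # value already in the field's SRID collapses to "GeomFromText(%s,4326)".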
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
return SpatialiteSpatialRefSys
def get_db_converters(self, expression):
converters = super(SpatiaLiteOperations, self).get_db_converters(expression)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
| bsd-3-clause | -5,694,801,677,752,832,000 | -2,672,245,374,755,075,000 | 37.783051 | 100 | 0.617516 | false |
0x535431/textaapp | lib/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
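# Illustrative behaviour of unquote_unreserved (hand-worked example): the
# unreserved '%7E' ('~') is decoded, while the reserved '%2F' ('/') keeps its
# percent-encoding.
#   >>> unquote_unreserved('%7Efoo%2Fbar')
#   '~foo%2Fbar'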
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
    Example: if mask is 24, the function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
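# Worked example for the bit arithmetic above: with mask=24 the expression is
# 0xffffffff ^ ((1 << 8) - 1) == 0xffffff00, which packs to b'\xff\xff\xff\x00'
# and renders as '255.255.255.0'. Combined with address_in_network:
#   >>> address_in_network('192.168.1.1', '192.168.1.0/24')
#   True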
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
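# Illustrative sketch (assumed environment values): with the environment
# variable no_proxy set to '10.0.0.0/8,.internal.example.com', both of these
# URLs would be expected to bypass the configured proxies --
#   should_bypass_proxies('http://10.1.2.3/status')            # CIDR match
#   should_bypass_proxies('http://api.internal.example.com/')  # suffix match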
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
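# Example of the parsed structure for the Link header quoted in the docstring
# above (illustrative):
#   [{'url': 'http:/.../front.jpeg', 'rel': 'front', 'type': 'image/jpeg'},
#    {'url': 'http://.../back.jpeg', 'rel': 'back', 'type': 'image/jpeg'}]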
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
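# Worked example of the null-counting heuristic above: the JSON text '{"'
# encoded as UTF-16-LE starts with b'{\x00"\x00' -- nulls in the 2nd and 4th
# positions -- so guess_json_utf() returns 'utf-16-le'; the same text in
# UTF-8 contains no null bytes and yields 'utf-8'.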
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
| bsd-3-clause | -4,817,464,648,103,575,000 | -8,343,427,819,916,307,000 | 29.175389 | 118 | 0.602091 | false |
Qalthos/ansible | test/units/modules/network/onyx/test_onyx_magp.py | 52 | 4649 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_magp
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxMagpModule(TestOnyxModule):
module = onyx_magp
def setUp(self):
super(TestOnyxMagpModule, self).setUp()
self.mock_get_config = patch.object(
onyx_magp.OnyxMagpModule,
"_get_magp_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_version = patch.object(onyx_magp.OnyxMagpModule,
"_get_os_version")
self.get_version = self.mock_get_version.start()
def tearDown(self):
super(TestOnyxMagpModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_get_version.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_magp_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
self.get_version.return_value = "3.6.5000"
def test_magp_absent_no_change(self):
set_module_args(dict(interface='Vlan 1002', magp_id=110,
state='absent'))
self.execute_module(changed=False)
def test_magp_no_change(self):
set_module_args(dict(interface='Vlan 1200', magp_id=103,
state='disabled'))
self.execute_module(changed=False)
def test_magp_present_no_change(self):
set_module_args(dict(interface='Vlan 1200', magp_id=103))
self.execute_module(changed=False)
def test_magp_enable(self):
set_module_args(dict(interface='Vlan 1200', magp_id=103,
state='enabled'))
commands = ['interface vlan 1200 magp 103 no shutdown']
self.execute_module(changed=True, commands=commands)
def test_magp_disable(self):
set_module_args(dict(interface='Vlan 1243', magp_id=102,
state='disabled', router_ip='10.0.0.43',
router_mac='01:02:03:04:05:06'))
commands = ['interface vlan 1243 magp 102 shutdown']
self.execute_module(changed=True, commands=commands)
def test_magp_change_address(self):
set_module_args(dict(interface='Vlan 1243', magp_id=102,
router_ip='10.0.0.44',
router_mac='01:02:03:04:05:07'))
commands = [
'interface vlan 1243 magp 102 ip virtual-router address 10.0.0.44',
'interface vlan 1243 magp 102 ip virtual-router mac-address 01:02:03:04:05:07']
self.execute_module(changed=True, commands=commands)
def test_magp_remove_address(self):
set_module_args(dict(interface='Vlan 1243', magp_id=102))
commands = [
'interface vlan 1243 magp 102 no ip virtual-router address',
'interface vlan 1243 magp 102 no ip virtual-router mac-address']
self.execute_module(changed=True, commands=commands)
def test_magp_add(self):
set_module_args(dict(interface='Vlan 1244', magp_id=104,
router_ip='10.0.0.44',
router_mac='01:02:03:04:05:07'))
commands = [
'interface vlan 1244 magp 104',
'exit',
'interface vlan 1244 magp 104 ip virtual-router address 10.0.0.44',
'interface vlan 1244 magp 104 ip virtual-router mac-address 01:02:03:04:05:07']
self.execute_module(changed=True, commands=commands, sort=False)
def test_magp_change_vlan(self):
set_module_args(dict(interface='Vlan 1244', magp_id=102,
router_ip='10.0.0.43',
router_mac='01:02:03:04:05:06'))
commands = [
'interface vlan 1243 no magp 102',
'interface vlan 1244 magp 102',
'exit',
'interface vlan 1244 magp 102 ip virtual-router address 10.0.0.43',
'interface vlan 1244 magp 102 ip virtual-router mac-address 01:02:03:04:05:06']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 | 2,295,189,311,635,091,000 | -2,745,750,150,686,387,700 | 41.263636 | 92 | 0.605507 | false |
wanglongqi/sympy | sympy/physics/mechanics/body.py | 45 | 5982 | from sympy import Symbol
from sympy.physics.mechanics import (RigidBody, Particle, ReferenceFrame,
inertia)
from sympy.physics.vector import Point, Vector
__all__ = ['Body']
class Body(RigidBody, Particle):
"""
Body is a common representation of RigidBody or a Particle.
A Body represents either a rigid body or particle in classical mechanics.
Bodies have a body-fixed reference frame, a mass, a mass center and
possibly a body-fixed inertia.
Parameters
----------
name: String
Defines the name of the body. It is used as the base for defining body
specific properties.
masscenter : Point, optional
A point that represents the center of mass of the body or particle. If no
point is given, a point is generated.
frame : ReferenceFrame (optional)
The ReferenceFrame that represents the reference frame of the body. If
no frame is given, a frame is generated.
mass : Sympifyable, optional
        A Sympifyable object which represents the mass of the body. If no mass
is passed, one is generated.
body_inertia : Dyadic
Central inertia dyadic of the body. If none is passed while creating
RigidBody, a default inertia is generated.
Examples
--------
Default behaviour. It creates a RigidBody after defining mass,
mass center, frame and inertia.
>>> from sympy.physics.mechanics import Body
>>> body = Body('name_of_body')
Passing attributes of Rigidbody. All the arguments needed to create a
RigidBody can be passed while creating a Body too.
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import ReferenceFrame, Point, inertia
>>> from sympy.physics.mechanics import Body
>>> mass = Symbol('mass')
>>> masscenter = Point('masscenter')
>>> frame = ReferenceFrame('frame')
>>> ixx = Symbol('ixx')
>>> body_inertia = inertia(frame, ixx, 0, 0)
>>> body = Body('name_of_body',masscenter,mass,frame,body_inertia)
Creating a Particle. If masscenter and mass are passed, and inertia is
    not, then a Particle is created.
>>> from sympy import Symbol
>>> from sympy.physics.vector import Point
>>> from sympy.physics.mechanics import Body
>>> mass = Symbol('mass')
>>> masscenter = Point('masscenter')
>>> body = Body('name_of_body',masscenter,mass)
Similarly, A frame can also be passed while creating a Particle.
"""
def __init__(self, name, masscenter=None, mass=None, frame=None,
central_inertia=None):
self.name = name
self.loads = []
if frame is None:
frame = ReferenceFrame(name + '_frame')
if masscenter is None:
masscenter = Point(name + '_masscenter')
if central_inertia is None and mass is None:
ixx = Symbol(name + '_ixx')
iyy = Symbol(name + '_iyy')
izz = Symbol(name + '_izz')
izx = Symbol(name + '_izx')
ixy = Symbol(name + '_ixy')
iyz = Symbol(name + '_iyz')
_inertia = (inertia(frame, ixx, iyy, izz, ixy, iyz, izx),
masscenter)
else:
_inertia = (central_inertia, masscenter)
if mass is None:
_mass = Symbol(name + '_mass')
else:
_mass = mass
masscenter.set_vel(frame, 0)
# If user passes masscenter and mass then a particle is created
# otherwise a rigidbody. As a result a body may or may not have inertia.
if central_inertia is None and mass is not None:
self.frame = frame
self.masscenter = masscenter
Particle.__init__(self, name, masscenter, _mass)
else:
RigidBody.__init__(self, name, masscenter, frame, _mass, _inertia)
def apply_force(self, vec, point=None):
"""
Adds the force to the point (center of mass by default) on the body.
Parameters
----------
vec: Vector
Defines the force vector. Can be any vector w.r.t any frame or
combinations of frame.
point: Point, optional
Defines the point on which the force must be applied. Default is
Body's center of mass.
Example
-------
To apply a unit force in x direction of body's frame to body's
center of mass.
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> body = Body('body')
>>> g = Symbol('g')
>>> body.apply_force(body.mass * g * body.frame.x)
To apply force to any other point than center of mass, pass that point
as well.
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import Body
>>> parent = Body('parent')
>>> child = Body('child')
>>> g = Symbol('g')
>>> frame = parent.frame
>>> l = Symbol('l')
        >>> point = child.masscenter.locatenew('force_point', l * child.frame.y)
        >>> gravity = child.mass * g
        >>> child.apply_force(gravity * child.frame.x, point)
"""
if not isinstance(point, Point):
if point is None:
point = self.masscenter # masscenter
else:
raise TypeError("A Point must be supplied to apply force to.")
if not isinstance(vec, Vector):
raise TypeError("A Vector must be supplied to apply force.")
self.loads.append((point, vec))
def apply_torque(self, vec):
"""
Adds torque to the body.
Parameters
----------
vec: Vector
Defines the torque vector. Can be any vector w.r.t any frame or
combinations of frame.
"""
if not isinstance(vec, Vector):
raise TypeError("A Vector must be supplied to add torque.")
self.loads.append((self.frame, vec))
| bsd-3-clause | 1,319,847,740,805,317,000 | -3,367,151,820,843,950,600 | 34.188235 | 81 | 0.59211 | false |
sanguinariojoe/FreeCAD | src/Mod/TechDraw/TDTest/DVAnnoSymImageTest.py | 27 | 1959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# annotation, symbol & image test script for TechDraw module
# creates a page, adds 1 annotation, imports 1 symbol and 1 image
from __future__ import print_function
import FreeCAD
import Part
import Measure
import TechDraw
import os
def DVAnnoSymImageTest():
path = os.path.dirname(os.path.abspath(__file__))
print ('TDTestAnno path: ' + path)
templateFileSpec = path + '/TestTemplate.svg'
symbolFileSpec = path + '/TestSymbol.svg'
imageFileSpec = path + '/TestImage.png'
FreeCAD.newDocument("TDAnno")
FreeCAD.setActiveDocument("TDAnno")
FreeCAD.ActiveDocument=FreeCAD.getDocument("TDAnno")
page = FreeCAD.ActiveDocument.addObject('TechDraw::DrawPage','Page')
FreeCAD.ActiveDocument.addObject('TechDraw::DrawSVGTemplate','Template')
FreeCAD.ActiveDocument.Template.Template = templateFileSpec
FreeCAD.ActiveDocument.Page.Template = FreeCAD.ActiveDocument.Template
# page.ViewObject.show() # unit tests run in console mode
#annotation
anno = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewAnnotation','TestAnno')
s = 'Different Text'
sl = list()
sl.append(s)
anno.Text = sl
anno.TextStyle = 'Bold'
rc = page.addView(anno)
anno.X = 30.0
anno.Y = 150.0
#symbol
sym = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewSymbol','TestSymbol')
f = open(symbolFileSpec, 'r')
svg = f.read()
f.close()
sym.Symbol = svg
rc = page.addView(sym)
sym.X = 220.0
sym.Y = 150.0
#image
img = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewImage','TestImage')
img.ImageFile = imageFileSpec
rc = page.addView(img)
FreeCAD.ActiveDocument.recompute()
rc = False
if ("Up-to-date" in anno.State) and ("Up-to-date" in sym.State) and ("Up-to-date" in img.State):
rc = True
FreeCAD.closeDocument("TDAnno")
return rc
if __name__ == '__main__':
DVAnnoSymImageTest()
| lgpl-2.1 | 5,580,636,045,380,694,000 | 6,804,746,459,110,622,000 | 29.138462 | 100 | 0.680449 | false |
coffenbacher/askbot-devel | askbot/management/commands/askbot_import_jive.py | 11 | 15324 | from askbot import models
from askbot.conf import settings as askbot_settings
from askbot.utils.console import ProgressBar
from askbot.utils.slug import slugify
from askbot.utils.jive import JiveConverter
from askbot.utils.jive import internal_link_re
from askbot.utils.file_utils import make_file_name
from bs4 import BeautifulSoup
from django.conf import settings as django_settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
#from askbot.utils.transaction import dummy_transaction as transaction
from django.forms import EmailField, ValidationError
from django.utils import translation
from datetime import datetime
from optparse import make_option
import re
import os
import shutil
#todo: make a pass through all attachments
#and make sure that mimetypes dictionary is up to date
#raise an error if it's not
FILE_TYPES = {
"application/java-archive": 'jar',
"application/msword": 'doc',
"application/octet-stream": 'txt',
"application/text": 'txt',
"application/vnd.visio": 'vsd',
"application/x-bzip": 'bz',
"application/x-gzip": 'gz',
"application/x-java-archive": 'jar',
"application/x-shellscript": 'sh',
"application/x-zip-compressed": 'zip',
"application/xml": 'xml',
"application/zip": 'zip',
"image/bmp": 'bmp',
"image/gif": 'gif',
"image/jpeg": 'jpeg',
"image/pjpeg": 'pjpeg',
"image/png": 'png',
"image/x-png": 'png',
"text/html": 'html',
"text/java": 'java',
"text/plain": 'txt',
"text/x-java": 'java',
"text/x-java-source": 'java',
"text/x-log": 'log',
"text/xml": 'xml'
}
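# The TODO at the top of this module asks for a pass over all attachments to
# verify that the FILE_TYPES mapping is up to date. The helper below is a
# hedged sketch of such a check (it is not called by the command itself) and
# assumes the same <Attachment contentType="..."> layout that parse_post()
# reads further down.
def assert_known_attachment_types(soup):
    """Raise ValueError if the dump references attachment mimetypes
    that are missing from FILE_TYPES."""
    unknown = set()
    for att in soup.find_all('Attachment'):
        content_type = att.get('contentType')
        if content_type and content_type not in FILE_TYPES:
            unknown.add(content_type)
    if unknown:
        raise ValueError(
            'FILE_TYPES is missing mimetypes: %s' % ', '.join(sorted(unknown))
        )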
jive = JiveConverter()
def parse_date(date_str):
return datetime.strptime(date_str[:-8], '%Y/%m/%d %H:%M:%S')
def fix_internal_links_in_post(post):
"""will replace old internal urls with the new ones."""
def link_is_naked(match):
"""naked link either starts at the beginning of string
or is not inside the jive link construct: [...]"""
pos = match.start()
# the second test is rather naive as it assumes that a
# | will be preceded by something like [some link
# which we don't test here
return pos < 2 or post.text[pos-2] not in ('[', '|')
def internal_link_sub(match):
"""pull post by the matched pars in the old link
and returns link to the new post"""
link_type = match.group(1)
item_id = int(match.group(2))
lookup_key = (link_type == 'message' and 'old_answer_id' or 'old_question_id')
try:
post = models.Post.objects.get(**{lookup_key: item_id})
# if original link is naked, we put in into brackets
# so that the formatter will render the result correctly
# otherwise "naked" /url will stay plain text
new_url = post.get_absolute_url()
return (link_is_naked(match) and '[%s]' % new_url or new_url)
except models.Post.DoesNotExist:
return ''
post.text = internal_link_re.sub(internal_link_sub, post.text)
post.save()
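# Illustrative rewrite performed by fix_internal_links_in_post (the exact Jive
# URL pattern is an assumption based on the thread.jspa links used in
# add_legacy_links below): an old reference such as
#   [see this discussion|/thread.jspa?threadID=1234]
# is looked up through old_question_id and replaced with the imported post's
# get_absolute_url(); links to posts that were not imported are dropped.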
def turn_first_company_user_to_admin(domain):
company_users = models.User.objects.filter(
email__endswith='@' + domain
).order_by('id')
if company_users.count() == 0:
return None
user = company_users[0]
user.is_staff = True
user.is_superuser = True
user.save()
return user
def thread_get_answer_from_company(thread, domain):
answers = thread.posts.filter(
post_type='answer'
).select_related(
'author__email'
)
for answer in answers:
if answer.author.email.endswith('@' + domain):
return answer
return None
def thread_find_first_comment_from_company(thread, domain):
comments = thread.posts.filter(
post_type='comment'
).select_related(
'author__email'
).order_by('added_at')
for comment in comments:
if comment.author.email.endswith('@' + domain):
return comment
return None
COMPANY_DOMAIN_HELP = """If used - first response from user with that domain
then first response in each question from user with matching email address
will be posted as answer and accepted as correct. Also, first user
with a matching email address will be a site administrator."""
JIVE_REDIRECTS_HELP = """This file will contain redirects from the old
posts to new"""
class Command(BaseCommand):
args = '<jive-dump.xml>'
option_list = BaseCommand.option_list + (
make_option('--company-domain',
action='store',
type='str',
dest='company_domain',
default=None,
help=COMPANY_DOMAIN_HELP
),
make_option('--redirects_file',
action='store',
type='str',
dest='redirects_file',
default='',
help=JIVE_REDIRECTS_HELP
)
)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
#relax certain settings
askbot_settings.update('LIMIT_ONE_ANSWER_PER_USER', False)
askbot_settings.update('MAX_COMMENT_LENGTH', 1000000)
askbot_settings.update('MIN_REP_TO_INSERT_LINK', 1)
askbot_settings.update('MIN_REP_TO_SUGGEST_LINK', 1)
askbot_settings.update('COMMENTS_EDITOR_TYPE', 'rich-text')
askbot_settings.update('MARKUP_CODE_FRIENDLY', True)
self.bad_email_count = 0
self.attachments_path = ''
self.soup = None
self.jive_url = None
def handle(self, *args, **kwargs):
translation.activate(django_settings.LANGUAGE_CODE)
assert len(args) == 1, 'Dump file name is required'
dump_file_name = args[0]
xml = open(dump_file_name, 'r').read()
soup = BeautifulSoup(xml, ['lxml', 'xml'])
self.soup = soup
url_prop = self.soup.find('Property', attrs={'name': 'jiveURL'})
self.jive_url= url_prop['value']
dump_dir = os.path.dirname(os.path.abspath(dump_file_name))
self.attachments_path = os.path.join(dump_dir, 'attachments')
self.import_users()
self.import_forums()
if kwargs['company_domain']:
self.promote_company_replies(kwargs['company_domain'])
self.fix_internal_links()
self.add_legacy_links()
if kwargs['redirects_file']:
self.make_redirects(kwargs['redirects_file'])
self.convert_jive_markup_to_html()
models.Message.objects.all().delete()
@transaction.commit_manually
def add_legacy_links(self):
questions = models.Post.objects.filter(post_type='question')
count = questions.count()
message = 'Adding links to old forum'
template = """\n\n{quote}This thread was imported from the previous forum.
For your reference, the original is [available here|%s]{quote}"""
for question in ProgressBar(questions.iterator(), count, message):
thread_id = question.old_question_id
jive_url = self.jive_url
old_url = '%s/thread.jspa?threadID=%s' % (jive_url, thread_id)
question.text += template % old_url
question.save()
transaction.commit()
transaction.commit()
@transaction.commit_manually
    def make_redirects(self, redirects_file):
        """todo: implement this when needed (redirects_file is not used yet)"""
pass
@transaction.commit_manually
def convert_jive_markup_to_html(self):
posts = models.Post.objects.all()
count = posts.count()
message = 'Converting jive markup to html'
for post in ProgressBar(posts.iterator(), count, message):
post.html = jive.convert(post.text)
post.summary = post.get_snippet()
post.save()
transaction.commit()
transaction.commit()
@transaction.commit_manually
def fix_internal_links(self):
jive_url = self.jive_url
print 'Base url of old forum: %s' % jive_url
posts = models.Post.objects.filter(text__contains=jive_url)
count = posts.count()
message = 'Fixing internal links'
for post in ProgressBar(posts.iterator(), count, message):
post.text = post.text.replace(jive_url, '')
fix_internal_links_in_post(post)
transaction.commit()
transaction.commit()
@transaction.commit_manually
def promote_company_replies(self, domain):
admin = turn_first_company_user_to_admin(domain)
if admin is None:
print "Note: did not find any users with email matching %s" % domain
return
message = 'Promoting company replies to accepted answers:'
threads = models.Thread.objects.all()
count = threads.count()
for thread in ProgressBar(threads.iterator(), count, message):
answer = thread_get_answer_from_company(thread, domain)
if answer == None:
comment = thread_find_first_comment_from_company(thread, domain)
if comment:
admin.repost_comment_as_answer(comment)
answer = comment
if answer:
admin.accept_best_answer(answer=answer, force=True)
transaction.commit()
transaction.commit()
@transaction.commit_manually
def import_users(self):
"""import users from jive to askbot"""
user_soup = self.soup.find_all('User')
message = 'Importing users:'
for user in ProgressBar(iter(user_soup), len(user_soup), message):
username = user.find('Username').text
real_name = user.find('Name').text
try:
email = EmailField().clean(user.find('Email').text)
except ValidationError:
                email = 'unknown%d@example.com' % self.bad_email_count
self.bad_email_count += 1
joined_timestamp = parse_date(user.find('CreationDate').text)
user = models.User(
username=username,
email=email,
real_name=real_name,
date_joined=joined_timestamp
)
user.set_unusable_password()
user.save()
transaction.commit()
def import_forums(self):
"""import forums by associating each with a special tag,
and then importing all threads for the tag"""
admin = models.User.objects.get(id=1)
forum_soup = self.soup.find_all('Forum')
print 'Have %d forums' % len(forum_soup)
for forum in forum_soup:
threads_soup = forum.find_all('Thread')
self.import_threads(threads_soup, forum.find('Name').text)
@transaction.commit_manually
def import_threads(self, threads, tag_name):
message = 'Importing threads for %s' % tag_name
for thread in ProgressBar(iter(threads), len(threads), message):
self.import_thread(thread, tag_name)
transaction.commit()
def add_attachments_to_post(self, post, attachments):
if len(attachments) == 0:
return
post.text += '\nh4. Attachments\n'
for att in attachments:
att_id, name, mimetype = att
if mimetype not in FILE_TYPES:
continue
ext = '.' + FILE_TYPES[mimetype]
file_name = make_file_name(ext)
# copy attachment file to a new place
source_file = os.path.join(self.attachments_path, att_id + '.bin')
dest_file = os.path.join(django_settings.MEDIA_ROOT, file_name)
shutil.copyfile(source_file, dest_file)
# add link to file to the post text
post.text += '# [%s|%s%s]\n' % (name, django_settings.MEDIA_URL, file_name)
def import_thread(self, thread, tag_name):
"""import individual thread"""
question_soup = thread.find('Message')
post_id, title, body, attachments, timestamp, user = \
self.parse_post(question_soup)
if models.Post.objects.filter(old_question_id=thread['id']).count() == 1:
#this allows restarting the process of importing forums
#any time
return
#post question
question = user.post_question(
title=title,
body_text=body,
timestamp=timestamp,
tags=tag_name,
language=django_settings.LANGUAGE_CODE
)
self.add_attachments_to_post(question, attachments)
question.html = jive.convert(question.text)
question.old_question_id = int(thread['id'])
question.old_answer_id = post_id
question.summary = question.get_snippet()
question.save()
#post answers
message_list = question_soup.find_all('MessageList', recursive=False)
if len(message_list) == 0:
return
for answer_soup in message_list[0].find_all('Message', recursive=False):
post_id, title, body, attachments, timestamp, user = \
self.parse_post(answer_soup)
answer = user.post_answer(
question=question,
body_text=body,
timestamp=timestamp
)
self.add_attachments_to_post(answer, attachments)
answer.html = jive.convert(answer.text)
answer.summary = answer.get_snippet()
answer.old_answer_id = post_id
answer.save()
comments = answer_soup.find_all('Message')
for comment in comments:
post_id, title, body, attachments, timestamp, user = \
self.parse_post(comment)
comment = user.post_comment(
parent_post=answer,
body_text=body,
timestamp=timestamp
)
comment.old_answer_id = post_id
self.add_attachments_to_post(comment, attachments)
comment.html = jive.convert(comment.text)
comment.summary = comment.get_snippet()
comment.save()
def parse_post(self, post):
title = post.find('Subject').text
added_at = parse_date(post.find('CreationDate').text)
username = post.find('Username').text
body = post.find('Body').text
attachments_soup = post.find_all('Attachment')
attachments = list()
for att in attachments_soup:
att_id = att['id']
name = att.find('Name').text
content_type = att['contentType']
attachments.append((att_id, name, content_type))
try:
user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
            email = 'unknown%d@example.com' % self.bad_email_count
self.bad_email_count += 1
user = models.User(username=username, email=email)
user.save()
return int(post['id']), title, body, attachments, added_at, user
| gpl-3.0 | -1,287,298,728,133,770,500 | 6,576,795,210,161,769,000 | 37.31 | 87 | 0.588228 | false |
sharma1nitish/phantomjs | src/breakpad/src/tools/gyp/test/builddir/gyptest-all.py | 147 | 2358 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
test = TestGyp.TestGyp(formats=['!make'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
dir = 'relocate/builddir/Default/'
test.run(program=test.workpath(dir + prog), stdout=expect)
run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
| bsd-3-clause | -7,539,566,772,001,542,000 | 5,993,687,986,725,255,000 | 29.623377 | 79 | 0.736641 | false |
selste/micropython | tests/extmod/vfs_userfs.py | 5 | 1608 | # test VFS functionality with a user-defined filesystem
# also tests parts of uio.IOBase implementation
import sys
try:
import uio
uio.IOBase
import uos
uos.mount
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class UserFile(uio.IOBase):
def __init__(self, data):
self.data = data
self.pos = 0
def read(self):
return self.data
def readinto(self, buf):
n = 0
while n < len(buf) and self.pos < len(self.data):
buf[n] = self.data[self.pos]
n += 1
self.pos += 1
return n
def ioctl(self, req, arg):
print("ioctl", req, arg)
return 0
class UserFS:
def __init__(self, files):
self.files = files
def mount(self, readonly, mksfs):
pass
def umount(self):
pass
def stat(self, path):
print("stat", path)
if path in self.files:
return (32768, 0, 0, 0, 0, 0, 0, 0, 0, 0)
raise OSError
def open(self, path, mode):
print("open", path, mode)
return UserFile(self.files[path])
# create and mount a user filesystem
user_files = {
"/data.txt": b"some data in a text file\n",
"/usermod1.py": b"print('in usermod1')\nimport usermod2",
"/usermod2.py": b"print('in usermod2')",
}
uos.mount(UserFS(user_files), "/userfs")
# open and read a file
f = open("/userfs/data.txt")
print(f.read())
# import files from the user filesystem
sys.path.append("/userfs")
import usermod1
# unmount and undo path addition
uos.umount("/userfs")
sys.path.pop()
| mit | 1,938,741,998,292,285,400 | -7,426,908,219,168,401,000 | 19.615385 | 61 | 0.589552 | false |
softlayer/softlayer-python | SoftLayer/CLI/vlan/list.py | 2 | 1798 | """List VLANs."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI.vlan.detail import get_gateway_firewall
from SoftLayer import utils
COLUMNS = ['id',
'number',
'name',
'Gateway/Firewall',
'datacenter',
'hardware',
'virtual_servers',
'public_ips']
@click.command()
@click.option('--sortby',
help='Column to sort by',
type=click.Choice(COLUMNS))
@click.option('--datacenter', '-d',
help='Filter by datacenter shortname (sng01, dal05, ...)')
@click.option('--number', '-n', help='Filter by VLAN number')
@click.option('--name', help='Filter by VLAN name')
@click.option('--limit', '-l',
help='How many results to get in one api call, default is 100',
default=100,
show_default=True)
@environment.pass_env
def cli(env, sortby, datacenter, number, name, limit):
"""List VLANs."""
mgr = SoftLayer.NetworkManager(env.client)
table = formatting.Table(COLUMNS)
table.sortby = sortby
vlans = mgr.list_vlans(datacenter=datacenter,
vlan_number=number,
name=name,
limit=limit)
for vlan in vlans:
table.add_row([
vlan.get('id'),
vlan.get('vlanNumber'),
vlan.get('name') or formatting.blank(),
get_gateway_firewall(vlan),
utils.lookup(vlan, 'primaryRouter', 'datacenter', 'name'),
vlan.get('hardwareCount'),
vlan.get('virtualGuestCount'),
vlan.get('totalPrimaryIpAddressCount'),
])
env.fout(table)
| mit | -5,557,512,800,082,652,000 | -3,660,754,825,827,216,000 | 29.474576 | 77 | 0.571746 | false |
beernarrd/gramps | gramps/plugins/export/exportftree.py | 1 | 6827 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006, 2008 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to Web Family Tree"
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import os
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
import collections
log = logging.getLogger(".WriteFtree")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.alive import probably_alive
from gramps.gui.plug.export import WriterOptionBox
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# writeData
#
#-------------------------------------------------------------------------
def writeData(database, filename, user, option_box=None):
writer = FtreeWriter(database, filename, user, option_box)
return writer.export_data()
#-------------------------------------------------------------------------
#
# FtreeWriter
#
#-------------------------------------------------------------------------
class FtreeWriter:
def __init__(self, database, filename, user, option_box=None):
self.db = database
self.filename = filename
self.user = user
self.option_box = option_box
        if callable(self.user.callback): # callback is really callable
self.update = self.update_real
else:
self.update = self.update_empty
if option_box:
self.option_box.parse_options()
self.db = option_box.get_filtered_database(self.db)
self.plist = [x for x in self.db.iter_person_handles()]
def update_empty(self):
pass
def update_real(self):
self.count += 1
newval = int(100*self.count/self.total)
if newval != self.oldval:
self.user.callback(newval)
self.oldval = newval
def export_data(self):
name_map = {}
id_map = {}
id_name = {}
self.count = 0
self.oldval = 0
self.total = 2*len(self.plist)
for key in self.plist:
self.update()
pn = self.db.get_person_from_handle(key).get_primary_name()
sn = pn.get_surname()
items = pn.get_first_name().split()
n = ("%s %s" % (items[0], sn)) if items else sn
count = -1
if n in name_map:
count = 0
while 1:
nn = "%s%d" % (n, count)
if nn not in name_map:
                        break
count += 1
name_map[nn] = key
id_map[key] = nn
else:
name_map[n] = key
id_map[key] = n
id_name[key] = get_name(pn, sn, count)
with open(self.filename, "w", encoding='utf_8') as f:
for key in self.plist:
self.update()
p = self.db.get_person_from_handle(key)
name = id_name[key]
father = mother = email = web = ""
family_handle = p.get_main_parents_family_handle()
if family_handle:
family = self.db.get_family_from_handle(family_handle)
if family.get_father_handle() and \
family.get_father_handle() in id_map:
father = id_map[family.get_father_handle()]
if family.get_mother_handle() and \
family.get_mother_handle() in id_map:
mother = id_map[family.get_mother_handle()]
#
# Calculate Date
#
birth_ref = p.get_birth_ref()
death_ref = p.get_death_ref()
if birth_ref:
birth_event = self.db.get_event_from_handle(birth_ref.ref)
birth = birth_event.get_date_object()
else:
birth = None
if death_ref:
death_event = self.db.get_event_from_handle(death_ref.ref)
death = death_event.get_date_object()
else:
death = None
#if self.restrict:
# alive = probably_alive(p, self.db)
#else:
# alive = 0
if birth:
if death:
dates = "%s-%s" % (fdate(birth), fdate(death))
else:
dates = fdate(birth)
else:
if death:
dates = fdate(death)
else:
dates = ""
f.write('%s;%s;%s;%s;%s;%s\n' % (name, father, mother, email, web,
dates))
return True
def fdate(val):
if val.get_year_valid():
if val.get_month_valid():
if val.get_day_valid():
return "%d/%d/%d" % (val.get_day(), val.get_month(),
val.get_year())
else:
return "%d/%d" % (val.get_month(), val.get_year())
else:
return "%d" % val.get_year()
else:
return ""
def get_name(name, surname, count):
"""returns a name string built from the components of the Name
instance, in the form of Firstname Surname"""
return (name.first_name + ' ' +
surname +
(str(count) if count != -1 else '') +
(', ' +name.suffix if name.suffix else '')
)
| gpl-2.0 | -4,585,591,831,898,377,700 | 2,333,087,642,256,603,600 | 33.135 | 94 | 0.461989 | false |
atodorov/lorax | src/composer/cli/cmdline.py | 5 | 2346 | #
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import argparse
from composer import vernum
from composer.cli.help import epilog
VERSION = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
def composer_cli_parser():
""" Return the ArgumentParser for composer-cli"""
parser = argparse.ArgumentParser(description="Lorax Composer commandline tool",
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
fromfile_prefix_chars="@")
parser.add_argument("-j", "--json", action="store_true", default=False,
help="Output the raw JSON response instead of the normal output.")
parser.add_argument("-s", "--socket", default="/run/weldr/api.socket", metavar="SOCKET",
help="Path to the socket file to listen on")
parser.add_argument("--log", dest="logfile", default=None, metavar="LOG",
help="Path to logfile (./composer-cli.log)")
parser.add_argument("-a", "--api", dest="api_version", default="1", metavar="APIVER",
help="API Version to use")
parser.add_argument("--test", dest="testmode", default=0, type=int, metavar="TESTMODE",
help="Pass test mode to compose. 1=Mock compose with fail. 2=Mock compose with finished.")
parser.add_argument("-V", action="store_true", dest="showver",
help="show program's version number and exit")
# Commands are implemented by parsing the remaining arguments outside of argparse
parser.add_argument('args', nargs=argparse.REMAINDER)
return parser
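# A minimal usage sketch (illustrative only, not part of the original module).
# Because the positional arguments are collected with argparse.REMAINDER, the
# subcommand and its arguments land in opts.args and are dispatched outside of
# argparse:
#
#     parser = composer_cli_parser()
#     opts = parser.parse_args(["--json", "blueprints", "list"])
#     # opts.json is True, opts.args == ["blueprints", "list"]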
| gpl-2.0 | 1,723,936,240,566,254,600 | 3,105,417,702,177,637,400 | 45.92 | 114 | 0.6526 | false |
rschnapka/odoo | addons/mail/tests/test_message_read.py | 57 | 14622 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.test_mail_base import TestMailBase
class test_mail_access_rights(TestMailBase):
def test_00_message_read(self):
""" Tests for message_read and expandables. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
self.mail_group.message_subscribe_users(cr, uid, [group_pigs.id], [user_raoul.id])
pigs_domain = [('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)]
# Data: create a discussion in Pigs (3 threads, with respectively 0, 4 and 4 answers)
msg_id0 = self.group_pigs.message_post(body='0', subtype='mt_comment')
msg_id1 = self.group_pigs.message_post(body='1', subtype='mt_comment')
msg_id2 = self.group_pigs.message_post(body='2', subtype='mt_comment')
msg_id3 = self.group_pigs.message_post(body='1-1', subtype='mt_comment', parent_id=msg_id1)
msg_id4 = self.group_pigs.message_post(body='2-1', subtype='mt_comment', parent_id=msg_id2)
msg_id5 = self.group_pigs.message_post(body='1-2', subtype='mt_comment', parent_id=msg_id1)
msg_id6 = self.group_pigs.message_post(body='2-2', subtype='mt_comment', parent_id=msg_id2)
msg_id7 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id8 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_id9 = self.group_pigs.message_post(body='1-1-1', subtype='mt_comment', parent_id=msg_id3)
msg_id10 = self.group_pigs.message_post(body='2-1-1', subtype='mt_comment', parent_id=msg_id4)
msg_ids = [msg_id10, msg_id9, msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0]
ordered_msg_ids = [msg_id2, msg_id4, msg_id6, msg_id8, msg_id10, msg_id1, msg_id3, msg_id5, msg_id7, msg_id9, msg_id0]
# Test: raoul received notifications
raoul_notification_ids = self.mail_notification.search(cr, user_raoul.id, [('read', '=', False), ('message_id', 'in', msg_ids), ('partner_id', '=', user_raoul.partner_id.id)])
self.assertEqual(len(raoul_notification_ids), 11, 'message_post: wrong number of produced notifications')
# Test: read some specific ids
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, ids=msg_ids[2:4], domain=[('body', 'like', 'dummy')], context={'mail_read_set_read': True})
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids[2:4], read_msg_ids, 'message_read with direct ids should read only the requested ids')
# Test: read messages of Pigs through a domain, being thread or not threaded
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(msg_ids, read_msg_ids, 'message_read flat with domain on Pigs should equal all messages of Pigs')
read_msg_list = self.mail_message.message_read(cr, user_raoul.id, domain=pigs_domain, limit=200, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list]
self.assertEqual(ordered_msg_ids, read_msg_ids,
'message_read threaded with domain on Pigs should equal all messages of Pigs, and sort them with newer thread first, last message last in thread')
# ----------------------------------------
# CASE1: message_read with domain, threaded
# We simulate an entire flow, using the expandables to test them
# ----------------------------------------
# Do: read last message, threaded
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# TDE TODO: test expandables order
type_list = map(lambda item: item.get('type'), read_msg_list)
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on last Pigs message should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id2, msg_id10]), set(read_msg_ids), 'message_read on the last Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in first thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on last Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id2), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id4), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id8), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id2, 'new messages expandable should have parent_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id2 (should be imposed by JS), 2 messages
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
new_msg_exp = [msg for msg in read_msg_list if msg.get('type') == 'expandable'][0]
# Test: structure content, 2 messages and 1 thread expandable
self.assertEqual(len(read_msg_list), 3, 'message_read in Pigs thread should return 2 messages and 1 expandables')
self.assertEqual(set([msg_id6, msg_id8]), set(read_msg_ids), 'message_read in Pigs thread should return 2 more previous messages in thread')
# Do: read the last message
read_msg_list = self.mail_message.message_read(cr, uid, domain=new_msg_exp.get('domain'), limit=2, thread_level=0, parent_id=msg_id2)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, 1 message
self.assertEqual(len(read_msg_list), 1, 'message_read in Pigs thread should return 1 message')
self.assertEqual(set([msg_id4]), set(read_msg_ids), 'message_read in Pigs thread should return the last message in thread')
# Do: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read on last Pigs message should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
self.assertFalse(new_threads_exp.get('parent_id'), 'new threads expandable should not have an parent_id')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 4, 'message_read on Pigs should return 2 messages and 2 expandables')
self.assertEqual(set([msg_id1, msg_id9]), set(read_msg_ids), 'message_read on a Pigs message should also get its parent')
self.assertEqual(read_msg_list[1].get('parent_id'), read_msg_list[0].get('id'), 'message_read should set the ancestor to the thread header')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
elif msg.get('type') == 'expandable':
new_msg_exp = msg
# Do: fetch new messages in second thread, domain from expandable
self.assertIsNotNone(new_msg_exp, 'message_read on Pigs message should have returned a new messages expandable')
domain = new_msg_exp.get('domain', [])
# Test: expandable, conditions in domain
self.assertIn(('id', 'child_of', msg_id1), domain, 'new messages expandable domain should contain a child_of condition')
self.assertIn(('id', '>=', msg_id3), domain, 'new messages expandable domain should contain an id greater than condition')
self.assertIn(('id', '<=', msg_id7), domain, 'new messages expandable domain should contain an id less than condition')
self.assertEqual(new_msg_exp.get('parent_id'), msg_id1, 'new messages expandable should have ancestor_id set to the thread header')
# Do: message_read with domain, thread_level=0, parent_id=msg_id1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=200, thread_level=0, parent_id=msg_id1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: other message in thread have been fetch
self.assertEqual(set([msg_id3, msg_id5, msg_id7]), set(read_msg_ids), 'message_read on the last Pigs message should also get its parent')
# Test: fetch a new thread, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read should have returned a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'general expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=1 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=1, thread_level=1)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 1, 'message_read on Pigs should return 1 message because everything else has been fetched')
self.assertEqual([msg_id0], read_msg_ids, 'message_read after 2 More should return only 1 last message')
# ----------------------------------------
# CASE2: message_read with domain, flat
# ----------------------------------------
# Do: read 2 lasts message, flat
read_msg_list = self.mail_message.message_read(cr, uid, domain=pigs_domain, limit=2, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is not set, 1 expandable
self.assertEqual(len(read_msg_list), 3, 'message_read on last Pigs message should return 2 messages and 1 expandable')
self.assertEqual(set([msg_id9, msg_id10]), set(read_msg_ids), 'message_read flat on Pigs last messages should only return those messages')
self.assertFalse(read_msg_list[0].get('parent_id'), 'message_read flat should set the ancestor as False')
self.assertFalse(read_msg_list[1].get('parent_id'), 'message_read flat should set the ancestor as False')
# Data: get expandables
new_threads_exp, new_msg_exp = None, None
for msg in read_msg_list:
if msg.get('type') == 'expandable' and msg.get('nb_messages') == -1 and msg.get('max_limit'):
new_threads_exp = msg
# Do: fetch new messages, domain from expandable
self.assertIsNotNone(new_threads_exp, 'message_read flat on the 2 last Pigs messages should have returns a new threads expandable')
domain = new_threads_exp.get('domain', [])
# Test: expandable, conditions in domain
for condition in pigs_domain:
self.assertIn(condition, domain, 'new threads expandable domain should contain the message_read domain parameter')
# Do: message_read with domain, thread_level=0 (should be imposed by JS)
read_msg_list = self.mail_message.message_read(cr, uid, domain=domain, limit=20, thread_level=0)
read_msg_ids = [msg.get('id') for msg in read_msg_list if msg.get('type') != 'expandable']
# Test: structure content, ancestor is added to the read messages, ordered by id, ancestor is set, 2 expandables
self.assertEqual(len(read_msg_list), 9, 'message_read on Pigs should return 9 messages and 0 expandable')
self.assertEqual([msg_id8, msg_id7, msg_id6, msg_id5, msg_id4, msg_id3, msg_id2, msg_id1, msg_id0], read_msg_ids,
                         'message_read, More on flat, should return all remaining messages')
| agpl-3.0 | 2,513,105,131,443,912,000 | 1,661,430,212,350,333,400 | 76.365079 | 183 | 0.655314 | false |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py | 916 | 3023 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
file = stderr
sma_window = 10
def __init__(self, *args, **kwargs):
self.index = 0
self.start_ts = time()
self._ts = self.start_ts
self._dt = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def avg(self):
return sum(self._dt) / len(self._dt) if self._dt else 0
@property
def elapsed(self):
return int(time() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update(self):
pass
def start(self):
pass
def finish(self):
pass
def next(self, n=1):
if n > 0:
now = time()
dt = (now - self._ts) / n
self._dt.append(dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
for x in it:
yield x
self.next()
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
for x in it:
yield x
self.next()
self.finish()
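# An illustrative sketch (not part of the upstream package): the concrete bars
# and spinners in this package subclass Progress/Infinite and override update().
# A minimal text-only subclass might look like the following (the class name is
# an example, not a real API):
#
#     class PercentPrinter(Progress):
#         def update(self):
#             self.file.write("\r%d%% (eta: %ds)" % (self.percent, self.eta))
#             self.file.flush()
#
#     for item in PercentPrinter(max=50).iter(range(50)):
#         pass  # do one unit of work per item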
| gpl-3.0 | 129,264,561,148,038,290 | -3,719,728,530,700,048,400 | 23.577236 | 74 | 0.600397 | false |
Ayub-Khan/edx-platform | lms/djangoapps/lti_provider/signature_validator.py | 129 | 8935 | """
Subclass of oauthlib's RequestValidator that checks an OAuth signature.
"""
from oauthlib.oauth1 import SignatureOnlyEndpoint
from oauthlib.oauth1 import RequestValidator
class SignatureValidator(RequestValidator):
"""
Helper class that verifies the OAuth signature on a request.
The pattern required by the oauthlib library mandates that subclasses of
RequestValidator contain instance methods that can be called back into in
order to fetch the consumer secret or to check that fields conform to
application-specific requirements.
"""
def __init__(self, lti_consumer):
super(SignatureValidator, self).__init__()
self.endpoint = SignatureOnlyEndpoint(self)
self.lti_consumer = lti_consumer
# The OAuth signature uses the endpoint URL as part of the request to be
# hashed. By default, the oauthlib library rejects any URLs that do not
# use HTTPS. We turn this behavior off in order to allow edX to run without
# SSL in development mode. When the platform is deployed and running with
# SSL enabled, the URL passed to the signature verifier must start with
# 'https', otherwise the message signature would not match the one generated
# on the platform.
enforce_ssl = False
def check_client_key(self, key):
"""
Verify that the key supplied by the LTI consumer is valid for an LTI
launch. This method is only concerned with the structure of the key;
whether the key is associated with a known LTI consumer is checked in
validate_client_key. This method signature is required by the oauthlib
library.
:return: True if the client key is valid, or False if it is not.
"""
return key is not None and 0 < len(key) <= 32
def check_nonce(self, nonce):
"""
Verify that the nonce value that accompanies the OAuth signature is
valid. This method is concerned only with the structure of the nonce;
the validate_timestamp_and_nonce method will check that the nonce has
not been used within the specified time frame. This method signature is
required by the oauthlib library.
:return: True if the OAuth nonce is valid, or False if it is not.
"""
return nonce is not None and 0 < len(nonce) <= 64
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None,
access_token=None):
"""
Verify that the request is not too old (according to the timestamp), and
that the nonce value has not been used already within the period of time
in which the timestamp marks a request as valid. This method signature
is required by the oauthlib library.
:return: True if the OAuth nonce and timestamp are valid, False if they
are not.
"""
return True
def validate_client_key(self, client_key, request):
"""
        Ensure that the client key supplied with the LTI launch is one that has
been generated by our platform, and that it has an associated client
secret.
:return: True if the key is valid, False if it is not.
"""
return self.lti_consumer.consumer_key == client_key
def get_client_secret(self, client_key, request):
"""
Fetch the client secret from the database. This method signature is
required by the oauthlib library.
:return: the client secret that corresponds to the supplied key if
present, or None if the key does not exist in the database.
"""
return self.lti_consumer.consumer_secret
def verify(self, request):
"""
Check the OAuth signature on a request. This method uses the
SignatureEndpoint class in the oauthlib library that in turn calls back
to the other methods in this class.
:param request: the HttpRequest object to be verified
:return: True if the signature matches, False if it does not.
"""
method = unicode(request.method)
url = request.build_absolute_uri()
body = request.body
# The oauthlib library assumes that headers are passed directly from the
# request, but Django mangles them into its own format. The only header
# that the library requires (for now) is 'Content-Type', so we
# reconstruct just that one.
headers = {"Content-Type": request.META['CONTENT_TYPE']}
result, __ = self.endpoint.validate_request(url, method, body, headers)
return result
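    # An illustrative sketch (not part of the original module) of how verify()
    # could be called from a Django view handling an LTI launch; the consumer
    # lookup below is an assumption, not the actual lms code path:
    #
    #     consumer = LtiConsumer.objects.get(
    #         consumer_key=request.POST['oauth_consumer_key'])
    #     if not SignatureValidator(consumer).verify(request):
    #         return HttpResponseForbidden()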
def get_request_token_secret(self, client_key, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def get_redirect_uri(self, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def get_realms(self, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def invalidate_request_token(self, client_key, request_token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def get_rsa_key(self, client_key, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def dummy_access_token(self):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def dummy_client(self):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def verify_realms(self, token, realms, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def save_verifier(self, token, verifier, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def dummy_request_token(self):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_redirect_uri(self, client_key, redirect_uri, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def verify_request_token(self, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_request_token(self, client_key, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def get_default_realms(self, client_key, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_access_token(self, client_key, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def save_access_token(self, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_requested_realms(self, client_key, realms, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def validate_verifier(self, client_key, token, verifier, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def save_request_token(self, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
def get_access_token_secret(self, client_key, token, request):
"""
Unused abstract method from super class. See documentation in RequestValidator
"""
raise NotImplementedError
| agpl-3.0 | 597,931,440,358,882,000 | -4,138,417,224,070,406,700 | 36.542017 | 86 | 0.658198 | false |
iCarto/siga | extScripting/scripts/jython/console/jintrospect.py | 1 | 5168 | """Extend introspect.py for Java based Jython classes."""
from introspect import *
import string
__author__ = "Don Coleman <[email protected]>"
__cvsid__ = "$Id$"
def getAutoCompleteList(command='', locals=None, includeMagic=1,
includeSingle=1, includeDouble=1):
"""Return list of auto-completion options for command.
The list of options will be based on the locals namespace."""
attributes = []
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='.')
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
return attributes
if ispython(object):
# use existing code
attributes = getAttributeNames(object, includeMagic, includeSingle, includeDouble)
else:
methods = methodsOf(object.__class__)
attributes = [eachMethod.__name__ for eachMethod in methods]
return attributes
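# An illustrative sketch (not part of the original module): with a Java object
# bound in the namespace, the completion options come from the Java class
# methods rather than from Python attribute introspection:
#
#     from java.util import ArrayList
#     options = getAutoCompleteList('mylist.', locals={'mylist': ArrayList()})
#     # options would include names such as 'add', 'clear', 'size', ...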
def methodsOf(clazz):
"""return a list of all the methods in a class"""
classMembers = vars(clazz).values()
methods = [eachMember for eachMember in classMembers if callable(eachMember)]
for eachBase in clazz.__bases__:
methods.extend(methodsOf(eachBase))
return methods
def getCallTipJava(command='', locals=None):
"""For a command, return a tuple of object name, argspec, tip text.
The call tip information will be based on the locals namespace."""
calltip = ('', '', '') # object name, argspec, tip text.
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='(')
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
return calltip
if ispython(object):
# Patrick's code handles python code
# TODO fix in future because getCallTip runs eval() again
return getCallTip(command, locals)
name = ''
try:
name = object.__name__
except AttributeError:
pass
tipList = []
argspec = '' # not using argspec for Java
if inspect.isbuiltin(object):
# inspect.isbuiltin() fails for Jython
# Can we get the argspec for Jython builtins? We can't in Python.
pass
elif inspect.isclass(object):
# get the constructor(s)
# TODO consider getting modifiers since jython can access private methods
constructors = object.getConstructors()
for constructor in constructors:
paramList = []
paramTypes = constructor.getParameterTypes()
# paramTypes is an array of classes, we need Strings
# TODO consider list comprehension
for param in paramTypes:
# TODO translate [B to byte[], [C to char[] etc
paramList.append(param.__name__)
paramString = string.join(paramList,', ')
tip = "%s(%s)" % (constructor.name, paramString)
tipList.append(tip)
elif inspect.ismethod(object):
method = object
object = method.im_class
# java allows overloading so we may have more than one method
methodArray = object.getMethods()
for eachMethod in methodArray:
if eachMethod.name == method.__name__:
paramList = []
for eachParam in eachMethod.parameterTypes:
paramList.append(eachParam.__name__)
paramString = string.join(paramList,', ')
# create a python style string a la PyCrust
# we're showing the parameter type rather than the parameter name, since that's all I can get
# we need to show multiple methods for overloading
# TODO improve message format
# do we want to show the method visibility
# how about exceptions?
# note: name, return type and exceptions same for EVERY overload method
tip = "%s(%s) -> %s" % (eachMethod.name, paramString, eachMethod.returnType)
tipList.append(tip)
# else:
# print "Not a java class :("
calltip = (name, argspec, string.join(tipList,"\n"))
return calltip
def ispython(object):
"""
Figure out if this is Python code or Java Code
"""
pyclass = 0
pycode = 0
pyinstance = 0
if inspect.isclass(object):
try:
object.__doc__
pyclass = 1
except AttributeError:
pyclass = 0
elif inspect.ismethod(object):
try:
object.__dict__
pycode = 1
except AttributeError:
pycode = 0
else: # I guess an instance of an object falls here
try:
object.__dict__
pyinstance = 1
except AttributeError:
pyinstance = 0
# print "object", object, "pyclass", pyclass, "pycode", pycode, "returning", pyclass | pycode
return pyclass | pycode | pyinstance
| gpl-3.0 | 8,362,608,389,322,583,000 | 8,075,200,569,423,401,000 | 31.708861 | 109 | 0.583978 | false |
edunham/toys | utilities/packingblocks.py | 1 | 2505 | #! /usr/bin/env python
# From IRC:
#
# "I was thinking about a toy idea for my kid to teach multiplication through
# area representation. 2x3 is a two-inch-by-three-inch slab of something with
# lines on it, etc. I'd need 45 pieces (since AxB = BxA, you can drop almost
# half) but if I wanted to put it away in almost equal 9x9 layers, how many
# layers would be required?"
# Let's draw a picture. We have a times table, a square from 1 to 9 each side,
# but a bunch of blocks are duplicates so I will X them out because we don't
# need to make them:
# 123456789
# 1 XXXXXXXX
# 2 XXXXXXX
# 3 XXXXXX
# 4 XXXXX
# 5 XXXX
# 6 XXX
# 7 XX
# 8 X
# 9
# First off I wanted to know if there's any hope of packing with no gaps. So I
# find the volume of units that it'll all take up. The function row() tells me
# the total area of the pieces in each row -- for row 3, I have a 3x1 piece, a
# 3x2 piece, and a 3x3 piece, so the total area is 18 units.
def row(end):
sum = 0
for i in range(1,end+1):
sum += end * i
return sum
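# Quick sanity check of the worked example above (added for illustration only):
assert row(3) == 3*1 + 3*2 + 3*3 == 18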
# So to get the total volume of a set of times-table blocks going up to n (n has
# been 9 so far) I'll express which rows I have -- range(1,n+1) -- and sum up
# all their areas. Note that the area of them all spread out and the volume are
# synonymous here, since I'm assuming the blocks are 1 unit thick. This may come in
# handy later so I can put the blocks away making the best use of the 3d box,
# like if some go in vertically while others are horizontal. Again, here I'm
# just looking for a set size and box size that have a **chance** of packing
# into a box with a square footprint.
def math_toy_volume(n):
return sum(map(row, range(1,n+1)))
# I happen to know from the original problem that the set had 45 pieces. If I
# try other set sizes, though, I would also like to know how many pieces they
# have. Easy, but easier to name it.
def math_toy_pieces(n):
return sum(range(1,n+1))
# Anyways I want the ones that have any hope of packing into a square box so I
# need to get the factors of the area and then find dups in the list of factors.
# From https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
# I get:
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
for i in range(1,21):
n = math_toy_volume(i)
print str(n) + "\t" + str(sorted(factors(n)))
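# A possible follow-up, added here only as an illustrative sketch: check whether
# each set's total area divides evenly into square n x n layers, i.e. whether an
# exact packing is even arithmetically possible before worrying about how the
# pieces would actually be arranged.
for i in range(1, 21):
    n = math_toy_volume(i)
    layers, leftover = divmod(n, i * i)
    print "n=%d: %d units -> %d full %dx%d layers, %d units left over" % (
        i, n, layers, i, i, leftover)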
| mit | -8,239,597,696,741,076,000 | -4,203,233,218,646,034,400 | 35.838235 | 130 | 0.678643 | false |
fishscene/streamlink | src/streamlink/plugins/disney_de.py | 3 | 1069 | """Plugin for Disney (Channel) Germany
Supports:
- http://video.disney.de/sehen/*
- http://disneychannel.de/sehen/*
- http://disneychannel.de/livestream
"""
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api.utils import parse_json
from streamlink.stream import HLSStream
_url_re = re.compile("http(s)?://(\w+\.)?disney(channel)?.de/")
# stream urls are in `Grill.burger`->stack->data->externals->data
_stream_hls_re = re.compile("\"hlsStreamUrl\":\s*(\"[^\"]+\")")
_stream_data_re = re.compile("\"dataUrl\":\s*(\"[^\"]+\")")
class DisneyDE(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
match = (_stream_hls_re.search(res.text) or
_stream_data_re.search(res.text))
if not match:
return
stream_url = parse_json(match.group(1))
return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = DisneyDE
| bsd-2-clause | -9,051,360,931,568,585,000 | 6,318,381,686,796,126,000 | 26.410256 | 73 | 0.637979 | false |
dsajkl/123 | common/djangoapps/student/management/tests/test_transfer_students.py | 2 | 2501 | """
Tests the transfer student management command
"""
from django.conf import settings
from opaque_keys.edx import locator
import unittest
import ddt
from student.management.commands import transfer_students
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestTransferStudents(ModuleStoreTestCase):
"""Tests for transferring students between courses."""
PASSWORD = 'test'
def test_transfer_students(self):
student = UserFactory()
student.set_password(self.PASSWORD) # pylint: disable=E1101
student.save() # pylint: disable=E1101
# Original Course
original_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
course = self._create_course(original_course_location)
# Enroll the student in 'verified'
CourseEnrollment.enroll(student, course.id, mode="verified")
# New Course 1
course_location_one = locator.CourseLocator('Org1', 'Course1', 'Run1')
new_course_one = self._create_course(course_location_one)
# New Course 2
course_location_two = locator.CourseLocator('Org2', 'Course2', 'Run2')
new_course_two = self._create_course(course_location_two)
original_key = unicode(course.id)
new_key_one = unicode(new_course_one.id)
new_key_two = unicode(new_course_two.id)
# Run the actual management command
transfer_students.Command().handle(
source_course=original_key, dest_course_list=new_key_one + "," + new_key_two
)
# Confirm the enrollment mode is verified on the new courses, and enrollment is enabled as appropriate.
self.assertEquals(('verified', False), CourseEnrollment.enrollment_mode_for_user(student, course.id))
self.assertEquals(('verified', True), CourseEnrollment.enrollment_mode_for_user(student, new_course_one.id))
self.assertEquals(('verified', True), CourseEnrollment.enrollment_mode_for_user(student, new_course_two.id))
def _create_course(self, course_location):
""" Creates a course """
return CourseFactory.create(
org=course_location.org,
number=course_location.course,
run=course_location.run
)
| agpl-3.0 | -3,525,813,325,734,618,600 | -1,511,127,269,773,817,000 | 40.683333 | 116 | 0.694522 | false |
edx/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/m2m_intermediary/tests.py | 92 | 1251 | from datetime import datetime
from django.test import TestCase
from models import Reporter, Article, Writer
class M2MIntermediaryTests(TestCase):
    def test_intermediary(self):
r1 = Reporter.objects.create(first_name="John", last_name="Smith")
r2 = Reporter.objects.create(first_name="Jane", last_name="Doe")
a = Article.objects.create(
headline="This is a test", pub_date=datetime(2005, 7, 27)
)
w1 = Writer.objects.create(reporter=r1, article=a, position="Main writer")
w2 = Writer.objects.create(reporter=r2, article=a, position="Contributor")
self.assertQuerysetEqual(
a.writer_set.select_related().order_by("-position"), [
("John Smith", "Main writer"),
("Jane Doe", "Contributor"),
],
lambda w: (unicode(w.reporter), w.position)
)
self.assertEqual(w1.reporter, r1)
self.assertEqual(w2.reporter, r2)
self.assertEqual(w1.article, a)
self.assertEqual(w2.article, a)
self.assertQuerysetEqual(
r1.writer_set.all(), [
("John Smith", "Main writer")
],
lambda w: (unicode(w.reporter), w.position)
)
| gpl-3.0 | -8,055,039,708,409,524,000 | -1,769,624,548,870,019,800 | 31.921053 | 82 | 0.592326 | false |
ddico/sale-workflow | sale_order_merge/models/sale_order_merge.py | 9 | 8093 | # coding: utf-8
# Copyright 2016 Opener B.V. - Stefan Rijnhart
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
from openerp.exceptions import Warning as UserError
from openerp.tools.translate import _
class SaleOrderMerge(models.TransientModel):
_name = 'sale.order.merge'
_description = 'Merge sale orders'
sale_order = fields.Many2one(
'sale.order', 'Merge into', required=True, readonly=True)
mergeable = fields.Many2many(
comodel_name='sale.order',
related='sale_order.merge_with')
to_merge = fields.Many2many(
'sale.order', 'rel_sale_to_merge', 'sale_id', 'to_merge_id',
'Orders to merge')
@api.multi
def merge_order_lines(self):
self.sale_order.write({
'order_line': [
(4, line.id)
for line in self.to_merge.mapped('order_line')
]})
@api.multi
def merge_invoices(self):
""" Merge all draft invoices. For prepaid orders, the payment
        of the original invoice is what triggers the procurement, but
there may still be other confirmed invoices. """
target = self.env['account.invoice']
other_inv = self.env['account.invoice']
keep_inv = self.env['account.invoice']
for invoice in (
self.sale_order.invoice_ids +
self.to_merge.mapped('invoice_ids')):
if invoice.state == 'draft' and not invoice.internal_number:
if target:
other_inv += invoice
else:
target = invoice
else:
keep_inv += invoice
if target:
other_inv.mapped('invoice_line').write({'invoice_id': target.id})
other_inv.mapped('tax_line').write({'invoice_id': target.id})
other_inv.unlink()
target.button_compute(set_total=True)
for inv in target + keep_inv:
self.sale_order.write({'invoice_ids': [(4, inv.id)]})
self.to_merge.write({'invoice_ids': [(6, 0, [])]})
@api.multi
def _picking_can_merge(self, picking):
return (picking.state not in ('done', 'cancel') and
picking.location_dest_id.usage == 'customer')
@api.multi
def _get_picking_map_key(self, picking):
return (picking.picking_type_id, picking.location_id,
picking.location_dest_id, picking.partner_id)
@api.multi
def merge_pickings(self):
""" Assign all pickings to the target sale order and merge any
pending pickings """
orders = self.sale_order + self.to_merge
group = self.env['procurement.group']
if self.sale_order.procurement_group_id:
group = self.sale_order.procurement_group_id
else:
for order in self.to_merge:
if order.procurement_group_id:
group = order.procurement_group_id
break
else:
return # no group, no pickings
self.sale_order.write({'procurement_group_id': group.id})
other_groups = orders.mapped('procurement_group_id')
self.env['stock.picking'].search(
[('group_id', 'in', other_groups.ids)]).write(
{'group_id': group.id})
self.env['stock.move'].search(
[('group_id', 'in', other_groups.ids)]).write(
{'group_id': group.id})
self.env['procurement.order'].search(
[('group_id', 'in', other_groups.ids)]).write(
{'group_id': group.id})
pick_map = {}
for picking in self.sale_order.picking_ids:
if self._picking_can_merge(picking):
key = self._get_picking_map_key(picking)
if key not in pick_map:
pick_map[key] = self.env['stock.picking']
pick_map[key] += picking
else:
picking.write({'origin': group.name})
for pickings in pick_map.values():
target = pickings[0]
if len(pickings) > 1:
pickings -= target
pickings.mapped('move_lines').write({'picking_id': target.id})
pickings.unlink()
target.write({'origin': group.name})
return True
@api.multi
def open_sale(self):
self.ensure_one()
return {
'name': _('Merged sale order'),
'view_type': 'form',
'view_mode': 'form',
'res_id': self.sale_order.id,
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
}
@api.multi
def merge(self):
"""
If not all orders have the same policy:
If not all confirmed orders have the same policy:
raise
Set the policy to the policy of the confirmed order(s)
If there is one confirmed order, confirm all other orders. For
prepaid orders, this will generate draft invoices.
"""
self.ensure_one()
orders = self.sale_order + self.to_merge
create_picking = False
reset_wait_invoice = False
if not all(order.state in ('sent', 'draft') for order in orders):
# Propagate the order policy from the confirmed to the draft
# orders, as they may be different.
drafts = orders.filtered(
lambda o: o.state in ('sent', 'draft'))
confirmed = orders - drafts
order_policy = confirmed[0].order_policy
if not all(o.order_policy == order_policy for o in confirmed):
raise UserError(
_('Cannot merge these orders because their order '
'policies cannot be reconciled.'))
# Flag if the main order's workflow needs to be tickled after
# merging if it already has passed the point of picking or invoice
# generation
if (order_policy == 'prepaid' and
self.sale_order.picking_ids):
create_picking = True
if (order_policy == 'manual' and
self.sale_order.state == 'progress' and
(drafts or confirmed.filtered(
lambda o: o.state == 'manual'))):
reset_wait_invoice = True
# Propagate order policy across draft orders
drafts.filtered(
lambda o: o.order_policy != order_policy).write(
{'order_policy': order_policy})
for draft in drafts:
# confirm orders to align state and create invoices
# and/or pickings
draft.action_button_confirm()
self.merge_invoices()
self.merge_pickings()
self.merge_order_lines()
self.to_merge.delete_workflow()
self.to_merge.create_workflow()
self.to_merge.signal_workflow('cancel')
if create_picking:
self.sale_order.action_ship_create()
if reset_wait_invoice:
item = self.env['workflow.workitem'].sudo().search(
[('act_id', 'in', (
self.env.ref('sale.act_invoice_end').id,
self.env.ref('sale.act_invoice').id)),
('inst_id.res_id', '=', self.sale_order.id)])
if item:
item_vals = {
'act_id': self.env.ref('sale.act_wait_invoice').id}
if item.subflow_id:
item_vals['subflow_id'] = False
if item.state == 'running':
item_vals['state'] = 'active'
item.write(item_vals)
self.sale_order.write({'state': 'manual'})
for order in self.to_merge:
order.message_post(_('Merged into %s') % self.sale_order.name)
self.sale_order.message_post(
_('Order(s) %s merged into this one') % ','.join(
self.to_merge.mapped('name')))
return self.open_sale()
| agpl-3.0 | 7,830,384,469,672,003,000 | 2,592,434,751,921,001,500 | 39.263682 | 78 | 0.538984 | false |
dangillet/cocos | cocos/layer/scrolling.py | 2 | 16208 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""This module defines the ScrollableLayer and ScrollingManager classes.
Controlling Scrolling
---------------------
You have two options for scrolling:
1. automatically scroll the map but stop at the map edges, and
2. scroll the map and allow the edge of the map to be displayed.
The ScrollingManager has a concept of "focus" which is the pixel
position of the player's view focus (*usually* the center of the
player sprite itself, but the player may be allowed to
move the view around, or you may move it around for them to highlight
something else in the scene). The ScrollingManager is clever enough to
manage many layers and handle scaling them.
Two methods are available for setting the map focus:
**set_focus(x, y)**
Attempt to set the focus to the pixel coordinates given. The layer(s)
contained in the ScrollingManager are moved accordingly. If a layer
    would be moved outside of its defined px_width, px_height then the
scrolling is restricted. The resultant restricted focal point is stored
on the ScrollingManager as restricted_fx and restricted_fy.
**force_focus(x, y)**
Force setting the focus to the pixel coordinates given. The layer(s)
contained in the ScrollingManager are moved accordingly regardless of
whether any out-of-bounds cells would be displayed. The .fx and .fy
attributes are still set, but they'll *always* be set to the supplied
x and y values.
"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
from cocos.director import director
from .base_layers import Layer
import pyglet
from pyglet.gl import *
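# A minimal usage sketch of the focus API described in the module docstring
# (illustrative only; the map layer, scene and player objects are assumptions,
# not part of this module):
#
#     scroller = ScrollingManager()
#     scroller.add(my_map_layer)                   # a ScrollableLayer with px_width/px_height
#     scroller.add(ScrollableLayer(parallax=0.5))  # background scrolling at half speed
#     scene = cocos.scene.Scene(scroller)
#     ...
#     scroller.set_focus(player.x, player.y)    # clamped to the layers' boundaries
#     scroller.force_focus(player.x, player.y)  # unclamped; may expose out-of-bounds area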
class ScrollableLayer(Layer):
"""A Cocos Layer that is scrollable in a Scene.
A layer may have a "parallax" value which is used to scale the position
(and not the dimensions) of the view of the layer - the layer's view
(x, y) coordinates are calculated as::
my_view_x = parallax * passed_view_x
my_view_y = parallax * passed_view_y
Scrollable layers have a view which identifies the section of the layer
currently visible.
The scrolling is usually managed by a ScrollingManager.
    Don't change scale_x, scale_y from the default 1.0 or scrolling and
    coordinate changes will fail.
"""
view_x, view_y = 0, 0
view_w, view_h = 0, 0
origin_x = origin_y = origin_z = 0
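    # Worked example of the parallax mapping from the class docstring
    # (illustrative, not original code): with parallax=0.5, a passed view
    # origin of (200, 100) becomes a layer view origin of (100, 50); the
    # layer's dimensions are unaffected.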
def __init__(self, parallax=1):
super(ScrollableLayer, self).__init__()
self.parallax = parallax
# force (cocos) transform anchor to be 0 so we don't OpenGL
# glTranslate() and screw up our pixel alignment on screen
self.transform_anchor_x = 0
self.transform_anchor_y = 0
# XXX batch eh?
self.batch = pyglet.graphics.Batch()
def on_enter(self):
director.push_handlers(self.on_cocos_resize)
super(ScrollableLayer, self).on_enter()
def on_exit(self):
super(ScrollableLayer, self).on_exit()
director.pop_handlers()
def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0):
x *= self.parallax
y *= self.parallax
self.view_x, self.view_y = x, y
self.view_w, self.view_h = w, h
# print self, 'set_view - x, y, w, h:', self.view_x, self.view_y, self.view_w, self.view_h
x -= self.origin_x
y -= self.origin_y
x -= viewport_ox
y -= viewport_oy
self.position = (-x, -y)
def draw(self):
# invoked by Cocos machinery
super(ScrollableLayer, self).draw()
# XXX overriding draw eh?
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
def set_dirty(self):
"""The viewport has changed in some way.
"""
pass
def on_cocos_resize(self, usable_width, usable_height):
self.set_dirty()
class ScrollingManager(Layer):
"""Manages scrolling of Layers in a Cocos Scene.
Each ScrollableLayer that is added to this manager (via standard list
methods) may have pixel dimensions .px_width and .px_height. Tile
    module MapLayers have these attributes. The manager will limit scrolling
to stay within the pixel boundary of the most limiting layer.
If a layer has no dimensions it will scroll freely and without bound.
The manager is initialised with the viewport (usually a Window) which has
the pixel dimensions .width and .height which are used during focusing.
A ScrollingManager knows how to convert pixel coordinates from its own
pixel space to the screen space.
"""
def __init__(self, viewport=None, do_not_scale=None):
if do_not_scale is None:
do_not_scale = not director.autoscale
self.autoscale = not do_not_scale and director.autoscale
self.viewport = viewport
# These variables define the Layer-space pixel view which is mapping
# to the viewport. If the Layer is not scrolled or scaled then this
# will be a one to one mapping.
self.view_x, self.view_y = 0, 0
self.view_w, self.view_h = 1, 1
self.childs_ox = 0
self.childs_oy = 0
# Focal point on the Layer
self.fx = self.fy = 0
super(ScrollingManager, self).__init__()
# always transform about 0,0
self.transform_anchor_x = 0
self.transform_anchor_y = 0
def on_enter(self):
super(ScrollingManager, self).on_enter()
director.push_handlers(self.on_cocos_resize)
self.update_view_size()
self.refresh_focus()
def on_exit(self):
director.pop_handlers()
super(ScrollingManager, self).on_exit()
def update_view_size(self):
if self.viewport is not None:
self.view_w, self.view_h = self.viewport.width, self.viewport.height
self.view_x, self.view_y = getattr(self.viewport, 'position', (0, 0))
if not director.autoscale:
self._scissor_flat = (self.view_x, self.view_y,
self.view_w, self.view_h)
else:
w, h = director.get_window_size()
sx = director._usable_width / w
sy = director._usable_height / h
self._scissor_flat = (int(self.view_x * sx), int(self.view_y * sy),
int(self.view_w * sx), int(self.view_h * sy))
elif self.autoscale:
self.view_w, self.view_h = director.get_window_size()
else:
self.view_w = director._usable_width
self.view_h = director._usable_height
def on_cocos_resize(self, usable_width, usable_height):
# when using an explicit viewport you should adjust the viewport for
# resize changes here, before the lines that follows.
# Also, if your app performs other changes in viewport it should
# use the lines that follows to update viewport-related internal state
self.update_view_size()
self.refresh_focus()
def refresh_focus(self):
if self.children:
self._old_focus = None # disable NOP check
self.set_focus(self.fx, self.fy)
_scale = 1.0
def set_scale(self, scale):
self._scale = 1.0 * scale
self.refresh_focus()
scale = property(lambda s: s._scale, set_scale)
def add(self, child, z=0, name=None):
"""Add the child and then update the manager's focus / viewport.
"""
super(ScrollingManager, self).add(child, z=z, name=name)
# set the focus again and force it so we don't just skip because the
# focal point hasn't changed
self.set_focus(self.fx, self.fy, force=True)
def pixel_from_screen(self, x, y):
"""Look up the Layer-space pixel matching the screen-space pixel.
Account for viewport, layer and screen transformations.
"""
# director display scaling
if director.autoscale:
x, y = director.get_virtual_coordinates(x, y)
# normalise x,y coord
ww, wh = director.get_window_size()
sx = x / self.view_w
sy = y / self.view_h
# get the map-space dimensions
vx, vy = self.childs_ox, self.childs_oy
# get our scaled view size
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
# print (int(x), int(y)), (vx, vy, w, h), int(vx + sx * w), int(vy + sy * h)
# convert screen pixel to map pixel
return int(vx + sx * w), int(vy + sy * h)
def pixel_to_screen(self, x, y):
"""Look up the screen-space pixel matching the Layer-space pixel.
Account for viewport, layer and screen transformations.
"""
screen_x = self.scale * (x - self.childs_ox)
screen_y = self.scale * (y - self.childs_oy)
return int(screen_x), int(screen_y)
_old_focus = None
def set_focus(self, fx, fy, force=False):
"""Determine the viewport based on a desired focus pixel in the
Layer space (fx, fy) and honoring any bounding restrictions of
child layers.
The focus will always be shifted to ensure no child layers display
out-of-bounds data, as defined by their dimensions px_width and px_height.
"""
# if no child specifies dimensions then just force the focus
if not [l for z, l in self.children if hasattr(l, 'px_width')]:
return self.force_focus(fx, fy)
# This calculation takes into account the scaling of this Layer (and
# therefore also its children).
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
fx, fy = int(fx), int(fy)
self.fx, self.fy = fx, fy
a = (fx, fy, self.scale)
# check for NOOP (same arg passed in)
if not force and self._old_focus == a:
return
self._old_focus = a
# collate children dimensions
x1 = []
y1 = []
x2 = []
y2 = []
for z, layer in self.children:
if not hasattr(layer, 'px_width'):
continue
x1.append(layer.origin_x)
y1.append(layer.origin_y)
x2.append(layer.origin_x + layer.px_width)
y2.append(layer.origin_y + layer.px_height)
# figure the child layer min/max bounds
b_min_x = min(x1)
b_min_y = min(y1)
b_max_x = min(x2)
b_max_y = min(y2)
# get our viewport information, scaled as appropriate
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
w2, h2 = w // 2, h // 2
if (b_max_x - b_min_x) <= w:
            # this branch keeps the view nicely centered and avoids a view jump
            # when crossing the center; used when world width <= view width
restricted_fx = (b_max_x + b_min_x) / 2
else:
if (fx - w2) < b_min_x:
restricted_fx = b_min_x + w2 # hit minimum X extent
elif (fx + w2) > b_max_x:
restricted_fx = b_max_x - w2 # hit maximum X extent
else:
restricted_fx = fx
if (b_max_y - b_min_y) <= h:
            # this branch keeps the view nicely centered and avoids a view jump
            # when crossing the center; used when world height <= view height
restricted_fy = (b_max_y + b_min_y) / 2
else:
if (fy - h2) < b_min_y:
restricted_fy = b_min_y + h2 # hit minimum Y extent
elif (fy + h2) > b_max_y:
restricted_fy = b_max_y - h2 # hit maximum Y extent
else:
restricted_fy = fy
# ... and this is our focus point, center of screen
self.restricted_fx = int(restricted_fx)
self.restricted_fy = int(restricted_fy)
# determine child view bounds to match that focus point
x, y = int(restricted_fx - w2), int(restricted_fy - h2)
childs_scroll_x = x # - self.view_x/self.scale
childs_scroll_y = y # - self.view_y/self.scale
self.childs_ox = childs_scroll_x - self.view_x/self.scale
self.childs_oy = childs_scroll_y - self.view_y/self.scale
for z, layer in self.children:
layer.set_view(childs_scroll_x, childs_scroll_y, w, h,
self.view_x / self.scale, self.view_y / self.scale)
def force_focus(self, fx, fy):
"""Force the manager to focus on a point, regardless of any managed layer
visible boundaries.
"""
# This calculation takes into account the scaling of this Layer (and
# therefore also its children).
        # The result is that all children will have their viewport set, defining
# which of their pixels should be visible.
        self.fx, self.fy = int(fx), int(fy)
# get our scaled view size
w = int(self.view_w / self.scale)
h = int(self.view_h / self.scale)
w2, h2 = w // 2, h // 2
# bottom-left corner of the
x, y = fx - w2, fy - h2
childs_scroll_x = x # - self.view_x/self.scale
childs_scroll_y = y # - self.view_y/self.scale
self.childs_ox = childs_scroll_x - self.view_x/self.scale
self.childs_oy = childs_scroll_y - self.view_y/self.scale
for z, layer in self.children:
layer.set_view(childs_scroll_x, childs_scroll_y, w, h,
self.view_x / self.scale, self.view_y / self.scale)
def set_state(self):
# preserve gl scissors info
self._scissor_enabled = glIsEnabled(GL_SCISSOR_TEST)
self._old_scissor_flat = (GLint * 4)() # 4-tuple
glGetIntegerv(GL_SCISSOR_BOX, self._old_scissor_flat)
# set our scissor
if not self._scissor_enabled:
glEnable(GL_SCISSOR_TEST)
glScissor(*self._scissor_flat)
def unset_state(self):
# restore gl scissors info
glScissor(*self._old_scissor_flat)
if not self._scissor_enabled:
glDisable(GL_SCISSOR_TEST)
def visit(self):
if self.viewport is not None:
self.set_state()
super(ScrollingManager, self).visit()
self.unset_state()
else:
super(ScrollingManager, self).visit()
| bsd-3-clause | -3,472,731,962,668,502,000 | 1,576,797,495,589,027,300 | 36.693023 | 98 | 0.614265 | false |
benspaulding/django | django/utils/encoding.py | 6 | 7319 | from __future__ import unicode_literals
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
type(None),
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% in performance when s
# is an instance of unicode. This function gets called often in that
# setting.
if isinstance(s, unicode):
return s
if strings_only and is_protected_type(s):
return s
try:
        if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
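# For instance (sketch, assuming UTF-8 input): force_unicode('caf\xc3\xa9') returns
# u'caf\xe9', while force_unicode(42, strings_only=True) returns the int 42 untouched.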
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
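# For instance (sketch): iri_to_uri(u'/I \u2665 Django/') returns
# '/I%20%E2%99%A5%20Django/' -- non-ASCII characters and spaces are
# percent-encoded, while reserved characters such as '?' and '=' are left alone.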
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return urllib.quote(smart_str(path).replace("\\", "/"), safe=b"/~!*()'")
# The encoding of the default system locale; falls back to 'ascii' if the
# encoding is unsupported by python or could not be determined.
# See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
| bsd-3-clause | 2,142,646,133,651,087,600 | -7,034,411,497,004,115,000 | 38.349462 | 79 | 0.612925 | false |
GoogleChromeLabs/chromeos_smart_card_connector | third_party/googletest/src/googlemock/scripts/generator/cpp/keywords.py | 19 | 1952 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
| apache-2.0 | 5,486,435,805,670,912,000 | 6,275,203,351,383,155,000 | 33.857143 | 97 | 0.728996 | false |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/uu.py | 251 | 6555 | #! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
opened_files = []
try:
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
opened_files.append(in_file)
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
out_file = open(out_file, 'wb')
opened_files.append(out_file)
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(' \nend\n')
finally:
for f in opened_files:
f.close()
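# Example usage (sketch; file names are hypothetical):
#   encode('photo.jpg', 'photo.uu')   # writes a "begin <mode> photo.jpg" header + data
#   decode('photo.uu')                # recreates photo.jpg with that mode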
def decode(in_file, out_file=None, mode=None, quiet=0):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
opened_files = []
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
in_file = open(in_file)
opened_files.append(in_file)
try:
#
# Read until a begin is encountered or we've exhausted the file
#
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith('begin'):
continue
hdrfields = hdr.split(' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip()
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
fp = open(out_file, 'wb')
try:
                os.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
opened_files.append(out_file)
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip() != 'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
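                # The first character of a uuencoded line stores the number of
                # data bytes it carries, so recompute how many encoded characters
                # are actually valid and decode only that prefix of the line.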
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
out_file.write(data)
s = in_file.readline()
if not s:
raise Error('Truncated input file')
finally:
for f in opened_files:
f.close()
def test():
"""uuencode/uudecode main program"""
import optparse
parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) > 2:
parser.error('incorrect number of arguments')
sys.exit(1)
input = sys.stdin
output = sys.stdout
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if options.decode:
if options.text:
if isinstance(output, basestring):
output = open(output, 'w')
else:
print sys.argv[0], ': cannot do -t to stdout'
sys.exit(1)
decode(input, output)
else:
if options.text:
if isinstance(input, basestring):
input = open(input, 'r')
else:
print sys.argv[0], ': cannot do -t from stdin'
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
| gpl-3.0 | -7,384,050,621,011,764,000 | 4,742,546,955,992,276,000 | 32.443878 | 145 | 0.557742 | false |
TangXT/GreatCatMOOC | common/lib/calc/calc/tests/test_calc.py | 55 | 20805 | """
Unit tests for calc.py
"""
import unittest
import numpy
import calc
from pyparsing import ParseException
# numpy's default behavior when it evaluates a function outside its domain
# is to raise a warning (not an exception) which is then printed to STDOUT.
# To prevent this from polluting the output of the tests, configure numpy to
# ignore it instead.
# See http://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
numpy.seterr(all='ignore') # Also: 'ignore', 'warn' (default), 'raise'
class EvaluatorTest(unittest.TestCase):
"""
Run tests for calc.evaluator
    Go through all functionality as specifically as possible --
    work from number input up to functions and complex expressions
Also test custom variable substitutions (i.e.
`evaluator({'x':3.0}, {}, '3*x')`
gives 9.0) and more.
"""
def test_number_input(self):
"""
Test different kinds of float inputs
See also
test_trailing_period (slightly different)
test_exponential_answer
test_si_suffix
"""
easy_eval = lambda x: calc.evaluator({}, {}, x)
self.assertEqual(easy_eval("13"), 13)
self.assertEqual(easy_eval("3.14"), 3.14)
self.assertEqual(easy_eval(".618033989"), 0.618033989)
self.assertEqual(easy_eval("-13"), -13)
self.assertEqual(easy_eval("-3.14"), -3.14)
self.assertEqual(easy_eval("-.618033989"), -0.618033989)
def test_period(self):
"""
The string '.' should not evaluate to anything.
"""
with self.assertRaises(ParseException):
calc.evaluator({}, {}, '.')
with self.assertRaises(ParseException):
calc.evaluator({}, {}, '1+.')
def test_trailing_period(self):
"""
Test that things like '4.' will be 4 and not throw an error
"""
self.assertEqual(4.0, calc.evaluator({}, {}, '4.'))
def test_exponential_answer(self):
"""
Test for correct interpretation of scientific notation
"""
answer = 50
correct_responses = [
"50", "50.0", "5e1", "5e+1",
"50e0", "50.0e0", "500e-1"
]
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
for input_str in correct_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to equal {1}".format(
input_str, answer
)
self.assertEqual(answer, result, msg=fail_msg)
for input_str in incorrect_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to not equal {1}".format(
input_str, answer
)
self.assertNotEqual(answer, result, msg=fail_msg)
def test_si_suffix(self):
"""
        Test calc.py's unique functionality of interpreting SI 'suffixes'.
        For instance 'k' stands for 'kilo-', so '1k' should be 1,000
"""
test_mapping = [
('4.2%', 0.042), ('2.25k', 2250), ('8.3M', 8300000),
('9.9G', 9.9e9), ('1.2T', 1.2e12), ('7.4c', 0.074),
('5.4m', 0.0054), ('8.7u', 0.0000087),
('5.6n', 5.6e-9), ('4.2p', 4.2e-12)
]
for (expr, answer) in test_mapping:
tolerance = answer * 1e-6 # Make rel. tolerance, because of floats
fail_msg = "Failure in testing suffix '{0}': '{1}' was not {2}"
fail_msg = fail_msg.format(expr[-1], expr, answer)
self.assertAlmostEqual(
calc.evaluator({}, {}, expr), answer,
delta=tolerance, msg=fail_msg
)
def test_operator_sanity(self):
"""
Test for simple things like '5+2' and '5/2'
"""
var1 = 5.0
var2 = 2.0
operators = [('+', 7), ('-', 3), ('*', 10), ('/', 2.5), ('^', 25)]
for (operator, answer) in operators:
input_str = "{0} {1} {2}".format(var1, operator, var2)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on operator '{0}': '{1}' was not {2}".format(
operator, input_str, answer
)
self.assertEqual(answer, result, msg=fail_msg)
def test_raises_zero_division_err(self):
"""
Ensure division by zero gives an error
"""
with self.assertRaises(ZeroDivisionError):
calc.evaluator({}, {}, '1/0')
with self.assertRaises(ZeroDivisionError):
calc.evaluator({}, {}, '1/0.0')
with self.assertRaises(ZeroDivisionError):
calc.evaluator({'x': 0.0}, {}, '1/x')
def test_parallel_resistors(self):
"""
Test the parallel resistor operator ||
The formula is given by
a || b || c ...
= 1 / (1/a + 1/b + 1/c + ...)
        It is the resistance of a parallel circuit of resistors with resistances
        a, b, c, etc. See if this evaluates correctly.
"""
self.assertEqual(calc.evaluator({}, {}, '1||1'), 0.5)
self.assertEqual(calc.evaluator({}, {}, '1||1||2'), 0.4)
self.assertEqual(calc.evaluator({}, {}, "j||1"), 0.5 + 0.5j)
def test_parallel_resistors_with_zero(self):
"""
Check the behavior of the || operator with 0
"""
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0.0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({'x': 0.0}, {}, 'x||1')))
def assert_function_values(self, fname, ins, outs, tolerance=1e-3):
"""
Helper function to test many values at once
Test the accuracy of evaluator's use of the function given by fname
Specifically, the equality of `fname(ins[i])` against outs[i].
This is used later to test a whole bunch of f(x) = y at a time
"""
for (arg, val) in zip(ins, outs):
input_str = "{0}({1})".format(fname, arg)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on function {0}: '{1}' was not {2}".format(
fname, input_str, val
)
self.assertAlmostEqual(val, result, delta=tolerance, msg=fail_msg)
def test_trig_functions(self):
"""
Test the trig functions provided in calc.py
which are: sin, cos, tan, arccos, arcsin, arctan
"""
angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']
sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]
cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]
tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]
# Cannot test tan(pi/2) b/c pi/2 is a float and not precise...
self.assert_function_values('sin', angles, sin_values)
self.assert_function_values('cos', angles, cos_values)
self.assert_function_values('tan', angles, tan_values)
# Include those where the real part is between -pi/2 and pi/2
arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']
arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]
self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)
# Rather than a complex number, numpy.arcsin gives nan
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))
# Include those where the real part is between 0 and pi
arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']
arccos_angles = [0, 0.524, 0.628, 1 + 1j]
self.assert_function_values('arccos', arccos_inputs, arccos_angles)
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))
# Has the same range as arcsin
arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']
arctan_angles = arcsin_angles
self.assert_function_values('arctan', arctan_inputs, arctan_angles)
def test_reciprocal_trig_functions(self):
"""
Test the reciprocal trig functions provided in calc.py
which are: sec, csc, cot, arcsec, arccsc, arccot
"""
angles = ['-pi/4', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']
sec_values = [1.414, 1.155, 1.236, -1.414, 1.414, 0.498 + 0.591j]
csc_values = [-1.414, 2, 1.701, -1.414, 1.414, 0.622 - 0.304j]
cot_values = [-1, 1.732, 1.376, 1, 1, 0.218 - 0.868j]
self.assert_function_values('sec', angles, sec_values)
self.assert_function_values('csc', angles, csc_values)
self.assert_function_values('cot', angles, cot_values)
arcsec_inputs = ['1.1547', '1.2361', '2', '-2', '-1.4142', '0.4983+0.5911*j']
arcsec_angles = [0.524, 0.628, 1.047, 2.094, 2.356, 1 + 1j]
self.assert_function_values('arcsec', arcsec_inputs, arcsec_angles)
arccsc_inputs = ['-1.1547', '-1.4142', '2', '1.7013', '1.1547', '0.6215-0.3039*j']
arccsc_angles = [-1.047, -0.785, 0.524, 0.628, 1.047, 1 + 1j]
self.assert_function_values('arccsc', arccsc_inputs, arccsc_angles)
# Has the same range as arccsc
arccot_inputs = ['-0.5774', '-1', '1.7321', '1.3764', '0.5774', '(0.2176-0.868*j)']
arccot_angles = arccsc_angles
self.assert_function_values('arccot', arccot_inputs, arccot_angles)
def test_hyperbolic_functions(self):
"""
Test the hyperbolic functions
which are: sinh, cosh, tanh, sech, csch, coth
"""
inputs = ['0', '0.5', '1', '2', '1+j']
neg_inputs = ['0', '-0.5', '-1', '-2', '-1-j']
negate = lambda x: [-k for k in x]
# sinh is odd
sinh_vals = [0, 0.521, 1.175, 3.627, 0.635 + 1.298j]
self.assert_function_values('sinh', inputs, sinh_vals)
self.assert_function_values('sinh', neg_inputs, negate(sinh_vals))
# cosh is even - do not negate
cosh_vals = [1, 1.128, 1.543, 3.762, 0.834 + 0.989j]
self.assert_function_values('cosh', inputs, cosh_vals)
self.assert_function_values('cosh', neg_inputs, cosh_vals)
# tanh is odd
tanh_vals = [0, 0.462, 0.762, 0.964, 1.084 + 0.272j]
self.assert_function_values('tanh', inputs, tanh_vals)
self.assert_function_values('tanh', neg_inputs, negate(tanh_vals))
# sech is even - do not negate
sech_vals = [1, 0.887, 0.648, 0.266, 0.498 - 0.591j]
self.assert_function_values('sech', inputs, sech_vals)
self.assert_function_values('sech', neg_inputs, sech_vals)
# the following functions do not have 0 in their domain
inputs = inputs[1:]
neg_inputs = neg_inputs[1:]
# csch is odd
csch_vals = [1.919, 0.851, 0.276, 0.304 - 0.622j]
self.assert_function_values('csch', inputs, csch_vals)
self.assert_function_values('csch', neg_inputs, negate(csch_vals))
# coth is odd
coth_vals = [2.164, 1.313, 1.037, 0.868 - 0.218j]
self.assert_function_values('coth', inputs, coth_vals)
self.assert_function_values('coth', neg_inputs, negate(coth_vals))
def test_hyperbolic_inverses(self):
"""
Test the inverse hyperbolic functions
which are of the form arc[X]h
"""
results = [0, 0.5, 1, 2, 1 + 1j]
sinh_vals = ['0', '0.5211', '1.1752', '3.6269', '0.635+1.2985*j']
self.assert_function_values('arcsinh', sinh_vals, results)
cosh_vals = ['1', '1.1276', '1.5431', '3.7622', '0.8337+0.9889*j']
self.assert_function_values('arccosh', cosh_vals, results)
tanh_vals = ['0', '0.4621', '0.7616', '0.964', '1.0839+0.2718*j']
self.assert_function_values('arctanh', tanh_vals, results)
sech_vals = ['1.0', '0.8868', '0.6481', '0.2658', '0.4983-0.5911*j']
self.assert_function_values('arcsech', sech_vals, results)
results = results[1:]
csch_vals = ['1.919', '0.8509', '0.2757', '0.3039-0.6215*j']
self.assert_function_values('arccsch', csch_vals, results)
coth_vals = ['2.164', '1.313', '1.0373', '0.868-0.2176*j']
self.assert_function_values('arccoth', coth_vals, results)
def test_other_functions(self):
"""
Test the non-trig functions provided in calc.py
Specifically:
sqrt, log10, log2, ln, abs,
fact, factorial
"""
# Test sqrt
self.assert_function_values(
'sqrt',
[0, 1, 2, 1024], # -1
[0, 1, 1.414, 32] # 1j
)
# sqrt(-1) is NAN not j (!!).
# Test logs
self.assert_function_values(
'log10',
[0.1, 1, 3.162, 1000000, '1+j'],
[-1, 0, 0.5, 6, 0.151 + 0.341j]
)
self.assert_function_values(
'log2',
[0.5, 1, 1.414, 1024, '1+j'],
[-1, 0, 0.5, 10, 0.5 + 1.133j]
)
self.assert_function_values(
'ln',
[0.368, 1, 1.649, 2.718, 42, '1+j'],
[-1, 0, 0.5, 1, 3.738, 0.347 + 0.785j]
)
# Test abs
self.assert_function_values('abs', [-1, 0, 1, 'j'], [1, 0, 1, 1])
# Test factorial
fact_inputs = [0, 1, 3, 7]
fact_values = [1, 1, 6, 5040]
self.assert_function_values('fact', fact_inputs, fact_values)
self.assert_function_values('factorial', fact_inputs, fact_values)
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(0.5)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(0.5)")
def test_constants(self):
"""
Test the default constants provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Of the form ('expr', python value, tolerance (or None for exact))
default_variables = [
('i', 1j, None),
('j', 1j, None),
('e', 2.7183, 1e-4),
('pi', 3.1416, 1e-4),
('k', 1.3806488e-23, 1e-26), # Boltzmann constant (Joules/Kelvin)
('c', 2.998e8, 1e5), # Light Speed in (m/s)
('T', 298.15, 0.01), # Typical room temperature (Kelvin)
('q', 1.602176565e-19, 1e-22) # Fund. Charge (Coulombs)
]
for (variable, value, tolerance) in default_variables:
fail_msg = "Failed on constant '{0}', not within bounds".format(
variable
)
result = calc.evaluator({}, {}, variable)
if tolerance is None:
self.assertEqual(value, result, msg=fail_msg)
else:
self.assertAlmostEqual(
value, result,
delta=tolerance, msg=fail_msg
)
def test_complex_expression(self):
"""
Calculate combinations of operators and default functions
"""
self.assertAlmostEqual(
calc.evaluator({}, {}, "(2^2+1.0)/sqrt(5e0)*5-1"),
10.180,
delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "1+1/(1+1/(1+1/(1+1)))"),
1.6,
delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "10||sin(7+5)"),
-0.567, delta=0.01
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "sin(e)"),
0.41, delta=0.01
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "k*T/q"),
0.025, delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "e^(j*pi)"),
-1, delta=1e-5
)
def test_explicit_sci_notation(self):
"""
        Expressions like 1.6*10^-3 (as opposed to 1.6e-3) should evaluate correctly.
"""
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^-3"),
-0.0016
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^(-3)"),
-0.0016
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^3"),
-1600
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^(3)"),
-1600
)
def test_simple_vars(self):
"""
Substitution of variables into simple equations
"""
variables = {'x': 9.72, 'y': 7.91, 'loooooong': 6.4}
# Should not change value of constant
# even with different numbers of variables...
self.assertEqual(calc.evaluator({'x': 9.72}, {}, '13'), 13)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, '13'), 13)
self.assertEqual(calc.evaluator(variables, {}, '13'), 13)
# Easy evaluation
self.assertEqual(calc.evaluator(variables, {}, 'x'), 9.72)
self.assertEqual(calc.evaluator(variables, {}, 'y'), 7.91)
self.assertEqual(calc.evaluator(variables, {}, 'loooooong'), 6.4)
# Test a simple equation
self.assertAlmostEqual(
calc.evaluator(variables, {}, '3*x-y'),
21.25, delta=0.01 # = 3 * 9.72 - 7.91
)
self.assertAlmostEqual(
calc.evaluator(variables, {}, 'x*y'),
76.89, delta=0.01
)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, "13"), 13)
self.assertEqual(calc.evaluator(variables, {}, "13"), 13)
self.assertEqual(
calc.evaluator(
{'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.6600949841121},
{}, "5"
),
5
)
def test_variable_case_sensitivity(self):
"""
Test the case sensitivity flag and corresponding behavior
"""
self.assertEqual(
calc.evaluator({'R1': 2.0, 'R3': 4.0}, {}, "r1*r3"),
8.0
)
variables = {'t': 1.0}
self.assertEqual(calc.evaluator(variables, {}, "t"), 1.0)
self.assertEqual(calc.evaluator(variables, {}, "T"), 1.0)
self.assertEqual(
calc.evaluator(variables, {}, "t", case_sensitive=True),
1.0
)
# Recall 'T' is a default constant, with value 298.15
self.assertAlmostEqual(
calc.evaluator(variables, {}, "T", case_sensitive=True),
298, delta=0.2
)
def test_simple_funcs(self):
"""
        Substitution of custom functions
"""
variables = {'x': 4.712}
functions = {'id': lambda x: x}
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator(variables, functions, 'id(x)'), 4.712)
functions.update({'f': numpy.sin})
self.assertAlmostEqual(
calc.evaluator(variables, functions, 'f(x)'),
-1, delta=1e-3
)
def test_function_case_insensitive(self):
"""
Test case insensitive evaluation
Normal functions with some capitals should be fine
"""
self.assertAlmostEqual(
-0.28,
calc.evaluator({}, {}, 'SiN(6)', case_sensitive=False),
delta=1e-3
)
def test_function_case_sensitive(self):
"""
Test case sensitive evaluation
Incorrectly capitilized should fail
Also, it should pick the correct version of a function.
"""
with self.assertRaisesRegexp(calc.UndefinedVariable, 'SiN'):
calc.evaluator({}, {}, 'SiN(6)', case_sensitive=True)
# With case sensitive turned on, it should pick the right function
functions = {'f': lambda x: x, 'F': lambda x: x + 1}
self.assertEqual(
6, calc.evaluator({}, functions, 'f(6)', case_sensitive=True)
)
self.assertEqual(
7, calc.evaluator({}, functions, 'F(6)', case_sensitive=True)
)
def test_undefined_vars(self):
"""
Check to see if the evaluator catches undefined variables
"""
variables = {'R1': 2.0, 'R3': 4.0}
with self.assertRaisesRegexp(calc.UndefinedVariable, 'QWSEKO'):
calc.evaluator({}, {}, "5+7*QWSEKO")
with self.assertRaisesRegexp(calc.UndefinedVariable, 'r2'):
calc.evaluator({'r1': 5}, {}, "r1+r2")
with self.assertRaisesRegexp(calc.UndefinedVariable, 'r1 r3'):
calc.evaluator(variables, {}, "r1*r3", case_sensitive=True)
| agpl-3.0 | -4,783,157,679,308,810,000 | 7,485,744,412,799,293,000 | 36.151786 | 91 | 0.534102 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/oauth2_provider/views/application.py | 3 | 2342 | from django.core.urlresolvers import reverse_lazy
from django.forms.models import modelform_factory
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from braces.views import LoginRequiredMixin
from ..models import get_application_model
class ApplicationOwnerIsUserMixin(LoginRequiredMixin):
"""
This mixin is used to provide an Application queryset filtered by the current request.user.
"""
fields = '__all__'
def get_queryset(self):
return get_application_model().objects.filter(user=self.request.user)
class ApplicationRegistration(LoginRequiredMixin, CreateView):
"""
View used to register a new Application for the request.user
"""
template_name = "oauth2_provider/application_registration_form.html"
def get_form_class(self):
"""
Returns the form class for the application model
"""
return modelform_factory(
get_application_model(),
fields=('name', 'client_id', 'client_secret', 'client_type',
'authorization_grant_type', 'redirect_uris')
)
def form_valid(self, form):
form.instance.user = self.request.user
return super(ApplicationRegistration, self).form_valid(form)
class ApplicationDetail(ApplicationOwnerIsUserMixin, DetailView):
"""
Detail view for an application instance owned by the request.user
"""
context_object_name = 'application'
template_name = "oauth2_provider/application_detail.html"
class ApplicationList(ApplicationOwnerIsUserMixin, ListView):
"""
List view for all the applications owned by the request.user
"""
context_object_name = 'applications'
template_name = "oauth2_provider/application_list.html"
class ApplicationDelete(ApplicationOwnerIsUserMixin, DeleteView):
"""
View used to delete an application owned by the request.user
"""
context_object_name = 'application'
success_url = reverse_lazy('oauth2_provider:list')
template_name = "oauth2_provider/application_confirm_delete.html"
class ApplicationUpdate(ApplicationOwnerIsUserMixin, UpdateView):
"""
View used to update an application owned by the request.user
"""
context_object_name = 'application'
template_name = "oauth2_provider/application_form.html"
| agpl-3.0 | -6,375,141,724,452,558,000 | -4,453,840,642,715,351,000 | 31.985915 | 95 | 0.711358 | false |
brainelectronics/towerdefense | _build/lib/pyglet/media/drivers/pulse/__init__.py | 7 | 18560 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
import lib_pulseaudio as pa
from pyglet.media import AbstractAudioDriver, AbstractAudioPlayer, \
AbstractListener, MediaException, MediaEvent
import pyglet
_debug = pyglet.options['debug_media']
def check(result):
if result < 0:
error = pa.pa_context_errno(context._context)
raise MediaException(pa.pa_strerror(error))
return result
def check_not_null(value):
if not value:
error = pa.pa_context_errno(context._context)
raise MediaException(pa.pa_strerror(error))
return value
def noop(*args):
"""Empty callback to replace deleted callbacks in PA"""
pass
class PulseAudioDriver(AbstractAudioDriver):
_context = None
def __init__(self):
self.threaded_mainloop = pa.pa_threaded_mainloop_new()
self.mainloop = pa.pa_threaded_mainloop_get_api(
self.threaded_mainloop)
self._players = pyglet.app.WeakSet()
self._listener = PulseAudioListener(self)
def create_audio_player(self, source_group, player):
player = PulseAudioPlayer(source_group, player)
self._players.add(player)
return player
def connect(self, server=None):
'''Connect to pulseaudio server.
:Parameters:
`server` : str
Server to connect to, or ``None`` for the default local
server (which may be spawned as a daemon if no server is
found).
'''
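        # Example (sketch): driver.connect() joins the default local server;
        # passing a PulseAudio server string (e.g. 'tcp:somehost', purely
        # illustrative here) targets a remote server instead.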
# TODO disconnect from old
assert not self._context, 'Already connected'
# Create context
app_name = self.get_app_name()
self._context = pa.pa_context_new(self.mainloop, app_name.encode('ASCII'))
# Context state callback
self._state_cb_func = pa.pa_context_notify_cb_t(self._state_cb)
pa.pa_context_set_state_callback(self._context,
self._state_cb_func, None)
# Connect
check(
pa.pa_context_connect(self._context, server, 0, None)
)
self.lock()
check(
pa.pa_threaded_mainloop_start(self.threaded_mainloop)
)
try:
# Wait for context ready.
self.wait()
if pa.pa_context_get_state(self._context) != pa.PA_CONTEXT_READY:
check(-1)
finally:
self.unlock()
def _state_cb(self, context, userdata):
if _debug:
print 'context state cb'
state = pa.pa_context_get_state(self._context)
if state in (pa.PA_CONTEXT_READY,
pa.PA_CONTEXT_TERMINATED,
pa.PA_CONTEXT_FAILED):
self.signal()
def lock(self):
'''Lock the threaded mainloop against events. Required for all
calls into PA.'''
pa.pa_threaded_mainloop_lock(self.threaded_mainloop)
def unlock(self):
'''Unlock the mainloop thread.'''
pa.pa_threaded_mainloop_unlock(self.threaded_mainloop)
def signal(self):
'''Signal the mainloop thread to break from a wait.'''
pa.pa_threaded_mainloop_signal(self.threaded_mainloop, 0)
def wait(self):
'''Wait for a signal.'''
pa.pa_threaded_mainloop_wait(self.threaded_mainloop)
def sync_operation(self, op):
'''Wait for an operation to be done or cancelled, then release it.
Uses a busy-loop -- make sure a callback is registered to
signal this listener.'''
while pa.pa_operation_get_state(op) == pa.PA_OPERATION_RUNNING:
pa.pa_threaded_mainloop_wait(self.threaded_mainloop)
pa.pa_operation_unref(op)
def async_operation(self, op):
'''Release the operation immediately without waiting for it to
complete.'''
pa.pa_operation_unref(op)
def get_app_name(self):
'''Get the application name as advertised to the pulseaudio server.'''
# TODO move app name into pyglet.app (also useful for OS X menu bar?).
return sys.argv[0]
def dump_debug_info(self):
print 'Client version: ', pa.pa_get_library_version()
print 'Server: ', pa.pa_context_get_server(self._context)
print 'Protocol: ', pa.pa_context_get_protocol_version(
self._context)
print 'Server protocol:', pa.pa_context_get_server_protocol_version(
self._context)
print 'Local context: ', (
pa.pa_context_is_local(self._context) and 'Yes' or 'No')
def delete(self):
'''Completely shut down pulseaudio client.'''
self.lock()
pa.pa_context_unref(self._context)
self.unlock()
pa.pa_threaded_mainloop_stop(self.threaded_mainloop)
pa.pa_threaded_mainloop_free(self.threaded_mainloop)
self.threaded_mainloop = None
self.mainloop = None
def get_listener(self):
return self._listener
class PulseAudioListener(AbstractListener):
def __init__(self, driver):
self.driver = driver
def _set_volume(self, volume):
self._volume = volume
for player in self.driver._players:
player.set_volume(player._volume)
def _set_position(self, position):
self._position = position
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
class PulseAudioPlayer(AbstractAudioPlayer):
_volume = 1.0
def __init__(self, source_group, player):
super(PulseAudioPlayer, self).__init__(source_group, player)
self._events = []
self._timestamps = [] # List of (ref_time, timestamp)
self._write_index = 0 # Current write index (tracked manually)
self._read_index_valid = False # True only if buffer has non-stale data
self._clear_write = False
self._buffered_audio_data = None
self._underflow_is_eos = False
self._playing = False
audio_format = source_group.audio_format
assert audio_format
# Create sample_spec
sample_spec = pa.pa_sample_spec()
if audio_format.sample_size == 8:
sample_spec.format = pa.PA_SAMPLE_U8
elif audio_format.sample_size == 16:
if sys.byteorder == 'little':
sample_spec.format = pa.PA_SAMPLE_S16LE
else:
sample_spec.format = pa.PA_SAMPLE_S16BE
else:
raise MediaException('Unsupported sample size')
sample_spec.rate = audio_format.sample_rate
sample_spec.channels = audio_format.channels
channel_map = None
self.sample_rate = audio_format.sample_rate
try:
context.lock()
# Create stream
self.stream = pa.pa_stream_new(context._context,
str(id(self)).encode('ASCII'),
sample_spec,
channel_map)
check_not_null(self.stream)
# Callback trampoline for success operations
self._success_cb_func = pa.pa_stream_success_cb_t(self._success_cb)
self._context_success_cb_func = \
pa.pa_context_success_cb_t(self._context_success_cb)
# Callback for underflow (to detect EOS when expected pa_timestamp
# does not get reached).
self._underflow_cb_func = \
pa.pa_stream_notify_cb_t(self._underflow_cb)
pa.pa_stream_set_underflow_callback(self.stream,
self._underflow_cb_func, None)
# Callback for data write
self._write_cb_func = pa.pa_stream_request_cb_t(self._write_cb)
pa.pa_stream_set_write_callback(self.stream,
self._write_cb_func, None)
# Connect to sink
device = None
buffer_attr = None
flags = (pa.PA_STREAM_START_CORKED |
pa.PA_STREAM_INTERPOLATE_TIMING |
pa.PA_STREAM_VARIABLE_RATE)
sync_stream = None # TODO use this
check(
pa.pa_stream_connect_playback(self.stream,
device,
buffer_attr,
flags,
None,
sync_stream)
)
# Wait for stream readiness
self._state_cb_func = pa.pa_stream_notify_cb_t(self._state_cb)
pa.pa_stream_set_state_callback(self.stream,
self._state_cb_func, None)
while pa.pa_stream_get_state(self.stream) == pa.PA_STREAM_CREATING:
context.wait()
if pa.pa_stream_get_state(self.stream) != pa.PA_STREAM_READY:
check(-1)
finally:
context.unlock()
if _debug:
print 'stream ready'
def _state_cb(self, stream, data):
context.signal()
def _success_cb(self, stream, success, data):
context.signal()
def _context_success_cb(self, ctxt, success, data):
context.signal()
def _write_cb(self, stream, bytes, data):
if _debug:
print 'write callback: %d bytes' % bytes
# Asynchronously update time
if self._events:
context.async_operation(
pa.pa_stream_update_timing_info(self.stream,
self._success_cb_func, None)
)
# Grab next audio packet, or leftovers from last callback.
if self._buffered_audio_data:
audio_data = self._buffered_audio_data
self._buffered_audio_data = None
else:
audio_data = self.source_group.get_audio_data(bytes)
seek_flag = pa.PA_SEEK_RELATIVE
if self._clear_write:
if _debug:
print 'seek PA_SEEK_RELATIVE_ON_READ'
seek_flag = pa.PA_SEEK_RELATIVE_ON_READ
self._clear_write = False
# Keep writing packets until `bytes` is depleted
while audio_data and bytes > 0:
if _debug:
print 'packet', audio_data.timestamp
if _debug and audio_data.events:
print 'events', audio_data.events
for event in audio_data.events:
event_index = self._write_index + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_index, event))
consumption = min(bytes, audio_data.length)
check(
pa.pa_stream_write(self.stream,
audio_data.data,
consumption,
pa.pa_free_cb_t(0), # Data is copied
0,
seek_flag)
)
seek_flag = pa.PA_SEEK_RELATIVE
self._read_index_valid = True
self._timestamps.append((self._write_index, audio_data.timestamp))
self._write_index += consumption
self._underflow_is_eos = False
if _debug:
print 'write', consumption
if consumption < audio_data.length:
audio_data.consume(consumption, self.source_group.audio_format)
self._buffered_audio_data = audio_data
break
bytes -= consumption
if bytes > 0:
audio_data = self.source_group.get_audio_data(bytes) #XXX name change
if not audio_data:
# Whole source group has been written. Any underflow encountered
# after now is the EOS.
self._underflow_is_eos = True
# In case the source group wasn't long enough to prebuffer stream
# to PA's satisfaction, trigger immediate playback (has no effect
# if stream is already playing).
if self._playing:
context.async_operation(
pa.pa_stream_trigger(self.stream,
pa.pa_stream_success_cb_t(0), None)
)
self._process_events()
def _underflow_cb(self, stream, data):
self._process_events()
if self._underflow_is_eos:
self._sync_dispatch_player_event('on_eos')
self._sync_dispatch_player_event('on_source_group_eos')
self._underflow_is_eos = False
if _debug:
print 'eos'
else:
if _debug:
print 'underflow'
# TODO: does PA automatically restart stream when buffered again?
# XXX: sometimes receive an underflow after EOS... need to filter?
def _process_events(self):
if not self._events:
return
timing_info = pa.pa_stream_get_timing_info(self.stream)
if not timing_info:
if _debug:
print 'abort _process_events'
return
read_index = timing_info.contents.read_index
while self._events and self._events[0][0] < read_index:
_, event = self._events.pop(0)
if _debug:
print 'dispatch event', event
event._sync_dispatch_to_player(self.player)
def _sync_dispatch_player_event(self, event, *args):
pyglet.app.platform_event_loop.post_event(self.player, event, *args)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if _debug:
print 'delete'
if not self.stream:
return
context.lock()
pa.pa_stream_disconnect(self.stream)
pa.pa_stream_set_state_callback(self.stream, pa.pa_stream_notify_cb_t(noop), None)
context.unlock()
pa.pa_stream_unref(self.stream)
self.stream = None
def clear(self):
if _debug:
print 'clear'
self._clear_write = True
self._write_index = self._get_read_index()
self._timestamps = []
self._events = []
context.lock()
self._read_index_valid = False
context.sync_operation(
pa.pa_stream_prebuf(self.stream, self._success_cb_func, None)
)
context.unlock()
def play(self):
if _debug:
print 'play'
context.lock()
context.async_operation(
pa.pa_stream_cork(self.stream, 0,
pa.pa_stream_success_cb_t(0), None)
)
# If whole stream has already been written, trigger immediate
# playback.
if self._underflow_is_eos:
context.async_operation(
pa.pa_stream_trigger(self.stream,
pa.pa_stream_success_cb_t(0), None)
)
context.unlock()
self._playing = True
def stop(self):
if _debug:
print 'stop'
context.lock()
context.async_operation(
pa.pa_stream_cork(self.stream, 1,
pa.pa_stream_success_cb_t(0), None)
)
context.unlock()
self._playing = False
def _get_read_index(self):
#time = pa.pa_usec_t()
context.lock()
context.sync_operation(
pa.pa_stream_update_timing_info(self.stream,
self._success_cb_func, None)
)
context.unlock()
timing_info = pa.pa_stream_get_timing_info(self.stream)
if timing_info:
read_index = timing_info.contents.read_index
else:
read_index = 0
if _debug:
print '_get_read_index ->', read_index
return read_index
def _get_write_index(self):
timing_info = pa.pa_stream_get_timing_info(self.stream)
if timing_info:
write_index = timing_info.contents.write_index
else:
write_index = 0
if _debug:
print '_get_write_index ->', write_index
return write_index
def get_time(self):
if not self._read_index_valid:
if _debug:
print 'get_time <_read_index_valid = False> -> None'
return
read_index = self._get_read_index()
write_index = 0
timestamp = 0.0
try:
write_index, timestamp = self._timestamps[0]
write_index, timestamp = self._timestamps[1]
while read_index >= write_index:
del self._timestamps[0]
write_index, timestamp = self._timestamps[1]
except IndexError:
pass
bytes_per_second = self.source_group.audio_format.bytes_per_second
time = timestamp + (read_index - write_index) / float(bytes_per_second)
if _debug:
print 'get_time ->', time
return time
def set_volume(self, volume):
self._volume = volume
if not self.stream:
return
volume *= context._listener._volume
cvolume = pa.pa_cvolume()
volume = pa.pa_sw_volume_from_linear(volume)
pa.pa_cvolume_set(cvolume,
self.source_group.audio_format.channels,
volume)
context.lock()
idx = pa.pa_stream_get_index(self.stream)
context.sync_operation(
pa.pa_context_set_sink_input_volume(context._context,
idx,
cvolume,
self._context_success_cb_func,
None)
)
context.unlock()
def set_pitch(self, pitch):
pa.pa_stream_update_sample_rate(self.stream,
int(pitch*self.sample_rate),
self._success_cb_func,
None)
def create_audio_driver():
global context
context = PulseAudioDriver()
context.connect()
if _debug:
context.dump_debug_info()
return context
| bsd-3-clause | 1,893,899,580,149,396,700 | -5,282,342,653,611,852,000 | 32.261649 | 90 | 0.536692 | false |
wlawski/libmots | tests/monkeyrunner-tests/t0037-shared-elem-avoid-long-press-landscape.py | 1 | 1970 | #
# The MIT License (MIT)
#
# Copyright (c) 2014 Wiktor Lawski <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# This test checks that after significantly moving the shared element in
# landscape mode, long press behavior will not be triggered without moving
# the finger away from the screen.
#
import inspect
import os
import sys
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
currentDir = os.environ["LIBMOTS_TEST_DIR"]
sys.path.insert(0, currentDir)
import helpers
calledScript = inspect.getfile(inspect.currentframe())
device = helpers.getDevice()
x = helpers.sharedElemX
y = helpers.sharedElemY
helpers.createSharedElement(device, True)
helpers.setLandscape(device)
device.touch(x, y, MonkeyDevice.DOWN)
helpers.moveSharedElement(device, x, y, -50, 50, False)
helpers.waitForLongPress()
result = device.takeSnapshot().getSubImage(helpers.landscapeRect)
helpers.checkResult(result, currentDir, calledScript)
| mit | -1,620,101,375,334,824,200 | -8,064,589,298,678,723,000 | 34.818182 | 79 | 0.784264 | false |
makerbot/ReplicatorG | skein_engines/skeinforge-50/fabmetheus_utilities/geometry/manipulation_meta/_copy.py | 12 | 2877 | """
Boolean geometry copy.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.creation import solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities import euclidean
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewDerivation(elementNode):
'Get new derivation.'
return CopyDerivation(elementNode)
def processElementNode(elementNode):
'Process the xml element.'
processElementNodeByDerivation(None, elementNode)
def processElementNodeByDerivation(derivation, elementNode):
'Process the xml element by derivation.'
if derivation == None:
derivation = CopyDerivation(elementNode)
if derivation.target == None:
print('Warning, copy could not get target for:')
print(elementNode)
return
del elementNode.attributes['target']
copyMatrix = matrix.getBranchMatrixSetElementNode(elementNode)
targetMatrix = matrix.getBranchMatrixSetElementNode(derivation.target)
targetDictionaryCopy = evaluate.removeIdentifiersFromDictionary(derivation.target.attributes.copy())
targetDictionaryCopy.update(elementNode.attributes)
elementNode.attributes = targetDictionaryCopy
euclidean.removeTrueFromDictionary(elementNode.attributes, 'visible')
elementNode.localName = derivation.target.localName
derivation.target.copyXMLChildNodes(elementNode.getIDSuffix(), elementNode)
elementNode.getXMLProcessor().processElementNode(elementNode)
if copyMatrix != None and targetMatrix != None:
elementNode.xmlObject.matrix4X4 = copyMatrix.getSelfTimesOther(targetMatrix.tetragrid)
if elementNode.xmlObject == None:
return
if len(elementNode.xmlObject.getPaths()) > 0:
lineation.processElementNode(elementNode)
return
geometryOutput = elementNode.xmlObject.getGeometryOutput()
if geometryOutput == None:
return
solidMatchingPlugins = solid.getSolidMatchingPlugins(elementNode)
if len(solidMatchingPlugins) == 0:
return
geometryOutput = solid.getGeometryOutputByManipulation(elementNode, geometryOutput)
elementNode.xmlObject.transformGeometryOutput(geometryOutput)
lineation.removeChildNodesFromElementObject(elementNode)
elementNode.getXMLProcessor().convertElementNode(elementNode, geometryOutput)
class CopyDerivation:
"Class to hold copy variables."
def __init__(self, elementNode):
'Set defaults.'
self.target = evaluate.getElementNodeByKey(elementNode, 'target')
| gpl-2.0 | 2,287,116,652,759,315,200 | -906,818,298,711,541,000 | 38.958333 | 157 | 0.809524 | false |
kevinge314gh/tornado | tornado/netutil.py | 91 | 20028 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import sys
import socket
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import u, Configurable, errno_from_exception
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
try:
import certifi
except ImportError:
# certifi is optional as long as we have ssl.create_default_context.
if ssl is None or hasattr(ssl, 'create_default_context'):
certifi = None
else:
raise
try:
xrange # py2
except NameError:
xrange = range # py3
if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
ssl_match_hostname = ssl.match_hostname
SSLCertificateError = ssl.CertificateError
elif ssl is None:
ssl_match_hostname = SSLCertificateError = None
else:
import backports.ssl_match_hostname
ssl_match_hostname = backports.ssl_match_hostname.match_hostname
SSLCertificateError = backports.ssl_match_hostname.CertificateError
if hasattr(ssl, 'SSLContext'):
if hasattr(ssl, 'create_default_context'):
# Python 2.7.9+, 3.4+
# Note that the naming of ssl.Purpose is confusing; the purpose
        # of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH)
else:
# Python 3.2-3.3
_client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
_client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
_client_ssl_defaults.load_verify_locations(certifi.where())
_server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3.
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
elif ssl:
# Python 2.6-2.7.8
_client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
ca_certs=certifi.where())
_server_ssl_defaults = {}
else:
# Google App Engine
_client_ssl_defaults = dict(cert_reqs=None,
ca_certs=None)
_server_ssl_defaults = {}
# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u('foo').encode('idna')
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# Default backlog used when calling sock.listen()
_DEFAULT_BACKLOG = 128
def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
backlog=_DEFAULT_BACKLOG, flags=None):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
"""
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
bound_port = None
for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
0, flags)):
af, socktype, proto, canonname, sockaddr = res
if (sys.platform == 'darwin' and address == 'localhost' and
af == socket.AF_INET6 and sockaddr[3] != 0):
# Mac OS X includes a link-local address fe80::1%lo0 in the
# getaddrinfo results for 'localhost'. However, the firewall
# doesn't understand that this is a local address and will
# prompt for access (often repeatedly, due to an apparent
# bug in its ability to remember granting access to an
# application). Skip these addresses.
continue
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if errno_from_exception(e) == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 and IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and bound_port is not None:
sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
sock.setblocking(0)
sock.bind(sockaddr)
bound_port = sock.getsockname()[1]
sock.listen(backlog)
sockets.append(sock)
return sockets
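# Illustrative sketch (added for this edit, not part of the upstream module);
# the port and address below are placeholders:
#
#     socks = bind_sockets(8888, address="127.0.0.1")
#     # `socks` is a list -- one listening socket per resolved address family.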
if hasattr(socket, 'AF_UNIX'):
def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
except OSError as err:
if errno_from_exception(err) != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
                raise ValueError("File %s exists and is not a socket" % file)
sock.bind(file)
os.chmod(file, mode)
sock.listen(backlog)
return sock
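    # Hedged usage sketch (illustrative only; the path is a placeholder):
    #
    #     sock = bind_unix_socket("/tmp/example.sock", mode=0o600)
    #     # An existing socket at that path is removed first; any other file
    #     # with that name raises ValueError.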
def add_accept_handler(sock, callback, io_loop=None):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
be run (``connection`` is a socket object, and ``address`` is the
address of the other end of the connection). Note that this signature
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
def accept_handler(fd, events):
# More connections may come in while we're handling callbacks;
# to prevent starvation of other tasks we must limit the number
# of connections we accept at a time. Ideally we would accept
# up to the number of connections that were waiting when we
# entered this method, but this information is not available
# (and rearranging this method to call accept() as many times
# as possible before running any callbacks would have adverse
# effects on load balancing in multiprocess configurations).
# Instead, we use the (default) listen backlog as a rough
# heuristic for the number of connections we can reasonably
# accept at once.
for i in xrange(_DEFAULT_BACKLOG):
try:
connection, address = sock.accept()
except socket.error as e:
                # _ERRNO_WOULDBLOCK indicates we have accepted every
# connection that is available.
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
callback(connection, address)
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
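# Illustrative sketch combining bind_sockets and add_accept_handler; the
# callback name is a placeholder and this block is not part of the original
# file:
#
#     def handle_connection(connection, address):
#         pass  # e.g. wrap `connection` in an IOStream
#
#     for s in bind_sockets(8888):
#         add_accept_handler(s, handle_connection)
#     IOLoop.current().start()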
def is_valid_ip(ip):
"""Returns true if the given string is a well-formed IP address.
Supports IPv4 and IPv6.
"""
if not ip or '\x00' in ip:
# getaddrinfo resolves empty strings to localhost, and truncates
# on zero bytes.
return False
try:
res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_NUMERICHOST)
return bool(res)
except socket.gaierror as e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
return True
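# For illustration (not in the original source): only literal addresses pass,
# e.g. is_valid_ip('127.0.0.1') and is_valid_ip('::1') return True, while
# is_valid_ip('localhost') and is_valid_ip('') return False.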
class Resolver(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver`
* `tornado.netutil.ThreadedResolver`
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
"""
@classmethod
def configurable_base(cls):
return Resolver
@classmethod
def configurable_default(cls):
return BlockingResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
"""
raise NotImplementedError()
def close(self):
"""Closes the `Resolver`, freeing any resources used.
.. versionadded:: 3.1
"""
pass
class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
Use this instead of `ThreadedResolver` when you require additional
control over the executor being used.
The executor will be shut down when the resolver is closed unless
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None, executor=None, close_executor=True):
self.io_loop = io_loop or IOLoop.current()
if executor is not None:
self.executor = executor
self.close_executor = close_executor
else:
self.executor = dummy_executor
self.close_executor = False
def close(self):
if self.close_executor:
self.executor.shutdown()
self.executor = None
@run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class BlockingResolver(ExecutorResolver):
"""Default `Resolver` implementation, using `socket.getaddrinfo`.
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
"""
def initialize(self, io_loop=None):
super(BlockingResolver, self).initialize(io_loop=io_loop)
class ThreadedResolver(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
Requires the `concurrent.futures` package to be installed
(available in the standard library since Python 3.2,
installable with ``pip install futures`` in older versions).
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
"""
_threadpool = None
_threadpool_pid = None
def initialize(self, io_loop=None, num_threads=10):
threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize(
io_loop=io_loop, executor=threadpool, close_executor=False)
@classmethod
def _create_threadpool(cls, num_threads):
pid = os.getpid()
if cls._threadpool_pid != pid:
# Threads cannot survive after a fork, so if our pid isn't what it
# was when we created the pool then delete it.
cls._threadpool = None
if cls._threadpool is None:
from concurrent.futures import ThreadPoolExecutor
cls._threadpool = ThreadPoolExecutor(num_threads)
cls._threadpool_pid = pid
return cls._threadpool
class OverrideResolver(Resolver):
"""Wraps a resolver with a mapping of overrides.
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs.
"""
def initialize(self, resolver, mapping):
self.resolver = resolver
self.mapping = mapping
def close(self):
self.resolver.close()
def resolve(self, host, port, *args, **kwargs):
if (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs)
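# Hedged example of the override mapping (hostnames and ports are placeholders,
# added for illustration only):
#
#     resolver = OverrideResolver(
#         resolver=Resolver(),
#         mapping={'test.example.com': '127.0.0.1',
#                  ('test.example.com', 443): ('127.0.0.1', 8443)})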
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
'cert_reqs', 'ca_certs', 'ciphers'])
def ssl_options_to_context(ssl_options):
"""Try to convert an ``ssl_options`` dictionary to an
`~ssl.SSLContext` object.
The ``ssl_options`` dictionary contains keywords to be passed to
`ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
be used instead. This function converts the dict form to its
`~ssl.SSLContext` equivalent, and may be used when a component which
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, dict):
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
if (not hasattr(ssl, 'SSLContext') or
isinstance(ssl_options, ssl.SSLContext)):
return ssl_options
context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options:
context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
if 'cert_reqs' in ssl_options:
context.verify_mode = ssl_options['cert_reqs']
if 'ca_certs' in ssl_options:
context.load_verify_locations(ssl_options['ca_certs'])
if 'ciphers' in ssl_options:
context.set_ciphers(ssl_options['ciphers'])
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant wasn't added until python 3.3.
context.options |= ssl.OP_NO_COMPRESSION
return context
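# Sketch of the dict form accepted above (illustrative; file paths are
# placeholders):
#
#     ctx = ssl_options_to_context({'certfile': 'server.crt',
#                                   'keyfile': 'server.key',
#                                   'cert_reqs': ssl.CERT_NONE})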
def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
# Python doesn't have server-side SNI support so we can't
# really unittest this, but it can be manually tested with
# python3.2 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return context.wrap_socket(socket, **kwargs)
else:
return ssl.wrap_socket(socket, **dict(context, **kwargs))
| apache-2.0 | -7,434,231,560,714,996,000 | 6,643,010,287,847,162,000 | 38.117188 | 90 | 0.64984 | false |
RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/projectfree.py | 3 | 3718 | # -*- coding: utf-8 -*-
'''
Yoda Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['project-free-tv.ch','project-free-tv.ag']
self.base_link = 'http://project-free-tv.ag'
self.search_link = '/movies/%s-%s/'
self.search_link_2 = '/movies/search-form/?free=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.search_link % (cleantitle.geturl(title), year)
q = urlparse.urljoin(self.base_link, url)
r = proxy.geturl(q)
            if r is not None: return url
t = cleantitle.get(title)
q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
q = urlparse.urljoin(self.base_link, q)
r = client.request(q)
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]
url = re.findall('(?://.+?|)(/.+)', r[0])[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
            if url is None: return sources
url = urlparse.urljoin(self.base_link, url)
r = proxy.request(url, 'movies')
links = client.parseDOM(r, 'tr')
for i in links:
try:
url = re.findall('callvalue\((.+?)\)', i)[0]
url = re.findall('(http.+?)(?:\'|\")', url)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = host.encode('utf-8')
quality = re.findall('quality(\w+)\.png', i)[0]
                    if quality == 'CAM' or quality == 'TS': quality = 'CAM'
else: quality = 'SD'
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
except:
pass
filter = [i for i in sources if i['quality'] == 'SD']
if filter: sources = filter
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | -1,738,807,970,190,960,000 | -6,568,366,122,755,913,000 | 32.495495 | 140 | 0.526896 | false |
bjolivot/ansible | lib/ansible/modules/commands/script.py | 44 | 2918 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: script
version_added: "0.9"
short_description: Runs a local script on a remote node after transferring it
description:
- "The C(script) module takes the script name followed by a list of
space-delimited arguments. "
- "The local script at path will be transferred to the remote node and then executed. "
- "The given script will be processed through the shell environment on the remote node. "
- "This module does not require python on the remote system, much like
the M(raw) module. "
options:
free_form:
description:
- path to the local script file followed by optional arguments. There is no parameter actually named 'free form'; see the examples!
required: true
default: null
aliases: []
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
version_added: "1.5"
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
required: no
default: null
version_added: "1.5"
notes:
     - It is usually preferable to write Ansible modules rather than to push scripts. Convert your script to an Ansible module for bonus points!
- The ssh connection plugin will force pseudo-tty allocation via -tt when scripts are executed. pseudo-ttys do not have a stderr channel and all
stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- decrypt
"""
EXAMPLES = '''
# Example from Ansible Playbooks
- script: /some/local/script.sh --some-arguments 1234
# Run a script that creates a file, but only if the file is not yet created
- script: /some/local/create_file.sh --some-arguments 1234
args:
creates: /the/created/file.txt
# Run a script that removes a file, but only if the file is not yet removed
- script: /some/local/remove_file.sh --some-arguments 1234
args:
removes: /the/removed/file.txt
'''
| gpl-3.0 | 284,939,978,722,092,830 | -5,961,968,198,263,875,000 | 37.394737 | 157 | 0.711103 | false |
Maplecroft/Magellan | magellan/deps_utils.py | 1 | 36288 | import os
import operator
from pkg_resources import parse_version
from pkg_resources import resource_filename as pkg_res_resource_filename
from pprint import pformat
import requests
import json
import logging
# from terminaltables import AsciiTable as OutputTableType
from terminaltables import SingleTable as OutputTableType
from magellan.package_utils import Package
from magellan.env_utils import Environment
from magellan.utils import MagellanConfig, run_in_subprocess, print_col
# Logging:
maglog = logging.getLogger("magellan_logger")
class DepTools(object):
"""Tools for conflict detection."""
@staticmethod
def check_changes_in_requirements_vs_env(requirements, descendants):
"""
Checks to see if there are any new or removed packages in a
requirements set vs what is currently in the env.
NB: Checks name only, not version!
:param dict requirements:
:param list descendants: current env dependencies of package.
:rtype: dict{list, list}
:returns {new_deps, removed_deps} : new and removed(from previous
dependency requirements) dependencies.
requirements = DepTools.get_deps_for_package_version(package, version)
descendants look like a list of edges in acyclic graph e.g.:
[..[('celery', '3.0.19'), ('kombu', '2.5.16')
, [('>=', '2.5.10'), ('<', '3.0')]]..[] etc]
(NB: specs are optional)
"""
try:
dec_keys = {x[1][0].lower(): x[1][0] for x in descendants}
except KeyError:
maglog.exception("Error in check_changes_in_requirements_vs_env")
return {'removed_deps': [], 'new_deps': []}
try:
rec_keys = {x['key']: x['project_name']
for x in requirements['requires'].values()}
except KeyError as e:
maglog.debug("Error in check_changes_in_requirements_vs_env: {}"
.format(e))
return {'removed_deps': [], 'new_deps': []}
dset = set(dec_keys.keys())
rset = set(rec_keys.keys())
removed_deps = [dec_keys[x] for x in (dset - rset)]
new_deps = [rec_keys[x] for x in (rset - dset)]
# return new_deps, removed_deps # as list
out = {'removed_deps': removed_deps, 'new_deps': new_deps}
return out
@staticmethod
def check_req_deps_satisfied_by_current_env(requirements, nodes):
"""
Checks nodes (package, version) of current environment against
requirements to see if they are satisfied
:param dict requirements:
requirements = DepTools.get_deps_for_package_version(package, version)
:param list nodes: current env nodes (package, version) tuples list
:rtype dict{dict, dict, list}
:returns: to_return{checks, conflicts, missing}
"checks" is a dictionary of the current checks
"conflicts" has at least 1 conflict with required specs
"missing" highlights any packages that are not in current environment
"""
check_ret = DepTools.check_requirement_satisfied
node_keys = {x[0].lower(): x[1] for x in nodes}
checks = {}
conflicts = {}
missing = []
if 'requires' not in requirements:
maglog.error("Requirements missing in "
"check_req_deps_satisfied_by_current_env")
return
for r in requirements['requires'].values():
key = r['key']
project_name = r['project_name']
specs = r['specs']
checks[project_name] = []
if key not in node_keys.keys():
maglog.info("Requirement {0}{1} not in current environment"
.format(project_name, specs))
checks[project_name].append(None)
missing.append(project_name)
else:
for s in specs:
req_satisfied, req_dets = check_ret(node_keys[key], s)
# print(req_dets)
checks[project_name].append(req_dets)
if not req_satisfied:
if project_name not in conflicts:
conflicts[project_name] = [req_dets]
else:
conflicts[project_name].append(req_dets)
to_return = {
'checks': checks,
'conflicts': conflicts,
'missing': missing,
}
return to_return
@staticmethod
def check_requirement_satisfied(cur_ver, requirement_spec):
""" tests to see whether a requirement is satisfied by the
current version.
:param str cur_ver: current version to use for comparison.
:param tuple (str, str) requirement_spec: is tuple of: (spec, version)
:returns: bool
"""
ops = {'<': operator.lt, '<=': operator.le,
'==': operator.eq, '!=': operator.ne,
'>=': operator.ge, '>': operator.gt, }
requirement_ver = requirement_spec[1]
requirement_sym = requirement_spec[0]
requirement_met = ops[requirement_sym](
parse_version(cur_ver), parse_version(requirement_ver))
# print(cur_ver, requirement_sym, requirement_ver, requirement_met)
return requirement_met, (cur_ver, requirement_sym,
                                 requirement_ver, requirement_met)
    # Illustrative call (added comment; the version and spec are arbitrary):
    #
    #     DepTools.check_requirement_satisfied('1.9.0', ('>=', '1.7.3'))
    #     # -> (True, ('1.9.0', '>=', '1.7.3', True))
@staticmethod
def get_deps_for_package_version(package, version, vex_options=None):
"""Gets dependencies for a specific version of a package.
Specifically:
0. Check if this has already been done and cached & return that.
1. Set up temporary virtualenv
2. installs package/version into there using pip
3. Write file to interrogate through virtual env using
vex/pip/setuptool combo
        4. Run file, which writes the results as JSON to a temp file
5. reads that file from current program
6. deletes file and returns info
7. Delete tmp env?
"""
if vex_options is None:
vex_options = ''
req_out_file = ("{0}_{1}_req.json"
.format(package.lower(), version.replace(".", "_")))
# 0. Check if this has already been done and cached & return that.
cached_file = os.path.join(MagellanConfig.cache_dir, req_out_file)
if os.path.exists(cached_file):
maglog.info("Using previously cached result at {0}"
.format(cached_file))
return json.load(open(cached_file, 'r'))
# 1. Set up temporary virtualenv
tmp_env = Environment(name=MagellanConfig.tmp_env_dir)
tmp_env.create_vex_new_virtual_env(vex_options) # NB: delete if extant!!
# todo (aj); by default?
# 1.5 Upgrade pip
run_in_subprocess("vex {} {} pip install pip --upgrade"
.format(vex_options, tmp_env.name))
# 2. installs package/version into there using pip
# tmp_pip_options = "--cache-dir {}".format(MagellanConfig.cache_dir)
tmp_pip_options = ("--cache-dir {} --no-deps"
.format(MagellanConfig.cache_dir))
pip_package_str = '{0}=={1}'.format(package, version)
tmp_env.vex_install_requirement(
tmp_env.name, pip_package_str, tmp_pip_options, vex_options)
# 3. File to interrogate through virtual env for package
interrogation_file = pkg_res_resource_filename(
'magellan', 'package_interrogation.py')
        # 4. Run file, which writes the results as JSON to a temp file
run_in_subprocess("vex {} {} python {} {} {}".format(
vex_options, tmp_env.name, interrogation_file, package,
MagellanConfig.cache_dir))
# 5. reads that file from current program
try:
result = json.load(open(cached_file, 'r'))
except IOError:
result = {}
return result
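    # For illustration (not part of the original module): the cached JSON
    # returned above mirrors the structure written by the interrogation
    # script, roughly
    #
    #     {"project_name": "examplepkg", "version": "1.0.0",
    #      "requires": {"somedep": {"project_name": "somedep",
    #                               "key": "somedep",
    #                               "specs": [[">=", "2.0"]]}}}
    #
    # where "examplepkg" and "somedep" are placeholder names.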
@staticmethod
def check_if_ancestors_still_satisfied(
package, new_version, ancestors, package_requirements):
"""
Makes sure you haven't offended any of your forefathers...
Checks whether the packages which depend on the current package
and version will still have their requirements satisfied.
:param str package:
:param str new_version:
:param list ancestors:
:param dict package_requirements: from virtual env
:rtype dict{dict, dict}
:return: checks, conflicts
NB: Note distinction between package_requirements and the requirements
that generally go in other methods in this class. The former lists the
        requirements for all packages in the current environment, whereas the
latter is package specific.
"""
package_key = package.lower()
to_check = [x[0][0] for x in ancestors if x[0][0] != 'root']
checks = {}
conflicts = {}
for anc in to_check:
anc_key = anc.lower()
anc_specs = \
package_requirements[anc_key]['requires'][package_key]['specs']
checks[anc_key] = anc_specs
# print(anc_specs)
for s in anc_specs:
is_ok, dets = DepTools.check_requirement_satisfied(
new_version, s)
if not is_ok:
if anc_key in conflicts:
conflicts[anc_key].append(dets)
else:
conflicts[anc_key] = dets
# pprint(checks)
# pprint(conflicts)
# return checks, conflicts
return {'checks': checks, 'conflicts': conflicts}
@staticmethod
def detect_upgrade_conflicts(packages, venv, pretty=False):
"""
Detect conflicts between packages in current environment when upgrading
other packages.
At present this routine will look at just the immediate connections
to a graph in the environment. It does this in 3 major ways:
1. DEPENDENCY SET - check_changes_in_requirements_vs_env
Checks the required dependencies of new version against
current environment to see additions/removals BY NAME ONLY.
2. REQUIRED VERSIONS - check_req_deps_satisfied_by_current_env
For all dependencies of new version, checks to see whether
they are satisfied by current environment versions.
3. ANCESTOR DEPENDENCIES - check_if_ancestors_still_satisfied
For all the ancestor nodes that depend on PACKAGE, it checks
whether the dependency specs are satisfied by the new version.
:param list packages: List of (package, desired_version)'s
:param Environment venv: virtual environment
"""
uc_deps = {}
conflicts = {}
for u in packages:
package = u[0]
version = u[1]
p_v = "{0}_{1}".format(package, version.replace('.', '_'))
uc_deps[p_v] = {}
p_key = package.lower()
cur_ver = venv.all_packages[p_key].version
if parse_version(cur_ver) == parse_version(version):
s = ("{} version {} is same as current!"
.format(package, version))
print_col(s, 'red', 'black', pretty)
continue
if not PyPIHelper.check_package_version_on_pypi(package, version):
continue
uc_deps[p_v]['requirements'] = \
DepTools.get_deps_for_package_version(
package, version, vex_options=MagellanConfig.vex_options)
ancestors, descendants = Package.get_direct_links_to_any_package(
package, venv.edges)
# 1: DEPENDENCY SET - check_changes_in_requirements_vs_env
uc_deps[p_v]['dependency_set'] = \
DepTools.check_changes_in_requirements_vs_env(
uc_deps[p_v]['requirements'], descendants)
# 2. REQUIRED VERSIONS - check_req_deps_satisfied_by_current_env
uc_deps[p_v]['required_versions'] = \
DepTools.check_req_deps_satisfied_by_current_env(
uc_deps[p_v]['requirements'], venv.nodes)
# 3. ANCESTOR DEPENDENCIES - check_if_ancestors_still_satisfied
uc_deps[p_v]['ancestor_dependencies'] = \
DepTools.check_if_ancestors_still_satisfied(
package, version, ancestors, venv.package_requirements)
conflicts[p_v] = {}
try:
conflicts[p_v]['dep_set'] = uc_deps[p_v]['dependency_set']
conflicts[p_v]['req_ver'] = \
uc_deps[p_v]['required_versions']['conflicts']
conflicts[p_v]['missing_packages'] = \
uc_deps[p_v]['required_versions']['missing']
conflicts[p_v]['anc_dep'] = \
uc_deps[p_v]['ancestor_dependencies']['conflicts']
except TypeError as e:
maglog.debug("Error when attempting to assess conflicts {}"
.format(e))
return conflicts, uc_deps
@staticmethod
def highlight_conflicts_in_current_env(
nodes, package_requirements, pretty=False):
"""
Checks through all nodes (packages) in the venv environment
:param list nodes: list of nodes (packages) as (name, ver) tuple
:param dict package_requirements: dependencies dictionary.
:rtype list
:return: current_env_conflicts
"""
if not nodes or not package_requirements:
print("venv missing required data: nodes or package_requirements.")
return []
current_env_conflicts = []
ver_info = {n[0].lower(): n[1] for n in nodes}
if 'argparse' not in ver_info:
ver_info['argparse'] = ""
for n in nodes:
n_key = n[0].lower()
if n_key not in package_requirements:
print ("{} missing from package_requirements".format(n))
continue
if 'requires' not in package_requirements[n_key]:
print("{} does not have key 'requires'".format(n_key))
continue
node_requirements = package_requirements[n_key]['requires']
for r in node_requirements:
try:
cur_ver = ver_info[r.lower()]
except KeyError:
maglog.debug("KeyError for {}".format(r))
cur_ver = ''
for s in node_requirements[r]['specs']:
req_met, req_details = \
DepTools.check_requirement_satisfied(cur_ver, s)
if not req_met:
current_env_conflicts.append(
(n, node_requirements[r]['project_name'],
req_details))
DepTools.table_print_cur_env_conflicts(current_env_conflicts, pretty)
return current_env_conflicts
@staticmethod
def detect_package_addition_conflicts(packages, venv):
"""
Detect if there will be any conflicts with the addition of a new
package
:param packages: list of (name, version) tuple
:param venv: virtual env where package will be installed, of type
magellan.env_utils.Environment
:rtype dict
:return: conflicts
0. Check if package (name) is already in environment.
1. Check new packages to be installed
2. Check current environment satisfies requirements.
"""
ver_info = {x[0].lower(): x[1] for x in venv.nodes}
deps = {}
for p in packages:
package = p[0]
version = p[1]
p_v = "{0}_{1}".format(package, version.replace('.', '_'))
deps[p_v] = {}
if not PyPIHelper.check_package_version_on_pypi(package, version):
print("Cannot get package info for {} {} on PyPI"
.format(package, version))
deps[p_v]['status'] = "No package info on PyPI."
continue
# 0 EXTANT PACKAGE:
p_extant, details = DepTools.package_in_environment(
package, version, venv.nodes)
if p_extant: # should use upgrade conflict detection.
deps[p_v]['status'] = (
"Package currently exists - use upgrade -U.")
continue
# Get requirements if it's actually a new package & on PyPI.
requirements = DepTools.get_deps_for_package_version(
package, version, vex_options=MagellanConfig.vex_options)
deps[p_v]['requirements'] = requirements
deps[p_v]['new_packages'] = []
deps[p_v]['may_try_upgrade'] = []
deps[p_v]['may_be_okay'] = []
if not requirements:
deps[p_v] = {"status": "NO DATA returned from function."}
continue
for r in requirements['requires']:
r_key = r.lower()
# 1 New packages
if r_key not in ver_info:
deps[p_v]['new_packages'].append(
requirements['requires'][r]['project_name'])
# 2 Packages that may try to upgrade. All n = 1
else:
if not requirements['requires'][r]['specs']:
deps[p_v]['may_be_okay'].append(r)
current_version = ver_info[r_key]
for s in requirements['requires'][r]['specs']:
res, deets = DepTools.check_requirement_satisfied(
current_version, s)
if not res:
deps[p_v]['may_try_upgrade'].append((r, deets))
else:
deps[p_v]['may_be_okay'].append((r, deets))
return deps
@staticmethod
def package_in_environment(package, version, nodes):
"""
Check to see if package exists in current env and see if it
matches the current version if so.
:param package: str name of package
:param version: str version of package
:param nodes: list of env nodes
:rtype bool, dict
:return: whether package exists, and if so which version.
"""
key = package.lower()
ver_info = {x[0].lower(): x[1] for x in nodes if x[0].lower() == key}
if ver_info:
current_version = ver_info[key]
if version == current_version:
maglog.info("Package {0} exists with specified version {1}"
.format(package, version))
else:
maglog.info("Package {0} exists with version {1} that differs "
"from {2}. Try running with Upgrade Package flag"
" -U.".format(package, current_version, version))
return True, {'name': package, 'env_version': current_version}
else:
maglog.info("Package {} does not exist in current env"
.format(package))
return False, {}
@staticmethod
def process_package_conflicts(conflict_list, venv, pretty=False):
"""
:param conflict_list: list of (package, version) tuples passed in
from CLI
:param venv: magellan.env_utils.Environment
:return: addition_conflicts, upgrade_conflicts
"""
upgrade_conflicts = []
addition_conflicts = []
for p in conflict_list:
p_in_env, p_details = venv.package_in_env(p[0])
if p_in_env:
upgrade_conflicts.append(p)
else: # NB: may also be non-existent package
addition_conflicts.append(p)
if upgrade_conflicts:
maglog.info(upgrade_conflicts)
upgrade_conflicts, uc_deps = DepTools.detect_upgrade_conflicts(
upgrade_conflicts, venv, pretty)
DepTools.table_print_upgrade_conflicts(
upgrade_conflicts, uc_deps, venv, pretty)
maglog.info(pformat(upgrade_conflicts))
maglog.debug(pformat(uc_deps))
if addition_conflicts:
maglog.info(addition_conflicts)
addition_conflicts = DepTools.detect_package_addition_conflicts(
addition_conflicts, venv)
DepTools.table_print_additional_package_conflicts(
addition_conflicts, pretty)
maglog.info(pformat(addition_conflicts))
return addition_conflicts, upgrade_conflicts
@staticmethod
def table_print_upgrade_conflicts(conflicts, dep_info, venv, pretty=False):
"""
Prints the upgrade conflicts to stdout in format easily digestible
for people.
:param dict conflicts: dict of upgrade conflicts
:param dict dep_info: dependency information
:param Environment venv: virtual environment
"""
if not conflicts:
return
print("\n")
s = "Upgrade Conflicts:"
print_col(s, pretty=pretty, header=True)
for p_k, p in conflicts.items():
has_recs = dep_info.get(p_k).get('requirements')
if not has_recs:
print_col("Requirements not found for {}, possible failure "
"when installating package into temporary "
"directory?".format(p_k), pretty=pretty)
continue
p_name = dep_info[p_k]['requirements']['project_name']
ver = dep_info[p_k]['requirements']['version']
cur_ver = venv.all_packages[p_name.lower()].version
if parse_version(cur_ver) < parse_version(ver):
direction = "upgrade"
else:
direction = "downgrade"
s = ("{} {}: {} from {} to {}.".format(
p_name, ver, direction, cur_ver, ver))
print_col(s, pretty=pretty)
missing_from_env = p['missing_packages']
new_dependencies = p['dep_set']['new_deps']
removed_dependencies = p['dep_set']['removed_deps']
broken_reqs = ["{0}: {1}".format(x, v)
for x, v in p['anc_dep'].items()]
if not (missing_from_env or new_dependencies
or removed_dependencies or broken_reqs):
print_col("No conflicts detected", pretty=pretty)
_print_if(missing_from_env,
"Packages not in environment (to be installed):",
pretty=pretty)
_print_if(new_dependencies,
"New dependencies of {}:".format(p_name), pretty=pretty)
_print_if(removed_dependencies,
"{} will no longer depend on:".format(p_name),
pretty=pretty)
_print_if(broken_reqs,
"These packages will have their requirements broken:{}",
pretty=pretty)
print("\n")
@staticmethod
def table_print_additional_package_conflicts(conflicts, pretty=False):
"""
Prints the upgrade conflicts to stdout in format easily digestible
for people.
:param conflicts: dict of upgrade conflicts
"""
print_col("Package Addition Conflicts:", pretty=pretty, header=True)
for p_k, p in conflicts.items():
has_recs = p.get('requirements')
if not has_recs:
print_col("Requirements not found for {}, possible failure "
"when installating package into temporary "
"directory?".format(p_k), pretty=pretty)
continue
p_name = p.get('requirements').get('project_name')
ver = p.get('requirements').get('version')
print_col("{0} {1}:".format(p_name, ver),
pretty=pretty, header=True)
okay = p['may_be_okay']
up = p['may_try_upgrade']
new_ps = p['new_packages']
if not (okay or up or new_ps):
s = (" No conflicts detected for the addition of {0} {1}."
.format(p_name, ver))
print_col(s, pretty=pretty)
_print_if(okay, "Should be okay:", pretty=pretty)
_print_if(up, "May try to upgrade:", pretty=pretty)
_print_if(new_ps, "New packages to add:", pretty=pretty)
print("\n")
@staticmethod
def table_print_cur_env_conflicts(conflicts, pretty=False):
"""
Print current conflicts in environment using terminaltables.
"""
ts = "No conflicts detected in environment"
if conflicts:
print_col("Conflicts in environment:", pretty=pretty, header=True)
table_data = [['PACKAGE', 'DEPENDENCY', 'CONFLICT']]
for conflict in conflicts:
maglog.info(conflict)
try:
c_name = conflict[0][0]
c_ver = conflict[0][1]
c_dep = conflict[1]
c_dep_dets = conflict[-1]
t_row = [" ".join([c_name, c_ver]),
c_dep,
_string_requirement_details(c_dep_dets)]
table_data.append(t_row)
except Exception as e:
maglog.exception(e)
print("There was an error in printing output; check -v")
ts = OutputTableType(table_data).table
print_col(ts, pretty=pretty)
@staticmethod
def acquire_and_display_dependencies(package_version_list, pretty=False):
"""
Gets the dependencies information by installing the package and
version from PyPI
"""
for p in package_version_list:
package = p[0]
version = p[1]
if not PyPIHelper.check_package_version_on_pypi(package, version):
print_col("{} {} not found on PyPI.".format(package, version),
pretty=pretty, header=True)
continue
requirements = DepTools.get_deps_for_package_version(
package, version, vex_options=MagellanConfig.vex_options)
maglog.debug(pformat(requirements))
_table_print_requirements(requirements, pretty)
@staticmethod
def get_ancestors_of_packages(package_list, venv, pretty=False):
"""
Prints a list of ancestors of package to indicate what brought a
package into the environment.
:param package_list: list of names of package to query
:param venv: magellan.env_utils.Environment
:rtype dict:
:returns: dictionary with list of ancestors.
"""
anc_dict = {}
for p in package_list:
p_key = p[0].lower() # [0] as list of lists from argparse
if p_key not in venv.all_packages:
anc_dict[p_key] = None
maglog.info("{} not found in env".format(p_key))
continue
ancs = venv.all_packages[p_key].ancestors(venv.edges)
anc_dict[p_key] = [x[0] for x in ancs if x[0][0] != "root"]
DepTools().pprint_anc_dict(anc_dict, venv, pretty)
return anc_dict
@staticmethod
def pprint_anc_dict(ancestor_dictionary, venv, pretty=False):
"""
Pretty prints ancestors dictionary to standard out.
:param ancestor_dictionary:
:param venv: magellan.env_utils.Environment
"""
env_name = "the current environment" if not venv.name else venv.name
for pk, p in ancestor_dictionary.items():
if p:
s = "These packages depend on {} in {}:"\
.format(venv.all_packages[pk].name, env_name)
print_col(s, pretty=pretty, header=True)
for a in p:
try:
print_col("{} {}".format(a[0], a[1]), pretty=pretty)
except Exception as e:
maglog.exception(e)
@staticmethod
def get_descendants_of_packages(package_list, venv, pretty=False):
"""
Prints a list of descendants of package to indicate what brought a
package into the environment.
:param package_list: list of names of package to query
:param venv: magellan.env_utils.Environment
:rtype dict:
:returns: dictionary with list of descendants.
"""
dec_dic = {}
for p in package_list:
p_key = p[0].lower() # [0] as list of lists from argparse
if p_key not in venv.all_packages:
dec_dic[p_key] = None
maglog.info("{} not found in env".format(p_key))
continue
decs = venv.all_packages[p_key].descendants(venv.edges)
dec_dic[p_key] = [x[1] for x in decs]
DepTools().pprint_dec_dict(dec_dic, venv, pretty)
return dec_dic
# todo (aj) refactor the anc dic
@staticmethod
def pprint_dec_dict(descendant_dictionary, venv, pretty=False):
"""
        Pretty prints the descendants dictionary to standard out.
:param descendant_dictionary:
:param venv: magellan.env_utils.Environment
"""
env_name = "the current environment" if not venv.name else venv.name
for pk, p in descendant_dictionary.items():
if p:
s = "{} depends on these packages in {}:"\
.format(venv.all_packages[pk].name, env_name)
print_col(s, pretty=pretty, header=True)
for a in p:
try:
print_col("{} {}".format(a[0], a[1]), pretty=pretty)
except Exception as e:
maglog.exception(e)
def _table_print_requirements(requirements, pretty=False):
"""
Table print requirements to stdout for human consumption.
:param dict requirements: dictionary of requirements from PyPI
"""
package = requirements.get('project_name')
version = requirements.get('version')
reqs = requirements.get('requires', {})
if not reqs:
s = "{} {} appears to have no dependencies.".format(package, version)
print_col(s, pretty=pretty, header=True)
else:
s = "Dependencies of {} {}:".format(package, version)
print_col(s, pretty=pretty, header=True)
table_data = [['PACKAGE', 'SPECS']]
for r_key, r in reqs.items():
table_row = [r['project_name']]
if r['specs']:
spec_string = ""
for s in r['specs']:
spec_string += "{} {}\n".format(s[0], s[1])
table_row.append(spec_string)
else:
table_row.append('\n')
table_data.append(table_row)
table = OutputTableType(table_data)
print_col(table.table, pretty=pretty)
def _print_if(list_in, lead_in_text=None, tab_space=2, pretty=False):
"""
prints the list if it has items.
:param list list_in: list of input items
:param str lead_in_text: what to print before list print.
:param int tab_space: indentation for prettiness.
    :param bool pretty: pass the flag through to print_col for styled output.
"""
if list_in:
if lead_in_text:
print_col(" "*tab_space + lead_in_text, pretty=pretty)
for item in list_in:
if type(item) == tuple:
_item = item[0] + " as " + _string_requirement_details(item[1])
else:
_item = item
print_col(" "*tab_space + "".join(_item), pretty=pretty)
def _string_requirement_details(dets):
"""
Converts details from DepTools.check_requirement_satisfied into an
easily readable string.
:param dets: details from DepTools.check_requirement_satisfied
e.g. dets = ('1.9.0', u'>=', u'1.7.3', True)
:rtype str:
:return:requirement details as a string.
"""
try:
passed = " is " if dets[-1] else " is not "
s = dets[0] + passed + " ".join(dets[1:3])
except Exception as e:
maglog.error(e)
s = ""
return s
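# Example of the conversion above (illustrative only):
#
#     _string_requirement_details(('1.9.0', '>=', '1.7.3', True))
#     # -> "1.9.0 is >= 1.7.3"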
def _return_interrogation_script_json(package, filename=None):
"""Return script to interrogate deps for package inside env.
Uses json.dump instead of pickle due to cryptic pickle/requests bug."""
head = """
import pip
import json
pkgs = pip.get_installed_distributions()
"""
mid = "package = '{0}'".format(package.lower())
if not filename:
out = ('fn = "{0}_{1}_req.dat"'
'.format(p.key, p.version.replace(".","_"))')
else:
out = 'fn = "{0}"'.format(filename)
conv = """
p = [x for x in pkgs if x.key == package][0]
req_dic = {'project_name': p.project_name,
'version': p.version, 'requires': {}}
for r in p.requires():
req_dic['requires'][r.key] = {}
req_dic['requires'][r.key]['project_name'] = r.project_name
req_dic['requires'][r.key]['key'] = r.key
req_dic['requires'][r.key]['specs'] = r.specs
"""
end = "json.dump(req_dic, open(fn, 'wb'))"
nl = '\n'
return head + nl + mid + nl + conv + nl + out + nl + end + nl
class PyPIHelper(object):
"""Collection of static methods to assist in interrogating PyPI"""
@staticmethod
def check_package_version_on_pypi(package, version):
"""
Queries PyPI to see if the specific version of "package" exists.
:param str package: package name
:param str version: package version
:rtype bool:
:return: True if package-version on PyPI
"""
package_json = PyPIHelper.acquire_package_json_info(package)
if not package_json:
return False
else:
# print("JSON acquired")
return version in package_json['releases'].keys()
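    # Illustrative call (the package name and version are arbitrary examples):
    #
    #     PyPIHelper.check_package_version_on_pypi('requests', '2.7.0')
    #     # -> True only if that exact release appears in the PyPI JSON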
@staticmethod
def acquire_package_json_info(package, localcache=None):
"""
Perform lookup on packages and versions. Currently just uses PyPI.
Returns JSON
p is package name
localCacheDir is a location of local cache
"""
package = str(package)
p_json = package + '.json'
if not localcache:
f = os.path.join(MagellanConfig.cache_dir, p_json)
else:
f = os.path.join(localcache, p_json)
if os.path.exists(f):
maglog.info("retrieving file {0} from local cache".format(f))
with open(f, 'r') as ff:
return json.load(ff)
pypi_template = 'https://pypi.python.org/pypi/{0}/json'
try:
r = requests.get(pypi_template.format(package))
if r.status_code == 200: # if successfully retrieved:
maglog.info("{0} JSON successfully retrieved from PyPI"
.format(package))
# Save to local cache...
with open(f, 'w') as outf:
json.dump(r.json(), outf)
# ... and return to caller:
return r.json()
else: # retrieval failed
maglog.info("failed to download {0}".format(package))
return {}
except requests.ConnectionError as e:
maglog.warn("Connection to PyPI failed: {}".format(e))
return {}
@staticmethod
def all_package_versions_on_pypi(package):
"""Return a list of all released packages on PyPI.
:param str package: input package name
:rtype: list
:return: list of all package versions
"""
all_package_info = PyPIHelper.acquire_package_json_info(package)
out = []
if 'releases' in all_package_info:
out = list(all_package_info['releases'].keys())
return out
| mit | -5,800,108,928,248,311,000 | -5,993,868,441,738,253,000 | 35.543807 | 81 | 0.553351 | false |
conates/my_site | frontend/templatetags/set_var.py | 1 | 1670 | from django import template
register = template.Library()
from backend.models import *
slider_principal = WpPosts.objects.all().filter(
post_status="publish",
post_type="post",
wptermrelationships__term_taxonomy__term__name="Slider Principal",
)
# QuerySet.filter() and order_by() return new querysets, so the results must
# be assigned back or these calls have no effect.
slider_principal = slider_principal.filter(
    wppostmeta__meta_key__in=["data-icon", "data-slice2-scale", "data-slice1-scale",
                              "data-slice2-rotation", "data-slice1-rotation", "class"])
slider_principal = slider_principal.order_by("wppostmeta__meta_value")
@register.simple_tag
def get_custom_var_orientation(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[2:3]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_rotation_1(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[3:4]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_rotation_2(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[4:5]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_scale_1(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[5:6]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_scale_2(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[6:7]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_icon(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[7:8]
for value in var_custom:
return value.meta_value
@register.simple_tag
def get_custom_var_class(postId):
var_custom = WpPostmeta.objects.filter(post_id=postId)[9:10]
for value in var_custom:
return value.meta_value | apache-2.0 | 490,307,886,395,671,740 | 2,562,977,759,442,736,600 | 29.381818 | 158 | 0.756287 | false |
cheif/django-rest-framework | rest_framework/authtoken/models.py | 81 | 1364 | import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Prior to Django 1.5, the AUTH_USER_MODEL setting does not exist.
# Note that we don't perform this code in the compat module due to
# bug report #1297
# See: https://github.com/tomchristie/django-rest-framework/issues/1297
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Token(models.Model):
"""
The default authorization token model.
"""
key = models.CharField(max_length=40, primary_key=True)
user = models.OneToOneField(AUTH_USER_MODEL, related_name='auth_token')
created = models.DateTimeField(auto_now_add=True)
class Meta:
# Work around for a bug in Django:
# https://code.djangoproject.com/ticket/19422
#
# Also see corresponding ticket:
# https://github.com/tomchristie/django-rest-framework/issues/705
abstract = 'rest_framework.authtoken' not in settings.INSTALLED_APPS
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(Token, self).save(*args, **kwargs)
def generate_key(self):
return binascii.hexlify(os.urandom(20)).decode()
def __str__(self):
return self.key
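# Hedged usage sketch (not part of the upstream file; `some_user` stands in
# for an AUTH_USER_MODEL instance):
#
#     token = Token.objects.create(user=some_user)
#     token.key  # 40-character hex string generated in save()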
| bsd-2-clause | -1,359,982,730,022,622,700 | 7,689,185,903,149,478,000 | 32.268293 | 76 | 0.68695 | false |
wbc2010/django1.2.5 | tests/regressiontests/queries/tests.py | 38 | 67172 | import datetime
import pickle
import sys
import unittest
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, connections, DEFAULT_DB_ALIAS
from django.db.models import Count
from django.db.models.query import Q, ITER_CHUNK_SIZE, EmptyQuerySet
from django.test import TestCase
from django.utils.datastructures import SortedDict
from models import (Annotation, Article, Author, Celebrity, Child, Cover, Detail,
DumbCategory, ExtraInfo, Fan, Item, LeafA, LoopX, LoopZ, ManagedModel,
Member, NamedCategory, Note, Number, Plaything, PointerA, Ranking, Related,
Report, ReservedName, Tag, TvChef, Valid, X, Food, Eaten, Node)
class BaseQuerysetTest(TestCase):
def assertValueQuerysetEqual(self, qs, values):
return self.assertQuerysetEqual(qs, values, transform=lambda x: x)
def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
self.assertEqual(msg, str(e))
self.assertTrue(isinstance(e, exc), "Expected %s, got %s" % (exc, type(e)))
else:
if hasattr(exc, '__name__'):
excName = exc.__name__
else:
excName = str(exc)
raise AssertionError, "%s not raised" % excName
class Queries1Tests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
self.t1 = Tag.objects.create(name='t1', category=generic)
self.t2 = Tag.objects.create(name='t2', parent=self.t1, category=generic)
self.t3 = Tag.objects.create(name='t3', parent=self.t1)
t4 = Tag.objects.create(name='t4', parent=self.t3)
self.t5 = Tag.objects.create(name='t5', parent=self.t3)
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
self.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=self.t1)
ann1.notes.add(self.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, self.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
self.e2 = ExtraInfo.objects.create(info='e2', note=n2)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=self.e2)
self.a4 = Author.objects.create(name='a4', num=4004, extra=self.e2)
self.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
self.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
self.i1 = Item.objects.create(name='one', created=self.time1, modified=self.time1, creator=self.a1, note=self.n3)
self.i1.tags = [self.t1, self.t2]
self.i2 = Item.objects.create(name='two', created=self.time2, creator=self.a2, note=n2)
self.i2.tags = [self.t1, self.t3]
self.i3 = Item.objects.create(name='three', created=time3, creator=self.a2, note=self.n3)
i4 = Item.objects.create(name='four', created=time4, creator=self.a4, note=self.n3)
i4.tags = [t4]
self.r1 = Report.objects.create(name='r1', creator=self.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
self.rank1 = Ranking.objects.create(rank=2, author=self.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=self.i2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred')|Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred')|Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
self.assertEqual(2, qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[])|Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
# FIXME: This is difficult to fix and very much an edge case, so punt for now.
# This is related to the order_by() tests, below, but the old bug exhibited
# itself here (q2 was pulling too many tables into the combined query with the
# new ordering, but only because we have evaluated q2 already).
#
#self.assertEqual(len((q1 & q2).order_by('name').query.tables), 1)
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3))
self.assertEqual(
len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER and qs.query.alias_refcount[x[1]]]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() & Tag.objects.all()
)
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() | Tag.objects.all()
)
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
        self.assertEqual(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertValueQuerysetEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': u'foo'}, {'misc': u'bar'}, {'misc': u'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertTrue('note_id' in ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note_id'),
[{'note_id': 1}, {'note_id': 2}]
)
# ...or use the field name.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note'),
[{'note': 1}, {'note': 2}]
)
def test_ticket2902(self):
# Parameters can be given to extra_select, *if* you use a SortedDict.
# (First we need to know which order the keys fall in "naturally" on
# your system, so we can put things in the wrong way around from
# normal. A normal dict would thus fail.)
s = [('a', '%s'), ('b', '%s')]
params = ['one', 'two']
if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
s.reverse()
params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
self.assertEqual(d, {'a': u'one', 'b': u'two'})
# Order by the number of tags attached to an item.
l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
self.assertEqual([o.count for o in l], [2, 2, 1, 0])
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1)|Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.dates('created', 'month').count(), 1)
self.assertEqual(Item.objects.dates('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.dates('created', 'day')), 2)
self.assertEqual(Item.objects.dates('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.dates('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).dates('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name="one"
self.assertQuerysetEqual(
Item.objects.dates('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).dates('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.dates('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# Make sure semi-deprecated ordering by related models syntax still
# works.
self.assertValueQuerysetEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
q = EmptyQuerySet()
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(x=10), [])
self.assertQuerysetEqual(q.exclude(y=3), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('spam', 'eggs'), [])
self.assertQuerysetEqual(q.annotate(Count('eggs')), [])
self.assertQuerysetEqual(q.order_by('-pub_date', 'headline'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'is_recent': "pub_date > '2006-01-01'"}),
[]
)
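        # Setting low_mark by hand simulates a slice having been taken, so the
        # extra() call below must now be rejected.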
q.query.low_mark = 1
self.assertRaisesMessage(
AssertionError,
'Cannot change a query once a slice has been taken',
q.extra, select={'is_recent': "pub_date > '2006-01-01'"}
)
self.assertQuerysetEqual(q.reverse(), [])
self.assertQuerysetEqual(q.defer('spam', 'eggs'), [])
self.assertQuerysetEqual(q.only('spam', 'eggs'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of DateQuerySets used to fail
qs = Item.objects.dates('created', 'month')
_ = pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
self.assertRaisesMessage(
TypeError,
'Cannot use a multi-field ValuesQuerySet as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
)
self.assertRaisesMessage(
TypeError,
'Cannot use a multi-field ValuesListQuerySet as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertValueQuerysetEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
for i in [n_obj.pk]:
yield i
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
# Queries used in an __in clause don't execute subqueries
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
subq = Author.objects.filter(num__lt=3000)
self.assertQuerysetEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
['<Author: a1>']
)
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertQuerysetEqual(
Item.objects.exclude(modified=self.time1).order_by('name'),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__name=self.t1.name),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
# test is a little tricky, since NULL ordering is database dependent.
# Instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
# Empty querysets can be merged with others.
self.assertQuerysetEqual(
Note.objects.none() | Note.objects.all(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(
Note.objects.all() | Note.objects.none(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
# Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
# sufficient that this query runs without error.
qs = Tag.objects.values_list('id', flat=True).order_by('id')
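        # bump_prefix() changes the prefix used for the query's table aliases
        # so they cannot clash with an outer query; evaluation should still
        # return the same rows afterwards.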
qs.query.bump_prefix()
first = qs[0]
self.assertEqual(list(qs), range(first, first+5))
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertQuerysetEqual(
Author.objects.filter(Q(item__note__extrainfo=self.e2)|Q(report=self.r1, name='xyz')),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(report=self.r1, name='xyz')|Q(item__note__extrainfo=self.e2)),
['<Author: a2>']
)
self.assertQuerysetEqual(
Annotation.objects.filter(Q(tag__parent=self.t1)|Q(notes__note='n1', name='a1')),
['<Annotation: a1>']
)
xx = ExtraInfo.objects.create(info='xx', note=self.n3)
self.assertQuerysetEqual(
Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)),
['<Note: n1>', '<Note: n3>']
)
xx.delete()
q = Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)).query
self.assertEqual(
len([x[2] for x in q.alias_map.values() if x[2] == q.LOUTER and q.alias_refcount[x[1]]]),
1
)
class Queries2Tests(TestCase):
def setUp(self):
Number.objects.create(num=4)
Number.objects.create(num=8)
Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
# lookup constraints.
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertQuerysetEqual(
Number.objects.filter(num__gt=8, num__lt=13),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
['<Number: 8>']
)
def test_ticket12239(self):
# Float was being rounded to integer on gte queries on integer field. Tests
# show that gt, lt, gte, and lte work as desired. Note that the fix changes
# get_prep_lookup for gte and lt queries only.
self.assertQuerysetEqual(
Number.objects.filter(num__gt=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12),
['<Number: 4>', '<Number: 8>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.0),
['<Number: 4>', '<Number: 8>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12.0),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lte=11.9),
['<Number: 4>', '<Number: 8>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12),
['<Number: 4>', '<Number: 8>', '<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.0),
['<Number: 4>', '<Number: 8>', '<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.9),
['<Number: 4>', '<Number: 8>', '<Number: 12>']
)
def test_ticket7411(self):
# Saving to db must work even with partially read result set in another
# cursor.
for num in range(2 * ITER_CHUNK_SIZE + 1):
_ = Number.objects.create(num=num)
for i, obj in enumerate(Number.objects.all()):
obj.save()
if i > 10: break
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(BaseQuerysetTest):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_ticket8683(self):
# Raise proper error when a DateQuerySet gets passed a wrong type of
# field
self.assertRaisesMessage(
AssertionError,
"'name' isn't a DateField.",
Item.objects.dates, 'name', 'month'
)
class Queries4Tests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
self.t1 = Tag.objects.create(name='t1', category=generic)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
Report.objects.create(name='r1', creator=a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL. This exercises that case.
ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return u'' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = u''
else:
expected_null_charfield_repr = None
self.assertValueQuerysetEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
[u'e1', u'e2', expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertQuerysetEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
['<Report: r1>', '<Report: r2>', '<Report: r3>']
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
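        # The child row links person=m2 to parent=m1, so following
        # children -> person -> details from m1 must return m2's detail (d2),
        # not m1's own detail (d1).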
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, u'd2')
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by('name')
self.assertTrue('ORDER BY' in qs.query.get_compiler(qs.db).as_sql()[0])
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
class Queries5Tests(TestCase):
def setUp(self):
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
self.rank1 = Ranking.objects.create(rank=2, author=a2)
Ranking.objects.create(rank=1, author=a3)
Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
self.assertQuerysetEqual(
Ranking.objects.all().order_by('rank'),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
# fields and model fields in the ordering.
self.assertQuerysetEqual(
Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
self.assertEqual(
[o.good for o in qs.extra(order_by=('-good',))],
[True, False, False]
)
self.assertQuerysetEqual(
qs.extra(order_by=('-good', 'id')),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values('id', 'rank').order_by('id')
self.assertEqual(
[d.items()[1] for d in dicts],
[('rank', 2), ('rank', 1), ('rank', 3)]
)
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
dicts = qs.values().order_by('id')
        for d in dicts:
            del d['id']
            del d['author_id']
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=['django_site'])
qs.query.get_compiler(qs.db).as_sql()
# test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
# Make sure that the IDs from different tables don't happen to match.
self.assertQuerysetEqual(
Ranking.objects.filter(author__name='a1'),
['<Ranking: 3: a1>']
)
self.assertEqual(
Ranking.objects.filter(author__name='a1').update(rank='4'),
1
)
r = Ranking.objects.filter(author__name='a1')[0]
self.assertNotEqual(r.id, r.author.id)
self.assertEqual(r.rank, 4)
r.rank = 3
r.save()
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
def test_ticket5261(self):
self.assertQuerysetEqual(
Note.objects.exclude(Q()),
['<Note: n1>', '<Note: n2>']
)
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
_ = Plaything.objects.create(name="p1")
self.assertQuerysetEqual(
Plaything.objects.all(),
['<Plaything: p1>']
)
class DisjunctiveFilterTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
ExtraInfo.objects.create(info='e1', note=self.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
LeafA.objects.create(data='first')
self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
self.assertQuerysetEqual(
LeafA.objects.filter(Q(data='first')|Q(join__b__data='second')),
['<LeafA: first>']
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(note=self.n1)|ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
['<ExtraInfo: e1>']
)
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(info='e2')|ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
['<ExtraInfo: e1>']
)
class Queries6Tests(TestCase):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
t5 = Tag.objects.create(name='t5', parent=t3)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
ann1 = Annotation.objects.create(name='a1', tag=t1)
ann1.notes.add(n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
# FIXME!! This next test causes really weird PostgreSQL behaviour, but it's
# only apparent much later when the full test suite runs. I don't understand
# what's going on here yet.
##def test_slicing_and_cache_interaction(self):
## # We can do slicing beyond what is currently in the result cache,
## # too.
##
## # We need to mess with the implementation internals a bit here to decrease the
## # cache fill size so that we don't read all the results at once.
## from django.db.models import query
## query.ITER_CHUNK_SIZE = 2
## qs = Tag.objects.all()
##
## # Fill the cache with the first chunk.
## self.assertTrue(bool(qs))
## self.assertEqual(len(qs._result_cache), 2)
##
## # Query beyond the end of the cache and check that it is filled out as required.
## self.assertEqual(repr(qs[4]), '<Tag: t5>')
## self.assertEqual(len(qs._result_cache), 5)
##
## # But querying beyond the end of the result set will fail.
## self.assertRaises(IndexError, lambda: qs[100])
def test_parallel_iterators(self):
# Test that parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(i1.next()), '<Tag: t1>')
self.assertEqual(repr(i1.next()), '<Tag: t2>')
self.assertEqual(repr(i2.next()), '<Tag: t1>')
self.assertEqual(repr(i2.next()), '<Tag: t2>')
self.assertEqual(repr(i2.next()), '<Tag: t3>')
self.assertEqual(repr(i1.next()), '<Tag: t3>')
qs = X.objects.all()
self.assertEqual(bool(qs), False)
self.assertEqual(bool(qs), False)
def test_nested_queries_sql(self):
# Nested queries should not evaluate the inner query as part of constructing the
# SQL (so we should see a nested query here, indicated by two "SELECT" calls).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(
qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
2
)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# pre-emptively discovered cases).
self.assertQuerysetEqual(
PointerA.objects.filter(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
PointerA.objects.exclude(connection__pointerb__id=1),
[]
)
# This next makes exactly *zero* sense, but it works. It's needed
# because MySQL fails to give the right results the first time this
# query is executed. If you run the same query a second time, it
# works fine. It's a hack, but it works...
list(Tag.objects.exclude(children=None))
self.assertQuerysetEqual(
Tag.objects.exclude(children=None),
['<Tag: t1>', '<Tag: t3>']
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertQuerysetEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
        # The annotation->tag link is single valued and the tag->children link is
# multi-valued. So we have to split the exclude filter in the middle
# and then optimise the inner query without losing results.
self.assertQuerysetEqual(
Annotation.objects.exclude(tag__children__name="t2"),
['<Annotation: a2>']
)
# Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL).
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
['<Annotation: a1>']
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by('name')
self.assertTrue(q1 is not q1.all())
class GeneratorExpressionTests(TestCase):
def test_ticket10432(self):
# Using an empty generator expression as the rvalue for an "__in"
# lookup is legal.
self.assertQuerysetEqual(
Note.objects.filter(pk__in=(x for x in ())),
[]
)
class ComparisonTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
_ = Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
_ = Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="x%Y"),
['<Item: x%y>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__istartswith="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iendswith="A_b"),
['<Item: a_b>']
)
class ExistsSql(TestCase):
def setUp(self):
settings.DEBUG = True
def test_exists(self):
self.assertFalse(Tag.objects.exists())
# Ok - so the exist query worked - but did it include too many columns?
self.assertTrue("id" not in connection.queries[-1]['sql'] and "name" not in connection.queries[-1]['sql'])
def tearDown(self):
settings.DEBUG = False
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the Queryset.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertEqual(Tag.objects.all().ordered, True)
self.assertEqual(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)
def test_order_by_extra(self):
self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertEqual(qs.ordered, False)
self.assertEqual(qs.order_by('num_notes').ordered, True)
class SubqueryTests(TestCase):
def setUp(self):
DumbCategory.objects.create(id=1)
DumbCategory.objects.create(id=2)
DumbCategory.objects.create(id=3)
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
try:
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEquals(set(query.values_list('id', flat=True)), set([2,3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEquals(set(query.values_list('id', flat=True)), set([2,3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEquals(set(query.values_list('id', flat=True)), set([1]))
except DatabaseError:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
try:
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEquals(set(DumbCategory.objects.values_list('id', flat=True)), set([1,2]))
except DatabaseError:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
try:
self.assertEquals(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
except:
self.fail('Query should be clonable')
class EmptyQuerySetTests(TestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an EmptyQuerySet and then cloning that
        # should not cause an error.
self.assertQuerysetEqual(
Number.objects.none().values('num').order_by('num'), []
)
def test_values_subquery(self):
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")),
[]
)
class ValuesQuerysetTests(BaseQuerysetTest):
    def test_flat_values_lists(self):
Number.objects.create(num=72)
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertValueQuerysetEqual(
qs, [72]
)
class WeirdQuerysetSlicingTests(BaseQuerysetTest):
def setUp(self):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
Article.objects.create(name='three', created=datetime.datetime.now())
Article.objects.create(name='four', created=datetime.datetime.now())
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
self.assertRaisesMessage(
AssertionError,
'Cannot change a query once a slice has been taken.',
Article.objects.all()[:0].latest, 'created'
)
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
_ = ReservedName.objects.create(name='a', order=42)
ReservedName.objects.create(name='b', order=37)
self.assertQuerysetEqual(
ReservedName.objects.all().order_by('order'),
['<ReservedName: b>', '<ReservedName: a>']
)
self.assertQuerysetEqual(
ReservedName.objects.extra(select={'stuff':'name'}, order_by=('order','stuff')),
['<ReservedName: b>', '<ReservedName: a>']
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
set([lunch, dinner]),
)
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
set([apple, pear])
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food=apple)),
set([lunch, dinner])
)
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Food.objects.filter(eaten=lunch)),
set([apple])
)
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(parent=node1)),
[node2]
)
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(node=node2)),
[node1]
)
# In Python 2.6 beta releases, exceptions raised in __len__ are swallowed
# (Python issue 1242657), so these cases return an empty list, rather than
# raising an exception. Not a lot we can do about that, unfortunately, due to
# the way Python handles list() calls internally. Thus, we skip the tests for
# Python 2.6.
if sys.version_info[:2] != (2, 6):
class OrderingLoopTests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
t5 = Tag.objects.create(name='t5', parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
self.assertRaisesMessage(
FieldError,
'Infinite loop caused by ordering.',
lambda: list(LoopX.objects.all()) # Force queryset evaluation with list()
)
self.assertRaisesMessage(
FieldError,
'Infinite loop caused by ordering.',
lambda: list(LoopZ.objects.all()) # Force queryset evaluation with list()
)
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by('parent')), 5)
# ... but you can still order in a non-recursive fashion amongst linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(
LoopX.objects.all().order_by('y__x__y__x__id'),
[]
)
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == "django.db.backends.mysql":
class GroupingTests(TestCase):
def test_null_ordering_added(self):
query = Tag.objects.values_list('parent_id', flat=True).order_by().query
query.group_by = ['parent_id']
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
# Sqlite 3 does not support passing in more than 1000 parameters except by
# changing a parameter at compilation time.
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != "django.db.backends.sqlite3":
class InLookupTests(TestCase):
def test_ticket14244(self):
# Test that the "in" lookup works with lists of 1000 items or more.
Number.objects.all().delete()
numbers = range(2500)
for num in numbers:
_ = Number.objects.create(num=num)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1000]).count(),
1000
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1001]).count(),
1001
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:2000]).count(),
2000
)
self.assertEqual(
Number.objects.filter(num__in=numbers).count(),
2500
)
| bsd-3-clause | 1,431,622,991,955,776,300 | 8,837,603,053,665,845,000 | 39.661017 | 159 | 0.587075 | false |
ryanraaum/african-mtdna | popdata_sources/coelho2009/process.py | 1 | 2502 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
regionparts = metadata.ix[0,'SeqRange'].split(';')
region1 = range2region(regionparts[0])
region2 = range2region(regionparts[1])
with open('coelho2009_haplotypes.csv', 'rU') as f:
f.readline() # skip past header
data = f.readlines()
hids = []
hvr1sites = []
hvr2sites = []
for l in data:
parts = l.strip().split(',')
if int(parts[3]) == 377 and int(parts[7]) == 268:
hids.append(parts[0])
hvr1sites.append(parts[4])
hvr2sites.append(parts[8])
## need to preprocess sites data for some nonstandard notation in hvr2
hvr1 = []
hvr2 = []
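## hvr2 uses a nonstandard shorthand where a bare 'NNN.2C' insertion implies
## the matching 'NNN.1C' as well; expand it into both explicit entries so
## str2sites can parse the string (hvr1 is parsed directly with str2sites)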
for i in range(len(hids)):
s1 = str2sites(hvr1sites[i], add16k=True)
hvr1.append(s1)
s2 = hvr2sites[i].split()
s2new = []
for j in range(len(s2)):
if s2[j].endswith('.2C'):
parts = s2[j].split('.')
s2new.append('%s.1C' % parts[0])
s2new.append('%s.2C' % parts[0])
else:
s2new.append(s2[j])
s2 = str2sites(' '.join(s2new))
hvr2.append(s2)
newsites = []
for i in range(len(hvr1)):
newsites.append(hvr1[i] + hvr2[i])
## Validate
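## round-trip check: rebuild both region sequences from the recorded sites,
## convert them back with seq2sites, and flag any haplotype whose recovered
## sites or sequences disagree with the published ones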
passed_validation = True
for i in range(len(newsites)):
curr_sites = newsites[i]
seq1 = translate(sites2seq(curr_sites, region1), None, '-')
seq2 = translate(sites2seq(curr_sites, region2), None, '-')
	mysites = seq2sites(seq1) + seq2sites(seq2)
if not mysites == curr_sites:
myseq1 = translate(sites2seq(mysites, region1), None, '-')
myseq2 = translate(sites2seq(mysites, region2), None, '-')
		if not (seq1 == myseq1 and seq2 == myseq2):
passed_validation = False
print i, hids[i]
if passed_validation:
counts = pd.read_csv('coelho2009_counts.csv', index_col=0)
counts = counts.fillna(0)
counter = [0] * 5
with open('processed.csv', 'w') as f:
for i in range(len(newsites)):
hid = hids[i]
curr_sites = newsites[i]
seq1 = translate(sites2seq(curr_sites, region1), None, '-')
seq2 = translate(sites2seq(curr_sites, region2), None, '-')
mysites = seq2sites(seq1) + seq2sites(seq2)
mysites = ' '.join([str(x) for x in mysites])
for j in range(len(metadata.index)):
prefix = metadata.ix[metadata.index[j],'NewPrefix']
for k in range(int(counts.ix[hid, metadata.index[j]])):
counter[j] += 1
num = str(counter[j]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 | 8,990,435,987,189,967,000 | -8,512,252,083,284,681,000 | 27.770115 | 70 | 0.660671 | false |
maximeolivier/pyCAF | pycaf/architecture/devices/server_features/file.py | 1 | 2897 | #| This file is part of pyCAF. |
#| |
#| pyCAF is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| pyCAF is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
# -*- coding: utf-8 -*-
"""
Created on Tue May 20 15:01:43 2014
@author: thierry
Class file
"""
class File():
"""Definition of what is a file
@param path : full path of the file
    @param rights : a 10 char string which contains nature and rights (-rwxrwxrwx)
@param user : file owner
@param group : file group owner
@param time : the date of last modification
"""
def __init__(self, path, rights, user, group, time):
self.path = path
self.rights = rights
self.user = user
self.group = group
self.time = time
def __str__(self):
"""
Print file attributes
"""
return "%s%s%s%s%s" % (str(self.rights).ljust(15), str(self.user).ljust(20),
str(self.group).ljust(20), str(self.time).ljust(16), (str(self.path)).ljust(60)) | gpl-3.0 | -2,848,556,980,335,907,000 | -4,228,528,476,709,562,000 | 62 | 150 | 0.338281 | false |
dklempner/grpc | src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py | 23 | 2911 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for testing stream-related code."""
from grpc.framework.foundation import stream
class TestConsumer(stream.Consumer):
"""A stream.Consumer instrumented for testing.
Attributes:
calls: A sequence of value-termination pairs describing the history of calls
made on this object.
"""
def __init__(self):
self.calls = []
def consume(self, value):
"""See stream.Consumer.consume for specification."""
self.calls.append((value, False))
def terminate(self):
"""See stream.Consumer.terminate for specification."""
self.calls.append((None, True))
def consume_and_terminate(self, value):
"""See stream.Consumer.consume_and_terminate for specification."""
self.calls.append((value, True))
def is_legal(self):
"""Reports whether or not a legal sequence of calls has been made."""
terminated = False
for value, terminal in self.calls:
if terminated:
return False
elif terminal:
terminated = True
elif value is None:
return False
else: # pylint: disable=useless-else-on-loop
return True
def values(self):
"""Returns the sequence of values that have been passed to this Consumer."""
return [value for value, _ in self.calls if value]
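# Illustrative usage sketch (not part of the original module): a TestConsumer
# records every call so a test can assert on the exact sequence it received.
#   consumer = TestConsumer()
#   consumer.consume('a')
#   consumer.consume_and_terminate('b')
#   assert consumer.is_legal()
#   assert consumer.values() == ['a', 'b']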
| bsd-3-clause | -7,251,862,405,496,459,000 | -7,061,575,937,906,399,000 | 39.430556 | 84 | 0.705943 | false |
ironbox360/django | django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.3.2.
On PostGIS 2, this is a view.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
| bsd-3-clause | 7,806,279,909,515,007,000 | -6,496,408,483,492,047,000 | 28.561644 | 79 | 0.646432 | false |
ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/project/access_and_security/keypairs/tests.py | 7 | 10966 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import CreateKeypair
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import KEYPAIR_ERROR_MESSAGES
from openstack_dashboard.test import helpers as test
INDEX_VIEW_URL = reverse('horizon:project:access_and_security:index')
class KeyPairViewTests(test.TestCase):
def test_delete_keypair(self):
keypair = self.keypairs.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'keypair_delete')
# floating_ip_supported is called in Floating IP tab allowed().
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name)
self.mox.ReplayAll()
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_VIEW_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_VIEW_URL)
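    # Added commentary (not original test code): these tests follow mox's
    # record/replay pattern -- the api.* expectations above are recorded on the
    # stubs, ReplayAll() switches them to replay mode, and the recorded
    # expectations are verified by the test harness when the test completes.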
def test_delete_keypair_exception(self):
keypair = self.keypairs.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'keypair_delete')
# floating_ip_supported is called in Floating IP tab allowed().
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
formData = {'action': 'keypairs__delete__%s' % keypair.name}
res = self.client.post(INDEX_VIEW_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_VIEW_URL)
def test_create_keypair_get(self):
res = self.client.get(
reverse('horizon:project:access_and_security:keypairs:create'))
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/create.html')
def test_download_keypair_get(self):
keypair_name = "keypair"
context = {'keypair_name': keypair_name}
url = reverse('horizon:project:access_and_security:keypairs:download',
kwargs={'keypair_name': keypair_name})
res = self.client.get(url, context)
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/download.html')
def test_generate_keypair_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secret"
self.mox.StubOutWithMock(api.nova, 'keypair_create')
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertTrue(res.has_header('content-disposition'))
def test_keypair_detail_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secrete"
self.mox.StubOutWithMock(api.nova, 'keypair_get')
api.nova.keypair_get(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:detail',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
# Note(Itxaka): With breadcrumbs, the title is in a list as active
self.assertContains(res, '<li class="active">Key Pair Details</li>',
1, 200)
self.assertContains(res, "<dd>%s</dd>" % keypair.name, 1, 200)
@test.create_stubs({api.nova: ("keypair_create", "keypair_delete")})
def test_regenerate_keypair_get(self):
keypair = self.keypairs.first()
keypair.private_key = "secret"
optional_param = "regenerate"
api.nova.keypair_delete(IsA(http.HttpRequest), keypair.name)
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name,
'optional': optional_param})
res = self.client.get(url)
self.assertTrue(res.has_header('content-disposition'))
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair(self):
key1_name = "new_key_pair"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
api.nova.keypair_import(IsA(http.HttpRequest), key1_name,
public_key.replace("\r", "").replace("\n", ""))
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair_invalid_key(self):
key_name = "new_key_pair"
public_key = "ABCDEF"
api.nova.keypair_import(IsA(http.HttpRequest), key_name, public_key) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = 'Unable to import key pair.'
self.assertFormErrors(res, count=1, message=msg)
def test_import_keypair_invalid_key_name(self):
key_name = "invalid#key?name=!"
public_key = "ABCDEF"
formData = {'method': 'ImportKeypair',
'name': key_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = six.text_type(KEYPAIR_ERROR_MESSAGES['invalid'])
self.assertFormErrors(res, count=1, message=msg)
@test.create_stubs({api.nova: ("keypair_create",)})
def test_generate_keypair_exception(self):
keypair = self.keypairs.first()
api.nova.keypair_create(IsA(http.HttpRequest), keypair.name) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertRedirectsNoFollow(
res, reverse('horizon:project:access_and_security:index'))
@test.create_stubs({api.nova: ("keypair_import",)})
def test_import_keypair_with_regex_defined_name(self):
key1_name = "new-key-pair with_regex"
public_key = "ssh-rsa ABCDEFGHIJKLMNOPQR\r\n" \
"STUVWXYZ1234567890\r" \
"XXYYZZ user@computer\n\n"
api.nova.keypair_import(IsA(http.HttpRequest), key1_name,
public_key.replace("\r", "").replace("\n", ""))
self.mox.ReplayAll()
formData = {'method': 'ImportKeypair',
'name': key1_name,
'public_key': public_key}
url = reverse('horizon:project:access_and_security:keypairs:import')
res = self.client.post(url, formData)
self.assertMessageCount(res, success=1)
@test.create_stubs({api.nova: ("keypair_create",)})
def test_create_keypair_with_regex_name_get(self):
keypair = self.keypairs.first()
keypair.name = "key-space pair-regex_name-0123456789"
keypair.private_key = "secret"
api.nova.keypair_create(IsA(http.HttpRequest),
keypair.name).AndReturn(keypair)
self.mox.ReplayAll()
context = {'keypair_name': keypair.name}
url = reverse('horizon:project:access_and_security:keypairs:generate',
kwargs={'keypair_name': keypair.name})
res = self.client.get(url, context)
self.assertTrue(res.has_header('content-disposition'))
def test_download_with_regex_name_get(self):
keypair_name = "key pair-regex_name-0123456789"
context = {'keypair_name': keypair_name}
url = reverse('horizon:project:access_and_security:keypairs:download',
kwargs={'keypair_name': keypair_name})
res = self.client.get(url, context)
self.assertTemplateUsed(
res, 'project/access_and_security/keypairs/download.html')
@test.create_stubs({api.nova: ('keypair_list',)})
def test_create_duplicate_keypair(self):
keypair_name = self.keypairs.first().name
api.nova.keypair_list(IsA(http.HttpRequest)) \
.AndReturn(self.keypairs.list())
self.mox.ReplayAll()
form = CreateKeypair(self.request, data={'name': keypair_name})
self.assertFalse(form.is_valid())
self.assertIn('The name is already in use.',
form.errors['name'][0])
| apache-2.0 | -2,852,853,208,581,772,300 | -7,485,815,801,704,733,000 | 41.339768 | 79 | 0.625935 | false |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/PyAMF-0.6.1/pyamf/util/imports.py | 45 | 3707 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tools for doing dynamic imports.
@since: 0.3
"""
import sys
__all__ = ['when_imported']
def when_imported(name, *hooks):
"""
Call C{hook(module)} when module named C{name} is first imported. C{name}
must be a fully qualified (i.e. absolute) module name.
    C{hook} must accept one argument, which will be the imported module object.
    If the module has already been imported, 'hook(module)' is called
    immediately. If the module has not been imported, then the hook is called
    when the module is first imported.
"""
global finder
finder.when_imported(name, *hooks)
class ModuleFinder(object):
"""
This is a special module finder object that executes a collection of
callables when a specific module has been imported. An instance of this
is placed in C{sys.meta_path}, which is consulted before C{sys.modules} -
allowing us to provide this functionality.
@ivar post_load_hooks: C{dict} of C{full module path -> callable} to be
executed when the module is imported.
@ivar loaded_modules: C{list} of modules that this finder has seen. Used
to stop recursive imports in L{load_module}
@see: L{when_imported}
@since: 0.5
"""
def __init__(self):
self.post_load_hooks = {}
self.loaded_modules = []
def find_module(self, name, path=None):
"""
Called when an import is made. If there are hooks waiting for this
module to be imported then we stop the normal import process and
manually load the module.
@param name: The name of the module being imported.
        @param path: The root path of the module (if a package). We ignore this.
@return: If we want to hook this module, we return a C{loader}
            interface (which is this instance again). If not, we return C{None}
to allow the standard import process to continue.
"""
if name in self.loaded_modules:
return None
hooks = self.post_load_hooks.get(name, None)
if hooks:
return self
def load_module(self, name):
"""
If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import.
"""
self.loaded_modules.append(name)
try:
__import__(name, {}, {}, [])
mod = sys.modules[name]
self._run_hooks(name, mod)
except:
self.loaded_modules.pop()
raise
return mod
def when_imported(self, name, *hooks):
"""
@see: L{when_imported}
"""
if name in sys.modules:
for hook in hooks:
hook(sys.modules[name])
return
h = self.post_load_hooks.setdefault(name, [])
h.extend(hooks)
def _run_hooks(self, name, module):
"""
Run all hooks for a module.
"""
hooks = self.post_load_hooks.pop(name, [])
for hook in hooks:
hook(module)
def __getstate__(self):
return (self.post_load_hooks.copy(), self.loaded_modules[:])
def __setstate__(self, state):
self.post_load_hooks, self.loaded_modules = state
def _init():
"""
Internal function to install the module finder.
"""
global finder
if finder is None:
finder = ModuleFinder()
if finder not in sys.meta_path:
sys.meta_path.insert(0, finder)
finder = None
_init()
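# A minimal usage sketch, not part of the original PyAMF module; the 'json'
# module and the callback are illustrative only. The hook fires immediately if
# 'json' is already imported, otherwise on first import via the ModuleFinder
# registered on sys.meta_path.
if __name__ == '__main__':
    seen = []

    def _on_json(module):
        # called exactly once, with the imported module object
        seen.append(module.__name__)

    when_imported('json', _on_json)
    import json # noqa: F401 - triggers the hook (or it already fired above)
    assert seen == ['json']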
| mit | 8,619,981,937,289,349,000 | -2,965,873,891,471,190,500 | 26.058394 | 79 | 0.605611 | false |
Alwnikrotikz/chimerascan | chimerascan/deprecated/sam_v1.py | 6 | 8725 | '''
Created on Jun 2, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import operator
from chimerascan import pysam
from seq import DNA_reverse_complement
#
# constants used for CIGAR alignments
#
CIGAR_M = 0 #match Alignment match (can be a sequence match or mismatch)
CIGAR_I = 1 #insertion Insertion to the reference
CIGAR_D = 2 #deletion Deletion from the reference
CIGAR_N = 3 #skip Skipped region from the reference
CIGAR_S = 4 #softclip Soft clip on the read (clipped sequence present in <seq>)
CIGAR_H = 5 #hardclip Hard clip on the read (clipped sequence NOT present in <seq>)
CIGAR_P = 6 #padding Padding (silent deletion from the padded reference sequence)
def parse_reads_by_qname(samfh):
"""
generator function to parse and return lists of
reads that share the same qname
"""
reads = []
for read in samfh:
if len(reads) > 0 and read.qname != reads[-1].qname:
yield reads
reads = []
reads.append(read)
if len(reads) > 0:
yield reads
def parse_pe_reads(bamfh):
pe_reads = ([], [])
# reads must be sorted by qname
num_reads = 0
prev_qname = None
for read in bamfh:
# get read attributes
qname = read.qname
readnum = 1 if read.is_read2 else 0
# if query name changes we have completely finished
# the fragment and can reset the read data
if num_reads > 0 and qname != prev_qname:
yield pe_reads
# reset state variables
pe_reads = ([], [])
num_reads = 0
pe_reads[readnum].append(read)
prev_qname = qname
num_reads += 1
if num_reads > 0:
yield pe_reads
def parse_unpaired_pe_reads(bamfh):
"""
    Parses alignments that were aligned in single-read mode, where all hits
    are labeled as 'read1' and lack mate information. Instead, the read1/read2
    information is attached to the 'qname' field.
"""
pe_reads = ([], [])
num_reads = 0
prev_qname = None
for read in bamfh:
# extract read1/2 from qname
readnum = int(read.qname[-1])
if readnum == 1:
read.is_read1 = True
mate = 0
elif readnum == 2:
mate = 1
read.is_read2 = True
# reconstitute correct qname
qname = read.qname[:-2]
read.qname = qname
# if query name changes we have completely finished
# the fragment and can reset the read data
if num_reads > 0 and qname != prev_qname:
yield pe_reads
# reset state variables
pe_reads = ([], [])
num_reads = 0
pe_reads[mate].append(read)
prev_qname = qname
num_reads += 1
if num_reads > 0:
yield pe_reads
def group_read_pairs(pe_reads):
"""
    Given a tuple of ([read1 reads], [read2 reads]) paired-end read alignments,
    return mate pairs and unpaired reads
"""
# group paired reads
paired_reads = ([],[])
unpaired_reads = ([],[])
for rnum,reads in enumerate(pe_reads):
for r in reads:
if r.is_proper_pair:
paired_reads[rnum].append(r)
else:
unpaired_reads[rnum].append(r)
# check if we have at least one pair
pairs = []
if all((len(reads) > 0) for reads in paired_reads):
# index read1 by mate reference name and position
rdict = {}
for r in paired_reads[0]:
rdict[(r.mrnm,r.mpos)] = r
# iterate through read2 and get mate pairs
for r2 in paired_reads[1]:
            r1 = rdict[(r2.rname, r2.pos)]
pairs.append((r1,r2))
return pairs, unpaired_reads
def select_best_scoring_pairs(pairs):
"""
return the set of read pairs (provided as a list of tuples) with
the highest summed alignment score
"""
if len(pairs) == 0:
return []
# gather alignment scores for each pair
pair_scores = [(pair[0].opt('AS') + pair[1].opt('AS'), pair) for pair in pairs]
    # sort descending so the highest-scoring pair comes first
    pair_scores.sort(key=operator.itemgetter(0), reverse=True)
best_score = pair_scores[0][0]
best_pairs = [pair_scores[0][1]]
for score,pair in pair_scores[1:]:
if score < best_score:
break
best_pairs.append(pair)
return best_pairs
def select_primary_alignments(reads):
"""
return only reads that lack the secondary alignment bit
"""
if len(reads) == 0:
return []
# sort reads by number of mismatches
unmapped_reads = []
primary_reads = []
for r in reads:
if r.is_unmapped:
unmapped_reads.append(r)
elif not r.is_secondary:
primary_reads.append(r)
if len(primary_reads) == 0:
assert len(unmapped_reads) > 0
return unmapped_reads
return primary_reads
def select_best_mismatch_strata(reads, mismatch_tolerance=0):
if len(reads) == 0:
return []
# sort reads by number of mismatches
mapped_reads = []
unmapped_reads = []
for r in reads:
if r.is_unmapped:
unmapped_reads.append(r)
else:
mapped_reads.append((r.opt('NM'), r))
if len(mapped_reads) == 0:
return unmapped_reads
sorted_reads = sorted(mapped_reads, key=operator.itemgetter(0))
best_nm = sorted_reads[0][0]
worst_nm = sorted_reads[-1][0]
sorted_reads.extend((worst_nm+1, r) for r in unmapped_reads)
# choose reads within a certain mismatch tolerance
best_reads = []
for mismatches, r in sorted_reads:
if mismatches > (best_nm + mismatch_tolerance):
break
best_reads.append(r)
return best_reads
def copy_read(r):
a = pysam.AlignedRead()
a.qname = r.qname
a.seq = r.seq
a.flag = r.flag
a.rname = r.rname
a.pos = r.pos
a.mapq = r.mapq
a.cigar = r.cigar
a.mrnm = r.mrnm
a.mpos = r.mpos
a.isize = r.isize
a.qual = r.qual
a.tags = r.tags
return a
def soft_pad_read(fq, r):
"""
'fq' is the fastq record
    'r' is the AlignedRead SAM read
"""
# make sequence soft clipped
ext_length = len(fq.seq) - len(r.seq)
cigar_softclip = [(CIGAR_S, ext_length)]
cigar = r.cigar
# reconstitute full length sequence in read
if r.is_reverse:
seq = DNA_reverse_complement(fq.seq)
qual = fq.qual[::-1]
if (cigar is not None) and (ext_length > 0):
cigar = cigar_softclip + cigar
else:
seq = fq.seq
qual = fq.qual
if (cigar is not None) and (ext_length > 0):
cigar = cigar + cigar_softclip
# replace read field
r.seq = seq
r.qual = qual
r.cigar = cigar
def pair_reads(r1, r2, tags=None):
'''
fill in paired-end fields in SAM record
'''
if tags is None:
tags = []
# convert read1 to paired-end
r1.is_paired = True
r1.is_proper_pair = True
r1.is_read1 = True
r1.mate_is_reverse = r2.is_reverse
r1.mate_is_unmapped = r2.is_unmapped
r1.mpos = r2.pos
r1.mrnm = r2.rname
r1.tags = r1.tags + tags
# convert read2 to paired-end
r2.is_paired = True
r2.is_proper_pair = True
r2.is_read2 = True
r2.mate_is_reverse = r1.is_reverse
r2.mate_is_unmapped = r1.is_unmapped
r2.mpos = r1.pos
r2.mrnm = r1.rname
r2.tags = r2.tags + tags
# compute insert size
if r1.rname != r2.rname:
r1.isize = 0
r2.isize = 0
elif r1.pos > r2.pos:
isize = r1.aend - r2.pos
r1.isize = -isize
r2.isize = isize
else:
isize = r2.aend - r1.pos
r1.isize = isize
r2.isize = -isize
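# Worked example for the insert-size logic above (hypothetical coordinates):
# with r1 at pos=100/aend=176 and r2 at pos=300/aend=376 on the same reference,
# the final branch gives isize = 376 - 100 = 276, so r1.isize = 276 and
# r2.isize = -276; mates on different references both get isize = 0.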
def get_clipped_interval(r):
cigar = r.cigar
padstart, padend = r.pos, r.aend
if len(cigar) > 1:
if (cigar[0][0] == CIGAR_S or
cigar[0][0] == CIGAR_H):
padstart -= cigar[0][1]
elif (cigar[-1][0] == CIGAR_S or
cigar[-1][0] == CIGAR_H):
padend += cigar[-1][1]
return padstart, padend
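# A minimal usage sketch (not part of the original module; the BAM file name is
# hypothetical and the input must be grouped by qname). Old pysam releases, as
# used here, open files with pysam.Samfile; newer ones call it pysam.AlignmentFile:
#
#     bamfh = pysam.Samfile("aligned_by_qname.bam", "rb")
#     for r1_hits, r2_hits in parse_pe_reads(bamfh):
#         pairs, unpaired = group_read_pairs((r1_hits, r2_hits))
#         best_pairs = select_best_scoring_pairs(pairs)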
| gpl-3.0 | 5,478,865,551,829,399,000 | -1,732,602,783,844,014,300 | 29.295139 | 84 | 0.594842 | false |
mrry/tensorflow | tensorflow/models/embedding/word2vec_optimized_test.py | 31 | 2294 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for word2vec_optimized module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.models.embedding import word2vec_optimized
flags = tf.app.flags
FLAGS = flags.FLAGS
class Word2VecTest(tf.test.TestCase):
def setUp(self):
    FLAGS.train_data = os.path.join(self.get_temp_dir(), "test-text.txt")
    FLAGS.eval_data = os.path.join(self.get_temp_dir(), "eval-text.txt")
FLAGS.save_path = self.get_temp_dir()
with open(FLAGS.train_data, "w") as f:
f.write(
"""alice was beginning to get very tired of sitting by her sister on
the bank, and of having nothing to do: once or twice she had peeped
into the book her sister was reading, but it had no pictures or
conversations in it, 'and what is the use of a book,' thought alice
'without pictures or conversations?' So she was considering in her own
mind (as well as she could, for the hot day made her feel very sleepy
and stupid), whether the pleasure of making a daisy-chain would be
worth the trouble of getting up and picking the daisies, when suddenly
a White rabbit with pink eyes ran close by her.\n""")
with open(FLAGS.eval_data, "w") as f:
f.write("alice she rabbit once\n")
def testWord2VecOptimized(self):
FLAGS.batch_size = 5
FLAGS.num_neg_samples = 10
FLAGS.epochs_to_train = 1
FLAGS.min_count = 0
word2vec_optimized.main([])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 2,519,815,345,246,456,300 | -3,144,053,570,072,421,000 | 36 | 80 | 0.671752 | false |
jamilatta/scielo-manager | scielomanager/scielomanager/utils/modelmanagers/base.py | 2 | 1789 | # coding: utf-8
"""
The UserObjectManager interface
===============================
Each model object that aims to be contextualized by the current
app user and the defined visibility rules must provide a
manager called ``userobjects`` following the context protocol:
Custom instance of ``models.Manager``
-------------------------------------
* ``get_query_set`` returns a custom subclass of models.query.QuerySet;
* ``all`` returns all objects the user can access;
* ``active`` returns a subset of ``all``, only with objects from
the active collection.
Custom instance of ``models.query.QuerySet``
--------------------------------------------
* ``all`` returns all objects the user can access;
* ``active`` returns all objects from the active collection.
* ``startswith`` (optional) returns all objects with the given
  initial char in a meaningful field. This is used for sorting
and presentation purposes.
* ``simple_search`` (optional) performs a simple search query on one or more
  meaningful fields. Accepts only one string as the search term.
* ``available`` returns all objects not marked as trash.
* ``unavailable`` returns all objects marked as trash.
"""
import caching.base
class UserObjectQuerySet(caching.base.CachingQuerySet):
"""
Provides a basic implementation of userobject querysets with
caching features.
"""
def available(self):
return self
def unavailable(self):
return self.none()
class UserObjectManager(caching.base.CachingManager):
"""
Provides a basic implementation of userobject managers with
caching features.
"""
def all(self, **kwargs):
return self.get_query_set().all(**kwargs)
def active(self, **kwargs):
return self.get_query_set().active(**kwargs)
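# A minimal sketch of wiring a model to the ``userobjects`` protocol described
# in the module docstring; it is not part of the original module. 'Journal',
# 'is_trashed' and the filtering shown are hypothetical, and how the current
# user/collection is obtained is deliberately left out:
#
#     class JournalQuerySet(UserObjectQuerySet):
#         def available(self):
#             return self.filter(is_trashed=False)
#
#         def unavailable(self):
#             return self.filter(is_trashed=True)
#
#     class JournalManager(UserObjectManager):
#         def get_query_set(self):
#             return JournalQuerySet(self.model, using=self._db)
#
#     class Journal(models.Model):
#         is_trashed = models.BooleanField(default=False)
#         userobjects = JournalManager()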
| bsd-2-clause | -2,886,954,484,633,015,300 | 4,248,576,884,495,126,500 | 30.385965 | 76 | 0.674679 | false |
tyndyll/py-morsecode | docs/source/conf.py | 1 | 8359 | # -*- coding: utf-8 -*-
#
# MorseCode documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 26 16:14:19 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../.."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MorseCode'
copyright = u'2013, Tyndyll'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MorseCodedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MorseCode.tex', u'MorseCode Documentation',
u'Tyndyll', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'morsecode', u'MorseCode Documentation',
[u'Tyndyll'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MorseCode', u'MorseCode Documentation',
u'Tyndyll', 'MorseCode', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| bsd-2-clause | 8,226,007,835,419,042,000 | -5,991,909,378,917,693,000 | 30.424812 | 79 | 0.707501 | false |
praveenkumar/dorrie | dorrie/comps/models.py | 1 | 1663 | # Dorrie - Web interface for building Fedora Spins/Remixes.
# Copyright (C) 2009 Red Hat Inc.
# Author: Shreyank Gupta <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
class Spin(models.Model):
"""Class for the releases"""
name = models.TextField(
help_text="The name of the spin.")
language = models.TextField()
timezone = models.TextField()
rootpwd = models.TextField()
baseks = models.TextField()
gplus = models.ManyToManyField('Group', related_name='gplus_set')
gminus = models.ManyToManyField('Group', related_name='gminus_set')
pplus = models.ManyToManyField('Package', related_name='pplus_set')
pminus = models.ManyToManyField('Package', related_name='pminus_set')
pid = models.IntegerField(default=0)
class Group(models.Model):
"""Package Groups"""
name = models.TextField(help_text="The name of the package group.")
class Package(models.Model):
"""A Package."""
name = models.TextField(help_text="The name of the package.")
| agpl-3.0 | 8,830,248,575,451,524,000 | -140,338,237,179,942,180 | 37.674419 | 74 | 0.72279 | false |
cad-lab/blog | pelicanconf.py | 1 | 2136 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'kmol'
SITENAME = 'CADLab (虎尾科大MDE)'
#SITEURL = 'http://cad-lab.github.io/blog/'
# Do not use the directory containing the article as its category
USE_FOLDER_AS_CATEGORY = False
#PATH = 'content'
#OUTPUT_PATH = 'output'
TIMEZONE = 'Asia/Taipei'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('pelican-bootstrap3', 'https://github.com/DandyDev/pelican-bootstrap3/'),
('pelican-plugins', 'https://github.com/getpelican/pelican-plugins'),
('Tipue search', 'https://github.com/Tipue/Tipue-Search'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Must be an absolute path, or a path relative to the directory containing this settings file
PLUGIN_PATHS = ['plugin']
PLUGINS = ['liquid_tags.notebook', 'summary', 'tipue_search', 'sitemap', 'render_math']
# for sitemap plugin
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.5,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
}
}
# search is for Tipue search
DIRECT_TEMPLATES = (('index', 'tags', 'categories', 'authors', 'archives', 'search'))
# for pelican-bootstrap3 theme settings
#TAG_CLOUD_MAX_ITEMS = 50
DISPLAY_CATEGORIES_ON_SIDEBAR = True
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
TAGS_URL = "tags.html"
CATEGORIES_URL = "categories.html"
#SHOW_ARTICLE_AUTHOR = True
#MENUITEMS = [('Home', '/'), ('Archives', '/archives.html'), ('Search', '/search.html')]
# Keep the latest versions of some commonly used JavaScript libraries here; they can be loaded via http://cadlab.mde.tw/post/js/
STATIC_PATHS = ['js', 'by'] | agpl-3.0 | 4,664,584,497,725,401,000 | 4,772,400,597,901,071,000 | 25.92 | 88 | 0.659564 | false |
chongtianfeiyu/kbengine | kbe/src/lib/python/Lib/encodings/cp858.py | 270 | 34015 | """ Python Character Mapping Codec for CP858, modified from cp850.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp858',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
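# Usage note (not part of the generated codec tables): once registered through
# the encodings package, CP858 differs from CP850 by mapping 0xD5 to the euro
# sign, so "\u20ac".encode("cp858") == b"\xd5" and
# b"\xd5".decode("cp858") == "\u20ac".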
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x20ac, # EURO SIGN
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\xae' # 0x00a9 -> REGISTERED SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xa9' # 0x00b8 -> COPYRIGHT SIGN
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\xa2' # 0x00bd -> CENT SIGN
'\xa5' # 0x00be -> YEN SIGN
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
'\u20ac' # 0x00d5 -> EURO SIGN
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\xa6' # 0x00dd -> BROKEN BAR
'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xb5' # 0x00e6 -> MICRO SIGN
'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xaf' # 0x00ee -> MACRON
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2017' # 0x00f2 -> DOUBLE LOW LINE
'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
'\xb6' # 0x00f4 -> PILCROW SIGN
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\xb7' # 0x00fa -> MIDDLE DOT
'\xb9' # 0x00fb -> SUPERSCRIPT ONE
'\xb3' # 0x00fc -> SUPERSCRIPT THREE
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x20ac: 0x00d5, # EURO SIGN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x2017: 0x00f2, # DOUBLE LOW LINE
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| lgpl-3.0 | -3,537,223,360,390,411,000 | -3,216,209,118,140,835,000 | 47.732092 | 73 | 0.597766 | false |
OSSystems/lava-server | dashboard_app/migrations/0003_add_index_HardwareDevice_device_type.py | 1 | 13377 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'HardwareDevice', fields ['device_type', 'id']
db.create_index('dashboard_app_hardwaredevice', ['device_type', 'id'])
def backwards(self, orm):
# Removing index on 'HardwareDevice', fields ['device_type', 'id']
db.delete_index('dashboard_app_hardwaredevice', ['device_type', 'id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dashboard_app.attachment': {
'Meta': {'object_name': 'Attachment'},
'content': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'content_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'public_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'blank': 'True'})
},
'dashboard_app.bundle': {
'Meta': {'ordering': "['-uploaded_on']", 'object_name': 'Bundle'},
'bundle_stream': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bundles'", 'to': "orm['dashboard_app.BundleStream']"}),
'content': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'content_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'content_sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deserialized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'uploaded_bundles'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
'dashboard_app.bundledeserializationerror': {
'Meta': {'object_name': 'BundleDeserializationError'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deserialization_error'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['dashboard_app.Bundle']"}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'traceback': ('django.db.models.fields.TextField', [], {'max_length': '32768'})
},
'dashboard_app.bundlestream': {
'Meta': {'object_name': 'BundleStream'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'pathname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'dashboard_app.hardwaredevice': {
'Meta': {'object_name': 'HardwareDevice'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'device_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'dashboard_app.namedattribute': {
'Meta': {'unique_together': "(('object_id', 'name'),)", 'object_name': 'NamedAttribute'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'dashboard_app.softwarepackage': {
'Meta': {'unique_together': "(('name', 'version'),)", 'object_name': 'SoftwarePackage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dashboard_app.softwaresource': {
'Meta': {'object_name': 'SoftwareSource'},
'branch_revision': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'branch_url': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'branch_vcs': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'commit_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'dashboard_app.test': {
'Meta': {'object_name': 'Test'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'test_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'dashboard_app.testcase': {
'Meta': {'unique_together': "(('test', 'test_case_id'),)", 'object_name': 'TestCase'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_cases'", 'to': "orm['dashboard_app.Test']"}),
'test_case_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'dashboard_app.testresult': {
'Meta': {'ordering': "('_order',)", 'object_name': 'TestResult'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lineno': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'measurement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '10', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'microseconds': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'relative_index': ('django.db.models.fields.PositiveIntegerField', [], {}),
'result': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'test_case': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'test_results'", 'null': 'True', 'to': "orm['dashboard_app.TestCase']"}),
'test_run': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_results'", 'to': "orm['dashboard_app.TestRun']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'dashboard_app.testrun': {
'Meta': {'ordering': "['-import_assigned_date']", 'object_name': 'TestRun'},
'analyzer_assigned_date': ('django.db.models.fields.DateTimeField', [], {}),
'analyzer_assigned_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}),
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_runs'", 'to': "orm['dashboard_app.Bundle']"}),
'devices': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'test_runs'", 'blank': 'True', 'to': "orm['dashboard_app.HardwareDevice']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_assigned_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'test_runs'", 'blank': 'True', 'to': "orm['dashboard_app.SoftwarePackage']"}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'test_runs'", 'blank': 'True', 'to': "orm['dashboard_app.SoftwareSource']"}),
'sw_image_desc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_runs'", 'to': "orm['dashboard_app.Test']"}),
'time_check_performed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['dashboard_app']
| agpl-3.0 | -6,435,784,412,863,363,000 | -1,666,859,975,896,525,000 | 78.153846 | 200 | 0.556627 | false |
zvoase/twactor | twactor/cache.py | 1 | 17379 | # -*- coding:utf-8 -*-
# twactor.cache - Cache framework for twactor.
import operator
import time
try:
import threading
except ImportError:
import dummy_threading as threading
from twactor import connection, function_sync, propertyfix
class CachedMetaclass(type):
"""Metaclass for subclasses of ``CachedObject``."""
def __new__(cls, name, bases, attrs):
# Fix _update_cache
update_cache = attrs.get('_update_cache', lambda *args, **kwargs: None)
def fixed_update_cache(self, *args, **kwargs):
val = update_cache(self, *args, **kwargs)
if hasattr(bases[-1], '_update_cache'):
bases[-1]._update_cache(self, *args, **kwargs)
return val
attrs['_update_cache'] = function_sync(update_cache, fixed_update_cache)
# Fix __init__
init = attrs.get('__init__', lambda *args, **kwargs: None)
def fixed_init(self, *args, **kwargs):
if hasattr(bases[-1], '__init__') and bases[-1] is not object:
bases[-1].__init__(self, *args, **kwargs)
init(self, *args, **kwargs)
attrs['__init__'] = function_sync(init, fixed_init)
return type.__new__(cls, name, bases, attrs)
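# Illustrative note, not part of the original module: with this metaclass a
# subclass such as
#
#     class Tweet(CachedObject):
#         def _update_cache(self):
#             self._cache = fetch_latest()   # hypothetical fetch helper
#
# ends up with an ``_update_cache`` that runs the body above and then
# ``CachedObject._update_cache``, which bumps the ``__count``/``__time``
# bookkeeping; ``__init__`` is chained the other way round, base class first.
# ``Tweet`` and ``fetch_latest`` are made-up names used only for illustration.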
class CachedObject(object):
"""Superclass for cached objects."""
__metaclass__ = CachedMetaclass
_connection_broker = connection.DEFAULT_CB
def __init__(self, *args, **kwargs):
self._cache = kwargs.pop('cache', {})
self._updated = kwargs.pop('_updated', {'__count': 0, '__time': 0})
def _update_cache(self, *args, **kwargs):
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
def _with_connection_broker(self, cb):
copy = self._copy()
copy._connection_broker = cb
return copy
def _copy(self):
        return type(self)(self._cache.get('id', None), cache=self._cache.copy(),
            _updated=self._updated.copy())
class CachedMirror(object):
"""Superclass for objects which rely on another object's cache."""
def __init__(self, mirrored_object):
setattr(self, self._mirrored_attribute, mirrored_object)
self._mirrored = mirrored_object
def mirror_attribute(attribute):
"""Shortcut for mirroring an attribute on another object."""
def attr_methods():
def fget(self):
return reduce(getattr, attribute.split('.'), self)
            def fset(self, value):
                # setattr/delattr need the final attribute name, not the dotted path.
                setattr(reduce(getattr, attribute.split('.')[:-1], self),
                    attribute.split('.')[-1], value)
            def fdel(self):
                delattr(reduce(getattr, attribute.split('.')[:-1], self),
                    attribute.split('.')[-1])
return {'fget': fget, 'fset': fset, 'fdel': fdel}
return property(**attr_methods())
_cache = mirror_attribute('_mirrored._cache')
_update_cache = mirror_attribute('_mirrored._update_cache')
_updated = mirror_attribute('_mirrored._updated')
del mirror_attribute
class CachedListMetaclass(type):
def __new__(cls, name, bases, attrs):
# Fix __init__
init = attrs.get('__init__', lambda *args, **kwargs: None)
def fixed_init(self, *args, **kwargs):
for base in reversed(bases):
if base is object:
break
base.__init__(self, *args, **kwargs)
init(self, *args, **kwargs)
attrs['__init__'] = function_sync(init, fixed_init)
# Fix _update_cache
update_cache = attrs.get('_update_cache', None)
if not update_cache:
for base in reversed(bases):
if hasattr(base, '_update_cache'):
update_cache = base._update_cache
break
if update_cache:
def fixed_update_cache(self, *args, **kwargs):
data = update_cache(self, *args, **kwargs)
for base in reversed(bases):
if hasattr(base, '_insert_into_cache'):
base._insert_into_cache(self, data)
break
attrs['_update_cache'] = function_sync(update_cache,
fixed_update_cache)
return type.__new__(cls, name, bases, attrs)
class CachedList(object):
__metaclass__ = CachedListMetaclass
_connection_broker = connection.DEFAULT_CB
_sort_attrs = ('created', 'id')
_reverse_class = None
OBJ_CLASS = lambda cache: cache
UPDATE_INTERVAL = 60 * 3 # Three-minute update interval by default.
def __init__(self, *args, **kwargs):
self._cache = kwargs.pop('cache', [])
self._object_cache = kwargs.pop('object_cache', {})
self._updated = kwargs.pop('updated', {'__count': 0, '__time': 0})
self.update_monitor = CachedListUpdateMonitorThread(self)
def __getitem__(self, pos_or_slice):
if isinstance(pos_or_slice, (int, long)):
return self._cache_to_obj(
self._cache[self._resolve_cache_index(pos_or_slice)])
start, stop, step = [getattr(pos_or_slice, attr)
for attr in ('start', 'stop', 'step')]
start = self._resolve_cache_index(start, start=True)
stop = self._resolve_cache_index(stop, start=False)
new_cache = map(self._cache.__getitem__, range(start, stop, step or 1))
new_updated = {'__count': self._updated['__count'],
'__time': self._updated['__time']}
for item in new_cache:
count_key = '%s__count' % (item.get('id', repr(item)))
time_key = '%s__time' % (item.get('id', repr(item)))
new_updated[count_key] = self._updated.get(count_key, None)
new_updated[time_key] = self._updated.get(time_key, None)
return type(self)(
cache=new_cache, updated=new_updated)._with_connection_broker(
self._connection_broker)
def __delitem__(self, pos_or_slice):
raise NotImplementedError
def __iter__(self):
for item in self._cache:
yield self._cache_to_obj(item)
def __reversed__(self):
raise NotImplementedError
def __contains__(self, obj):
if not isinstance(obj, self.OBJ_CLASS):
return False
return obj.id in (obj2.id for obj2 in self._objects)
def __len__(self):
raise NotImplementedError
def _cache_to_obj(self, cache_item):
if 'id' in cache_item and cache_item['id'] in self._object_cache:
obj = self._object_cache[cache_item['id']]
elif 'id' in cache_item and cache_item['id'] not in self._object_cache:
obj = self.OBJ_CLASS(cache_item['id'], cache=cache_item)
self._object_cache[cache_item['id']] = obj
else:
obj = self.OBJ_CLASS(None, cache=cache_item)
self._object_cache[repr(obj)] = obj
if hasattr(obj, '_with_connection_broker'):
return obj._with_connection_broker(self._connection_broker)
return obj
def _clean_object_cache(self):
obj_cache_ids = self._object_cache.keys()
data_cache_ids = map(operator.attrgetter('id'), self._objects)
for obj_id in obj_cache_ids:
if obj_id not in data_cache_ids:
                del self._object_cache[obj_id]
def _copy(self):
copy = type(self)(cache=self._cache[:],
updated=self._updated.copy())
copy._connection_broker = self._connection_broker
return copy
@property
def _objects(self):
return map(self._cache_to_obj, self._cache)
def _resolve_cache_index(self, index, start=True):
if index < 0:
old_length, length = None, len(self._cache)
while (old_length != length):
old_length = length
self._update_cache()
length = len(self._cache)
if abs(index) <= length:
return length + index
raise IndexError('list index out of range')
elif (not index) and (index != 0):
return 0 if start else (len(self._cache) - 1)
elif index < len(self._cache):
return index
old_length, length = None, len(self._cache)
while (index >= length) and (old_length != length):
old_length = length
self._update_cache()
length = len(self._cache)
if old_length == length:
raise IndexError('list index out of range')
return index
def _sort_key(self, item):
return operator.attrgetter(*self._sort_attrs)(item)
def _with_connection_broker(self, connection_broker):
copy = self._copy()
copy._connection_broker = connection_broker
return copy
class CachedListUpdateMonitorThread(threading.Thread):
def __init__(self, object, *args, **kwargs):
super(CachedListUpdateMonitorThread, self).__init__(
*args, **kwargs)
self.object = object
self.kill_flag = False
def run(self):
while not self.kill_flag:
self.object._update_cache()
time.sleep(self.object.UPDATE_INTERVAL)
self.kill_flag = False
def stop(self):
self.kill_flag = True
class ForwardCachedList(CachedList):
def _insert_into_cache(self, fetched_data):
if not fetched_data:
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
return
fetched_objects = zip(fetched_data,
map(self._cache_to_obj, fetched_data))
sorted_objects = sorted(fetched_objects,
key=lambda pair: self._sort_key(pair[1]))
timestamp = time.time()
if not self._cache:
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
self._cache.extend(pair[0] for pair in sorted_objects)
else:
latest_key = self._sort_key(self._cache_to_obj(self._cache[-1]))
add_to_cache = self._sort_key(sorted_objects[0][1]) > latest_key
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
if add_to_cache or (self._sort_key(object) > latest_key):
self._cache.append(data)
if self._sort_key(object) >= latest_key:
add_to_cache = True
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
self._clean_object_cache()
class ReverseCachedList(CachedList):
def _insert_into_cache(self, fetched_data):
if not fetched_data:
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
return
fetched_objects = zip(fetched_data,
map(self._cache_to_obj, fetched_data))
sorted_objects = sorted(fetched_objects, reverse=True,
key=lambda pair: self._sort_key(pair[1]))
timestamp = time.time()
if not self._cache:
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
self._cache.extend(pair[0] for pair in sorted_objects)
else:
latest_key = self._sort_key(self._cache_to_obj(self._cache[-1]))
add_to_cache = self._sort_key(sorted_objects[0][1]) < latest_key
for data, object in sorted_objects:
count_key = '%s__count' % (getattr(object, 'id', repr(object)),)
time_key = '%s__time' % (getattr(object, 'id', repr(object)),)
self._updated[count_key] = self._updated.get(count_key, 0) + 1
self._updated[time_key] = timestamp
if add_to_cache or (self._sort_key(object) < latest_key):
self._cache.append(data)
if self._sort_key(object) <= latest_key:
add_to_cache = True
self._updated['__count'] = self._updated.get('__count', 0) + 1
self._updated['__time'] = time.time()
self._clean_object_cache()
def update_once(method):
"""
Make sure the cache has been updated at least once before calling a method.
This should be used as a decorator, and it wraps a method on a cached object
to make sure that the object's cache has been updated at least once before
the method is called. This allows you to implement lazy evaluation, which
is especially useful when fetching data over the network.
"""
def wrapper(self, *args, **kwargs):
if not self._updated.get('__count', 0):
self._update_cache()
self._updated['__count'] = self._updated.get('__count', 0) + 1
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
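# Usage sketch (assumed, not taken from this module): ``update_once`` is meant
# to sit under ``property`` so the first attribute access triggers a fetch:
#
#     class User(CachedObject):
#         @property
#         @update_once
#         def name(self):
#             return self._cache['name']
#
# ``User`` and the ``'name'`` cache key are hypothetical placeholders.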
def update_on_key(key, always=False):
"""
Make sure the cache has a particular key present before calling a method.
This decorator accepts a key which it will look up in the cache before
calling the wrapped method. If the cache doesn't have the key, it will
perform an update before calling the method. Note that it does not keep
updating the cache until the key is present - this may result in a
non-terminating loop.
You may also pass the decorator an additional keyword, ``always``, which
will tell it whether or not to keep checking for the key every time the
method is called. By default, this is ``False``, which means that the key
will be checked only the first time the method is called. If set to true,
the key will be checked *every* time the method is called.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if always:
if key not in self._cache:
self._update_cache()
return method(self, *args, **kwargs)
elif (key not in self._cache and
(not self._updated.get('key__' + key, False))):
self._update_cache()
self._updated['key__' + key] = True
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
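# Hedged example of ``update_on_key``: refresh only when the backing key has
# never been seen, or on every access when ``always=True``:
#
#     @property
#     @update_on_key('description')
#     def description(self):
#         return self._cache['description']
#
# The ``'description'`` key is an illustrative assumption, not a real field of
# any class defined here.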
def update_on_time(length):
"""
Update the cache if an amount of time has passed before calling a method.
This decorator accepts a length of time in seconds, and will wrap a method
with a cache-checker. Every time the method is called, the wrapper will
check to see that a certain amount of time has passed. If the time that has
passed is greater than or equal to the specified length, the cache is
updated. Finally, the method is called.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if (time.time() - self._updated.get('__time', 0)) >= length:
self._update_cache()
self._updated['__time'] = time.time()
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
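# Sketch: throttling refreshes to at most once per minute could look like
#
#     @property
#     @update_on_time(60)
#     def followers_count(self):
#         return self._cache['followers_count']
#
# The attribute name and the 60-second interval are assumptions used only for
# illustration.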
def update_on_count(num):
"""
Update the cache if a method has been called a certain number of times.
This decorator accepts a number, and keeps track of how many times the
method it is wrapping has been called. When the number of calls reaches this
number, the cache is updated.
"""
def wrapper_deco(method):
def wrapper(self, *args, **kwargs):
if self._updated.get('count__' + method.__name__, num) == num:
self._update_cache()
self._updated['count__' + method.__name__] = 1
else:
                self._updated['count__' + method.__name__] = self._updated.get(
                    'count__' + method.__name__, 0) + 1
return method(self, *args, **kwargs)
return function_sync(method, wrapper)
return wrapper_deco
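# Sketch: forcing a refresh on every tenth call of a method; the counter is
# kept per method name inside ``self._updated``:
#
#     @update_on_count(10)
#     def latest(self):
#         return self._cache.get('latest')
#
# ``latest`` is a hypothetical method used only to show the wrapper in place.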
def simple_map(key):
"""
Shortcut for a typical cacheing use-case.
This is a shortcut for the following pattern::
class SomeCachedObject(CachedObject):
@property
@update_on_key(key_name)
def attrname(self):
return self._cache[key_name]
Instead you can do this::
class SomeCachedObject(CachedObject):
attrname = simple_map(key_name)
"""
    return property(update_on_key(key)(lambda self: self._cache[key]))
| mit | 8,315,623,595,768,698,000 | -7,147,267,314,180,363,000 | 38.410431 | 80 | 0.565165 | false |
tinloaf/home-assistant | homeassistant/components/xiaomi_aqara.py | 3 | 10991 | """
Support for Xiaomi Gateways.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/xiaomi_aqara/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_XIAOMI_GW
from homeassistant.const import (
ATTR_BATTERY_LEVEL, CONF_HOST, CONF_MAC, CONF_PORT,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util import slugify
REQUIREMENTS = ['PyXiaomiGateway==0.11.1']
_LOGGER = logging.getLogger(__name__)
ATTR_GW_MAC = 'gw_mac'
ATTR_RINGTONE_ID = 'ringtone_id'
ATTR_RINGTONE_VOL = 'ringtone_vol'
ATTR_DEVICE_ID = 'device_id'
CONF_DISCOVERY_RETRY = 'discovery_retry'
CONF_GATEWAYS = 'gateways'
CONF_INTERFACE = 'interface'
CONF_KEY = 'key'
CONF_DISABLE = 'disable'
DOMAIN = 'xiaomi_aqara'
PY_XIAOMI_GATEWAY = "xiaomi_gw"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = 'play_ringtone'
SERVICE_STOP_RINGTONE = 'stop_ringtone'
SERVICE_ADD_DEVICE = 'add_device'
SERVICE_REMOVE_DEVICE = 'remove_device'
GW_MAC = vol.All(
cv.string,
lambda value: value.replace(':', '').lower(),
vol.Length(min=12, max=12)
)
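# For illustration: the validator chain above lower-cases the value and strips
# colons, so a user-supplied '34:CE:00:FB:12:34' (a made-up MAC) is stored as
# '34ce00fb1234' and must end up exactly 12 characters long.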
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema({
vol.Required(ATTR_RINGTONE_ID):
vol.All(vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])),
vol.Optional(ATTR_RINGTONE_VOL):
vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))
})
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema({
vol.Required(ATTR_DEVICE_ID):
vol.All(cv.string, vol.Length(min=14, max=14))
})
GATEWAY_CONFIG = vol.Schema({
vol.Optional(CONF_MAC, default=None): vol.Any(GW_MAC, None),
vol.Optional(CONF_KEY):
vol.All(cv.string, vol.Length(min=16, max=16)),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=9898): cv.port,
vol.Optional(CONF_DISABLE, default=False): cv.boolean,
})
def _fix_conf_defaults(config):
"""Update some configuration defaults."""
config['sid'] = config.pop(CONF_MAC, None)
if config.get(CONF_KEY) is None:
_LOGGER.warning(
'Key is not provided for gateway %s. Controlling the gateway '
'will not be possible', config['sid'])
if config.get(CONF_HOST) is None:
config.pop(CONF_PORT)
return config
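# Illustrative transformation with placeholder values: after GATEWAY_CONFIG
# validation, {'mac': '34ce00fb1234', 'key': 'abcdefghijklmnop', 'port': 9898,
# 'disable': False} comes out as {'sid': '34ce00fb1234', 'key': '...',
# 'disable': False} - 'mac' is renamed to 'sid' and 'port' is dropped because
# no 'host' was supplied.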
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_GATEWAYS, default={}):
vol.All(cv.ensure_list, [GATEWAY_CONFIG], [_fix_conf_defaults]),
vol.Optional(CONF_INTERFACE, default='any'): cv.string,
vol.Optional(CONF_DISCOVERY_RETRY, default=3): cv.positive_int
})
}, extra=vol.ALLOW_EXTRA)
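# A minimal configuration.yaml sketch matching the schema above; the MAC and
# the 16-character key are placeholders, not real credentials:
#
#     xiaomi_aqara:
#       discovery_retry: 5
#       gateways:
#         - mac: 34ce00fb1234
#           key: abcdefghijklmnop
#
# host/port are optional per gateway and default to discovery plus port 9898.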
def setup(hass, config):
"""Set up the Xiaomi component."""
gateways = []
interface = 'any'
discovery_retry = 3
if DOMAIN in config:
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
"""Perform action when Xiaomi Gateway device(s) has been found."""
# We don't need to do anything here, the purpose of Home Assistant's
# discovery service is to just trigger loading of this
# component, and then its own discovery process kicks in.
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(
hass.add_job, gateways, interface)
_LOGGER.debug("Expecting %s gateways", len(gateways))
for k in range(discovery_retry):
_LOGGER.info("Discovering Xiaomi Gateways (Try %s)", k + 1)
xiaomi.discover_gateways()
if len(xiaomi.gateways) >= len(gateways):
break
if not xiaomi.gateways:
_LOGGER.error("No gateway discovered")
return False
xiaomi.listen()
_LOGGER.debug("Gateways discovered. Listening for broadcasts")
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover',
'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.info("Shutting down Xiaomi Hub")
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create(
'Join permission enabled for 30 seconds! '
'Please press the pairing button of the new device once.',
title='Xiaomi Aqara Gateway')
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(
DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if hasattr(self, '_data_key') \
and self._data_key: # pylint: disable=no-member
self._unique_id = slugify("{}-{}".format(
self._data_key, # pylint: disable=no-member
self._sid))
else:
self._unique_id = slugify("{}-{}".format(self._type, self._sid))
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
"""Start unavailability tracking."""
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable,
utcnow() + TIME_TILL_UNAVAILABLE)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_schedule_update_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if 'voltage' not in data:
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
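    # Worked example for the mapping above (reported value assumed): a voltage
    # of 3050 mV is clamped to the 2800-3300 range and converted to
    # ((3050 - 2800) / (3300 - 2800)) * 100 = 50.0% battery.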
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(xiaomi, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if gateway.sid == sid:
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs['default'] = gateways[0]
return schema.extend({
vol.Required(ATTR_GW_MAC, **kwargs): gateway
})
| apache-2.0 | 1,732,796,041,462,634,800 | -783,094,438,675,863,300 | 32.105422 | 77 | 0.638249 | false |
Omegaphora/external_chromium_org | mojo/python/tests/system_unittest.py | 26 | 11412 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import sys
import time
import unittest
# pylint: disable=F0401
import mojo.embedder
from mojo import system
DATA_SIZE = 1024
def _GetRandomBuffer(size):
random.seed(size)
return bytearray(''.join(chr(random.randint(0, 255)) for i in xrange(size)))
class BaseMojoTest(unittest.TestCase):
def setUp(self):
mojo.embedder.Init()
class CoreTest(BaseMojoTest):
def testResults(self):
self.assertEquals(system.RESULT_OK, 0)
self.assertLess(system.RESULT_CANCELLED, 0)
self.assertLess(system.RESULT_UNKNOWN, 0)
self.assertLess(system.RESULT_INVALID_ARGUMENT, 0)
self.assertLess(system.RESULT_DEADLINE_EXCEEDED, 0)
self.assertLess(system.RESULT_NOT_FOUND, 0)
self.assertLess(system.RESULT_ALREADY_EXISTS, 0)
self.assertLess(system.RESULT_PERMISSION_DENIED, 0)
self.assertLess(system.RESULT_RESOURCE_EXHAUSTED, 0)
self.assertLess(system.RESULT_FAILED_PRECONDITION, 0)
self.assertLess(system.RESULT_ABORTED, 0)
self.assertLess(system.RESULT_OUT_OF_RANGE, 0)
self.assertLess(system.RESULT_UNIMPLEMENTED, 0)
self.assertLess(system.RESULT_INTERNAL, 0)
self.assertLess(system.RESULT_UNAVAILABLE, 0)
self.assertLess(system.RESULT_DATA_LOSS, 0)
self.assertLess(system.RESULT_BUSY, 0)
self.assertLess(system.RESULT_SHOULD_WAIT, 0)
def testConstants(self):
self.assertGreaterEqual(system.DEADLINE_INDEFINITE, 0)
self.assertGreaterEqual(system.HANDLE_SIGNAL_NONE, 0)
self.assertGreaterEqual(system.HANDLE_SIGNAL_READABLE, 0)
self.assertGreaterEqual(system.HANDLE_SIGNAL_WRITABLE, 0)
self.assertGreaterEqual(system.WRITE_MESSAGE_FLAG_NONE, 0)
self.assertGreaterEqual(system.READ_MESSAGE_FLAG_NONE, 0)
self.assertGreaterEqual(system.READ_MESSAGE_FLAG_MAY_DISCARD, 0)
self.assertGreaterEqual(system.WRITE_DATA_FLAG_NONE, 0)
self.assertGreaterEqual(system.WRITE_DATA_FLAG_ALL_OR_NONE, 0)
self.assertGreaterEqual(system.READ_DATA_FLAG_NONE, 0)
self.assertGreaterEqual(system.READ_DATA_FLAG_ALL_OR_NONE, 0)
self.assertGreaterEqual(system.READ_DATA_FLAG_DISCARD, 0)
self.assertGreaterEqual(system.READ_DATA_FLAG_QUERY, 0)
self.assertGreaterEqual(system.MAP_BUFFER_FLAG_NONE, 0)
def testGetTimeTicksNow(self):
pt1 = time.time()
v1 = system.GetTimeTicksNow()
time.sleep(1e-3)
v2 = system.GetTimeTicksNow()
pt2 = time.time()
self.assertGreater(v1, 0)
self.assertGreater(v2, v1 + 1000)
self.assertGreater(1e6 * (pt2 - pt1), v2 - v1)
def _testHandlesCreation(self, *args):
for handle in args:
self.assertTrue(handle.IsValid())
handle.Close()
self.assertFalse(handle.IsValid())
def _TestMessageHandleCreation(self, handles):
self._testHandlesCreation(handles.handle0, handles.handle1)
def testCreateMessagePipe(self):
self._TestMessageHandleCreation(system.MessagePipe())
def testCreateMessagePipeWithNoneOptions(self):
self._TestMessageHandleCreation(system.MessagePipe(None))
def testCreateMessagePipeWithOptions(self):
self._TestMessageHandleCreation(
system.MessagePipe(system.CreateMessagePipeOptions()))
def testWaitOverMessagePipe(self):
handles = system.MessagePipe()
handle = handles.handle0
self.assertEquals(system.RESULT_OK, handle.Wait(
system.HANDLE_SIGNAL_WRITABLE, system.DEADLINE_INDEFINITE))
self.assertEquals(system.RESULT_DEADLINE_EXCEEDED,
handle.Wait(system.HANDLE_SIGNAL_READABLE, 0))
handles.handle1.WriteMessage()
self.assertEquals(
system.RESULT_OK,
handle.Wait(
system.HANDLE_SIGNAL_READABLE,
system.DEADLINE_INDEFINITE))
def testWaitOverManyMessagePipe(self):
handles = system.MessagePipe()
handle0 = handles.handle0
handle1 = handles.handle1
self.assertEquals(
0,
system.WaitMany(
[(handle0, system.HANDLE_SIGNAL_WRITABLE),
(handle1, system.HANDLE_SIGNAL_WRITABLE)],
system.DEADLINE_INDEFINITE))
self.assertEquals(
system.RESULT_DEADLINE_EXCEEDED,
system.WaitMany(
[(handle0, system.HANDLE_SIGNAL_READABLE),
(handle1, system.HANDLE_SIGNAL_READABLE)], 0))
handle0.WriteMessage()
self.assertEquals(
1,
system.WaitMany(
[(handle0, system.HANDLE_SIGNAL_READABLE),
(handle1, system.HANDLE_SIGNAL_READABLE)],
system.DEADLINE_INDEFINITE))
def testSendBytesOverMessagePipe(self):
handles = system.MessagePipe()
data = _GetRandomBuffer(DATA_SIZE)
handles.handle0.WriteMessage(data)
(res, buffers, next_message) = handles.handle1.ReadMessage()
self.assertEquals(system.RESULT_RESOURCE_EXHAUSTED, res)
self.assertEquals(None, buffers)
self.assertEquals((DATA_SIZE, 0), next_message)
result = bytearray(DATA_SIZE)
(res, buffers, next_message) = handles.handle1.ReadMessage(result)
self.assertEquals(system.RESULT_OK, res)
self.assertEquals(None, next_message)
self.assertEquals((data, []), buffers)
def testSendEmptyDataOverMessagePipe(self):
handles = system.MessagePipe()
handles.handle0.WriteMessage(None)
(res, buffers, next_message) = handles.handle1.ReadMessage()
self.assertEquals(system.RESULT_OK, res)
self.assertEquals(None, next_message)
self.assertEquals((None, []), buffers)
def testSendHandleOverMessagePipe(self):
handles = system.MessagePipe()
handles_to_send = system.MessagePipe()
handles.handle0.WriteMessage(handles=[handles_to_send.handle0,
handles_to_send.handle1])
(res, buffers, next_message) = handles.handle1.ReadMessage(
max_number_of_handles=2)
self.assertFalse(handles_to_send.handle0.IsValid())
self.assertFalse(handles_to_send.handle1.IsValid())
self.assertEquals(system.RESULT_OK, res)
self.assertEquals(None, next_message)
self.assertEquals(None, buffers[0])
self.assertEquals(2, len(buffers[1]))
handles = buffers[1]
for handle in handles:
self.assertTrue(handle.IsValid())
(res, buffers, next_message) = handle.ReadMessage()
self.assertEquals(system.RESULT_SHOULD_WAIT, res)
for handle in handles:
handle.WriteMessage()
for handle in handles:
(res, buffers, next_message) = handle.ReadMessage()
self.assertEquals(system.RESULT_OK, res)
def _TestDataHandleCreation(self, handles):
self._testHandlesCreation(
handles.producer_handle, handles.consumer_handle)
def testCreateDataPipe(self):
self._TestDataHandleCreation(system.DataPipe())
def testCreateDataPipeWithNoneOptions(self):
self._TestDataHandleCreation(system.DataPipe(None))
def testCreateDataPipeWithDefaultOptions(self):
self._TestDataHandleCreation(
system.DataPipe(system.CreateDataPipeOptions()))
def testCreateDataPipeWithDiscardFlag(self):
options = system.CreateDataPipeOptions()
options.flags = system.CreateDataPipeOptions.FLAG_MAY_DISCARD
self._TestDataHandleCreation(system.DataPipe(options))
def testCreateDataPipeWithElementSize(self):
options = system.CreateDataPipeOptions()
options.element_num_bytes = 5
self._TestDataHandleCreation(system.DataPipe(options))
def testCreateDataPipeWithCapacity(self):
options = system.CreateDataPipeOptions()
options.element_capacity_num_bytes = DATA_SIZE
self._TestDataHandleCreation(system.DataPipe(options))
def testCreateDataPipeWithIncorrectParameters(self):
options = system.CreateDataPipeOptions()
options.element_num_bytes = 5
options.capacity_num_bytes = DATA_SIZE
with self.assertRaises(system.MojoException) as cm:
self._TestDataHandleCreation(system.DataPipe(options))
self.assertEquals(system.RESULT_INVALID_ARGUMENT, cm.exception.mojo_result)
def testSendEmptyDataOverDataPipe(self):
pipes = system.DataPipe()
self.assertEquals((system.RESULT_OK, 0), pipes.producer_handle.WriteData())
self.assertEquals(
(system.RESULT_OK, None), pipes.consumer_handle.ReadData())
def testSendDataOverDataPipe(self):
pipes = system.DataPipe()
data = _GetRandomBuffer(DATA_SIZE)
self.assertEquals((system.RESULT_OK, DATA_SIZE),
pipes.producer_handle.WriteData(data))
self.assertEquals((system.RESULT_OK, data),
pipes.consumer_handle.ReadData(bytearray(DATA_SIZE)))
def testTwoPhaseWriteOnDataPipe(self):
pipes = system.DataPipe()
(res, buf) = pipes.producer_handle.BeginWriteData(DATA_SIZE)
self.assertEquals(system.RESULT_OK, res)
self.assertGreaterEqual(len(buf.buffer), DATA_SIZE)
data = _GetRandomBuffer(DATA_SIZE)
buf.buffer[0:DATA_SIZE] = data
self.assertEquals(system.RESULT_OK, buf.End(DATA_SIZE))
self.assertEquals((system.RESULT_OK, data),
pipes.consumer_handle.ReadData(bytearray(DATA_SIZE)))
def testTwoPhaseReadOnDataPipe(self):
pipes = system.DataPipe()
data = _GetRandomBuffer(DATA_SIZE)
self.assertEquals((system.RESULT_OK, DATA_SIZE),
pipes.producer_handle.WriteData(data))
(res, buf) = pipes.consumer_handle.BeginReadData()
self.assertEquals(system.RESULT_OK, res)
self.assertEquals(DATA_SIZE, len(buf.buffer))
self.assertEquals(data, buf.buffer)
self.assertEquals(system.RESULT_OK, buf.End(DATA_SIZE))
def testCreateSharedBuffer(self):
self._testHandlesCreation(system.CreateSharedBuffer(DATA_SIZE))
def testCreateSharedBufferWithNoneOptions(self):
self._testHandlesCreation(system.CreateSharedBuffer(DATA_SIZE, None))
def testCreateSharedBufferWithDefaultOptions(self):
self._testHandlesCreation(
system.CreateSharedBuffer(
DATA_SIZE,
system.CreateSharedBufferOptions()))
def testDuplicateSharedBuffer(self):
handle = system.CreateSharedBuffer(DATA_SIZE)
self._testHandlesCreation(handle.Duplicate())
def testDuplicateSharedBufferWithNoneOptions(self):
handle = system.CreateSharedBuffer(DATA_SIZE)
self._testHandlesCreation(handle.Duplicate(None))
def testDuplicateSharedBufferWithDefaultOptions(self):
handle = system.CreateSharedBuffer(DATA_SIZE)
self._testHandlesCreation(
handle.Duplicate(system.DuplicateSharedBufferOptions()))
def testSendBytesOverSharedBuffer(self):
handle = system.CreateSharedBuffer(DATA_SIZE)
duplicated = handle.Duplicate()
data = _GetRandomBuffer(DATA_SIZE)
(res1, buf1) = handle.Map(0, DATA_SIZE)
(res2, buf2) = duplicated.Map(0, DATA_SIZE)
self.assertEquals(system.RESULT_OK, res1)
self.assertEquals(system.RESULT_OK, res2)
self.assertEquals(DATA_SIZE, len(buf1.buffer))
self.assertEquals(DATA_SIZE, len(buf2.buffer))
self.assertEquals(buf1.buffer, buf2.buffer)
buf1.buffer[:] = data
self.assertEquals(data, buf1.buffer)
self.assertEquals(data, buf2.buffer)
self.assertEquals(buf1.buffer, buf2.buffer)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(CoreTest)
test_results = unittest.TextTestRunner(verbosity=0).run(suite)
if not test_results.wasSuccessful():
sys.exit(1)
sys.exit(0)
| bsd-3-clause | 4,819,326,799,721,708,000 | -8,495,320,831,891,510,000 | 35.812903 | 79 | 0.720645 | false |
botswana-harvard/bhp065_project | bhp065/apps/hnscc_subject/admin/enrollment_admin.py | 1 | 1104 | from django.contrib import admin
from edc.base.modeladmin.admin import BaseModelAdmin
from ..forms import EnrollmentForm
from ..models import Enrollment
class EnrollmentAdmin(BaseModelAdmin):
dashboard_type = 'subject'
form = EnrollmentForm
def __init__(self, *args, **kwargs):
super(EnrollmentAdmin, self).__init__(*args, **kwargs)
self.fields = [
'report_datetime',
'pathology_no',
'gender',
'age',
'hiv_status',
'smoking_status',
"bpcc_enrolled",
"bid_number", ]
self.list_display = ("registered_subject", "pathology_no", "gender", "age",
"bpcc_enrolled", "bid_number")
self.radio_fields = {'gender': admin.VERTICAL,
'hiv_status': admin.VERTICAL,
'smoking_status': admin.VERTICAL,
"bpcc_enrolled": admin.VERTICAL, }
self.list_filter = ('gender', "bpcc_enrolled", "report_datetime")
admin.site.register(Enrollment, EnrollmentAdmin)
| gpl-2.0 | 4,962,485,052,719,704,000 | 431,603,956,346,381,060 | 33.5 | 83 | 0.557065 | false |
iogf/vy | vyapp/plugins/ycmd/ycm_extra_conf.py | 6 | 8197 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from distutils.sysconfig import get_python_inc
import platform
import os.path as p
import subprocess
import ycm_core
DIR_OF_THIS_SCRIPT = p.abspath( p.dirname( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER and/or -DYCM_EXPORT in your flags;
# only the YCM source code needs it.
'-DUSE_CLANG_COMPLETER',
'-DYCM_EXPORT=',
# THIS IS IMPORTANT! Without the '-x' flag, Clang won't know which language to
# use when compiling headers. So it will guess. Badly. So C++ headers will be
# compiled as C headers. You don't want that so ALWAYS specify the '-x' flag.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'cpp/pybind11',
'-isystem',
'cpp/whereami',
'-isystem',
'cpp/BoostParts',
'-isystem',
get_python_inc(),
'-isystem',
'cpp/llvm/include',
'-isystem',
'cpp/llvm/tools/clang/include',
'-I',
'cpp/ycm',
'-I',
'cpp/ycm/ClangCompleter',
'-isystem',
'cpp/ycm/tests/gmock/gtest',
'-isystem',
'cpp/ycm/tests/gmock/gtest/include',
'-isystem',
'cpp/ycm/tests/gmock',
'-isystem',
'cpp/ycm/tests/gmock/include',
'-isystem',
'cpp/ycm/benchmarks/benchmark/include',
]
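# Example tweak (an assumption, not something YCM requires): project-specific
# include directories or defines could be appended to the list above, e.g.
#
#     flags += [ '-I', 'src/include', '-DMY_PROJECT_DEFINE' ]
#
# with whatever paths apply to the project at hand.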
# Clang automatically sets the '-std=' flag to 'c++14' for MSVC 2015 or later,
# which is required for compiling the standard library, and to 'c++11' for older
# versions.
if platform.system() != 'Windows':
flags.append( '-std=c++11' )
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if p.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
def IsHeaderFile( filename ):
extension = p.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def FindCorrespondingSourceFile( filename ):
if IsHeaderFile( filename ):
basename = p.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if p.exists( replacement_file ):
return replacement_file
return filename
def PathToPythonUsedDuringBuild():
try:
filepath = p.join( DIR_OF_THIS_SCRIPT, 'PYTHON_USED_DURING_BUILDING' )
with open( filepath ) as f:
return f.read().strip()
# We need to check for IOError for Python 2 and OSError for Python 3.
except ( IOError, OSError ):
return None
def Settings( **kwargs ):
language = kwargs[ 'language' ]
if language == 'cfamily':
# If the file is a header, try to find the corresponding source file and
# retrieve its flags from the compilation database if using one. This is
# necessary since compilation databases don't have entries for header files.
# In addition, use this source file as the translation unit. This makes it
# possible to jump from a declaration in the header file to its definition
# in the corresponding source file.
filename = FindCorrespondingSourceFile( kwargs[ 'filename' ] )
if not database:
return {
'flags': flags,
'include_paths_relative_to_dir': DIR_OF_THIS_SCRIPT,
'override_filename': filename
}
compilation_info = database.GetCompilationInfoForFile( filename )
if not compilation_info.compiler_flags_:
return {}
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object.
final_flags = list( compilation_info.compiler_flags_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
return {
'flags': final_flags,
'include_paths_relative_to_dir': compilation_info.compiler_working_dir_,
'override_filename': filename
}
if language == 'python':
return {
'interpreter_path': PathToPythonUsedDuringBuild()
}
return {}
def GetStandardLibraryIndexInSysPath( sys_path ):
for index, path in enumerate( sys_path ):
if p.isfile( p.join( path, 'os.py' ) ):
return index
raise RuntimeError( 'Could not find standard library path in Python path.' )
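# Illustrative behaviour of the helper above (hypothetical paths; assumes os.py
# lives directly in the interpreter's lib directory):
#   GetStandardLibraryIndexInSysPath( [ '/my/project', '/usr/lib/python2.7' ] ) == 1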
def PythonSysPath( **kwargs ):
sys_path = kwargs[ 'sys_path' ]
interpreter_path = kwargs[ 'interpreter_path' ]
major_version = subprocess.check_output( [
interpreter_path, '-c', 'import sys; print( sys.version_info[ 0 ] )' ]
).rstrip().decode( 'utf8' )
sys_path.insert( GetStandardLibraryIndexInSysPath( sys_path ) + 1,
p.join( DIR_OF_THIRD_PARTY, 'python-future', 'src' ) )
sys_path[ 0:0 ] = [ p.join( DIR_OF_THIS_SCRIPT ),
p.join( DIR_OF_THIRD_PARTY, 'bottle' ),
p.join( DIR_OF_THIRD_PARTY, 'cregex',
'regex_{}'.format( major_version ) ),
p.join( DIR_OF_THIRD_PARTY, 'frozendict' ),
p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'jedi' ),
p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'numpydoc' ),
p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'parso' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps', 'requests' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'urllib3',
'src' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'chardet' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'certifi' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'idna' ),
p.join( DIR_OF_THIRD_PARTY, 'waitress' ) ]
return sys_path
| mit | -6,338,096,936,958,199,000 | -7,741,485,965,840,340,000 | 35.269912 | 80 | 0.651458 | false |
Algomorph/gpxanalyzer | gpxanalyzer/filters/color_structure_pythonic.py | 1 | 10874 | '''
Created on Apr 25, 2014
@author: Gregory Kramida
@copyright: (c) Gregory Kramida 2014
@license: GNU v3
'''
import gpxanalyzer.gpxanalyzer_internals as gi
import numpy as np
import sys
import math
import timeit
amplitude_thresholds = np.array([0.0, 0.000000000001, 0.037, 0.08, 0.195, 0.32],dtype=np.float64)
n_amplitude_levels = np.array([1, 25, 20, 35, 35, 140]);
difference_thresholds = np.array([
[0, 6, 60, 110, 256, -1],
[0, 6, 20, 60, 110, 256],
[0, 6, 20, 60, 110, 256],
[0, 6, 20, 60, 110, 256]], dtype=np.int16)
n_hue_levels = np.array([
[1, 4, 4, 4, 0],
[1, 4, 4, 8, 8],
[1, 4, 8, 8, 8],
[1, 4, 16, 16, 16]], dtype=np.uint8)
n_sum_levels = np.array([
[8, 4, 1, 1, 0],
[8, 4, 4, 2, 1],
[16, 4, 4, 4, 4],
[32, 8, 4, 4, 4]], dtype=np.uint8)
n_cum_levels = np.array([
[24, 8, 4, 0, 0],
[56, 40, 24, 8, 0],
[112, 96, 64, 32, 0],
[224, 192, 128, 64, 0]], dtype=np.uint8)
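# Consistency note, worked out by hand from the tables above (illustrative only):
# for the 256-bin quantization (row N == 3), the subspace sizes add up to
# 1*32 + 4*8 + 16*4 + 16*4 + 16*4 == 256, and the offsets in n_cum_levels[3]
# (224, 192, 128, 64, 0) place each difference-subspace in a disjoint slice of
# the histogram built by quantize_HMMD below.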
def reshape_bitstrings(bts):
return bts.transpose((1,0,2)).reshape((bts.shape[1],bts.shape[1],bts.shape[2]*2))
def convert_RGB2HMMD(raster):
out = np.zeros(raster.shape, dtype = np.int16)
for y in xrange(raster.shape[0]):
for x in xrange(raster.shape[1]):
(R,G,B) = raster[y,x,:].astype(np.int32)
mx=R
if(mx<G): mx=G
if(mx<B): mx=B
mn=R
if(mn>G): mn=G
if(mn>B): mn=B
if (mx == mn): # ( R == G == B )//exactly gray
hue = -1; #hue is undefined
else:
#solve Hue
if(R==mx):
hue=float(G-B)* 60.0/(mx-mn)
elif(G==mx):
hue=120.0+float(B-R)* 60.0/(mx-mn)
elif(B==mx):
hue=240.0+float(R-G)* 60.0/(mx-mn)
if(hue<0.0): hue+=360.0
H = int(hue + 0.5) #range [0,360]
S = int((mx + mn)/2.0 + 0.5) #range [0,255]
D = mx - mn #range [0,255]
out[y,x,:] = (H,S,D)
return out
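# Hand-checked example of the mapping above (assumes 8-bit RGB input):
# a pure red pixel (R, G, B) = (255, 0, 0) yields (H, S, D) = (0, 128, 255);
# for pure grays mx == mn, so D == 0 and hue is treated as undefined.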
def to_bitstring(arr):
bts = np.zeros((8),np.uint32)
for bn in arr:
idxUint = bn >> 5
idxBit = bn - (idxUint << 5)
bts[idxUint] |= (1 << idxBit)
return bts
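# Minimal illustration of the packing above: quantized values 0, 31 and 32 set
# bit 0 and bit 31 of word 0 and bit 0 of word 1, i.e.
#   to_bitstring([0, 31, 32]) -> [0x80000001, 0x1, 0, 0, 0, 0, 0, 0]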
def extract_row_bitstrings(quant_cell):
bitstrings = np.zeros((quant_cell.shape[0]*2,quant_cell.shape[1],4),dtype=np.uint32)
for ix_row in xrange(0,quant_cell.shape[0]):
row = quant_cell[ix_row]
for ix_bt in xrange(0, quant_cell.shape[1]-7):
bt = to_bitstring(row[ix_bt:ix_bt+8])
ix_ins = ix_bt<<1
bitstrings[ix_ins,ix_row] = bt[0:4]
bitstrings[ix_ins+1,ix_row] = bt[4:8]
return bitstrings
def check_row_bitstrings(quant_cell,row_bitstrings, raise_exception = False):
rb = row_bitstrings
for ix_row in xrange(0,row_bitstrings.shape[0]- gi.WINDOW_SIZE*2 + 2,2):
for ix_col in xrange(0,row_bitstrings.shape[1]):
bitstring = rb[ix_row:ix_row+2,ix_col]
x = ix_row / 2
y = ix_col
sample = quant_cell[y,x:x+gi.WINDOW_SIZE].copy()
vals = bitstring_vals(bitstring)
sample = np.unique(sample)
sample.sort()
vals.sort()
if(not np.array_equal(sample, vals)):
if(raise_exception):
raise ValueError("Row bitstring failure at x,y: {0:d},{1:d}".format(x,y))
else:
return False
return True
def agg_bitstrings(bitstring_arr):
if(len(bitstring_arr.shape) > 2):
bitstring_arr = bitstring_arr.transpose(1,0,2).reshape((8,-1))
agg = np.array([0,0,0,0,0,0,0,0],dtype=np.uint32)
for bitstring in bitstring_arr:
agg |= bitstring
return agg
def extract_window_bitstrings(row_bitstrings):
bitstrings = np.zeros_like(row_bitstrings)
for ix_row in xrange(0,row_bitstrings.shape[0],2):
for ix_col in xrange(0,row_bitstrings.shape[1]-7):
chunk = row_bitstrings[ix_row:ix_row+2,ix_col:ix_col+8]
bitstring = agg_bitstrings(chunk)
bitstrings[ix_row,ix_col] = bitstring[0:4]
bitstrings[ix_row+1,ix_col] = bitstring[4:8]
return bitstrings
def check_window_bitstrings(quant_cell,window_bitstrings, raise_exception = False):
wb = window_bitstrings
for ix_row in xrange(0,window_bitstrings.shape[0]-gi.WINDOW_SIZE+1):
for ix_col in xrange(0,window_bitstrings.shape[1]-gi.WINDOW_SIZE+1):
bitstring = wb[ix_row,ix_col]
y = ix_row
x = ix_col
sample = np.unique(quant_cell[y:y+gi.WINDOW_SIZE,x:x+gi.WINDOW_SIZE])
vals = bitstring_vals(bitstring)
if(not np.array_equal(sample, vals)):
if(raise_exception):
raise ValueError("Window bitstring failure at x,y: {0:d},{1:d}".format(x,y))
else:
return False
return True
def extract_histogram(quant_cell, x, y, verbose = False, first_n_cols = None, first_n_rows = None):
region = quant_cell[x:x+gi.REGION_SIZE,y:y+gi.REGION_SIZE]
descr = np.zeros((gi.BASE_QUANT_SPACE,),dtype=np.uint16)
stop_at_col = gi.REGION_CLIP
if first_n_cols != None:
stop_at_col = first_n_cols
stop_at_row = gi.REGION_CLIP
if first_n_rows != None:
stop_at_row = first_n_rows
for ix_col in xrange(0,stop_at_col):
hist = np.zeros((gi.BASE_QUANT_SPACE,),dtype=np.int32)
for ix_row in xrange(0,gi.WINDOW_SIZE):
for ix_wincol in xrange(ix_col,ix_col + gi.WINDOW_SIZE):
hist[region[ix_row,ix_wincol]]+=1
for ix in xrange(0,gi.BASE_QUANT_SPACE):
descr[ix] += int(hist[ix] > 0)
for ix_row in xrange(gi.WINDOW_SIZE,stop_at_row + gi.WINDOW_SIZE-1):
ix_row_sub = ix_row - gi.WINDOW_SIZE
for ix_wincol in xrange(ix_col,ix_col + gi.WINDOW_SIZE):
hist[region[ix_row_sub,ix_wincol]]-=1
hist[region[ix_row,ix_wincol]]+=1
for ix in xrange(0,gi.BASE_QUANT_SPACE):
descr[ix] += int(hist[ix] > 0)
if(verbose):
print "Finished column {0:d} out of {1:d}".format(ix_col+1, stop_at_col),
sys.stdout.flush()
print "\r",
return descr
def quantize_amplitude(descriptor):
des = descriptor
norm = gi.REGION_NORM
n_total_levels = n_amplitude_levels.sum()
des_out = np.zeros(des.shape,dtype=np.uint8)
for i_bin in xrange(0,des.size):
val = float(des[i_bin]) / norm
quant_val = 0
i_quant = 0
while (i_quant+1 < amplitude_thresholds.size and val >= amplitude_thresholds[i_quant+1]):
quant_val += n_amplitude_levels[i_quant]
i_quant+=1
next_thresh = amplitude_thresholds[i_quant+1] if i_quant+1 < n_amplitude_levels.size else 1.0
val = int(quant_val +
(val - amplitude_thresholds[i_quant]) *
(n_amplitude_levels[i_quant] / (next_thresh - amplitude_thresholds[i_quant])))
if(val == n_total_levels):
val = n_total_levels - 1
des_out[i_bin] = val
return des_out
def bitstrings_to_histogram(window_bitstrings,x,y, verbose = False):
chunk = window_bitstrings[y:y+gi.REGION_CLIP,x:x+gi.REGION_CLIP];
descr = np.zeros((gi.BASE_QUANT_SPACE),dtype=np.uint16)
i_row = 0
for row in chunk:
for bitstring in row:
vals = bitstring_vals(bitstring)
for val in vals:
descr[val]+=1
if verbose:
i_row+=1
print "Finished column {0:d} out of {1:d}".format(i_row, gi.REGION_CLIP),
sys.stdout.flush()
print "\r",
return descr
def bitstring_vals(bitstring_arr):
if(len(bitstring_arr.shape) > 1):
bitstring_arr = bitstring_arr.flatten()
vals = []
for ix_uint in range(0,8):
uint = bitstring_arr[ix_uint]
addend = (ix_uint << 5)
for bit_ix in range(0,32):
if(uint & (1 << bit_ix)):
vals.append(addend + bit_ix)
return np.uint8(vals)
def tune_group_size(stmt):
for x in xrange(0,9):
y = 0
while((2**x)*(2**y) <= 512):
size_x = 2**x
size_y = 2**y
            # Apply .format to the whole concatenated statement so that both
            # work-group size placeholders are actually substituted.
            stmt = ("evt = extr.program.zeroOutImage(mgr.queue,extr.bitstring_buffer.shape,({0:d},{1:d})"
                    ",extr.bitstring_buffer); cl.wait_for_events([evt])").format(size_x, size_y)
timeit.timeit(stmt)
y+=1
def hist_bin(raster):
log_area = math.log(float(raster.size),2)
scale_power = max(int(0.5 * log_area - 8 + 0.5),0)
subsample = 1 << scale_power
window_width = 8 * subsample
window_height = 8 * subsample
mod_width = raster.shape[1] - (window_width - 1)
mod_height = raster.shape[0] - (window_height - 1)
hist = np.zeros((256),dtype=np.uint64)
descr = np.zeros((256),dtype=np.uint64)
for col in xrange(0,mod_width,subsample):
hist[:] = 0
stop_at = col + window_width
for row in xrange(0,window_height,subsample):
for loc in xrange(col,stop_at,subsample):
val = raster[row,loc]
hist[val]+=1
for ix in xrange(0,len(hist)):
if(hist[ix]):
descr[ix] +=1
for row in xrange(subsample,mod_height,subsample):
del_row = row - subsample
add_row = row + window_height - subsample
for loc in xrange(col,stop_at,subsample):
del_val = raster[del_row,loc]
add_val = raster[add_row,loc]
hist[del_val]-=1
hist[add_val]+=1
for ix in xrange(0,len(hist)):
if(hist[ix]):
descr[ix] +=1
return descr
def quantize_HMMD(raster):
out = np.zeros((raster.shape[0],raster.shape[1]), dtype = np.uint8)
N = 3
for y in xrange(raster.shape[0]):
for x in xrange(raster.shape[1]):
(H,S,D) = raster[y,x]
iSub = 0
while(difference_thresholds[N,iSub + 1] <= D):
iSub +=1
Hindex = int((H / 360.0) * n_hue_levels[N,iSub]);
if (H == 360):
Hindex = 0
Sindex = int(math.floor((S - 0.5*difference_thresholds[N,iSub])
* n_sum_levels[N,iSub]
/ (255 - difference_thresholds[N,iSub])))
if Sindex >= n_sum_levels[N,iSub]:
Sindex = n_sum_levels[N,iSub] - 1
px = n_cum_levels[N,iSub] + Hindex*n_sum_levels[N,iSub] + Sindex
out[y,x] = px
    return out
| gpl-3.0 | -6,056,147,674,100,289,000 | -8,408,268,614,523,364,000 | 35.009934 | 106 | 0.529704 | false |
inwotep/lava-android-test | lava_android_test/test_definitions/install_prep_4bench.py | 2 | 1731 | # Copyright (c) 2012 Linaro
# Author: Linaro Validation Team <[email protected]>
#
# This file is part of LAVA Android Test.
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Remove the Linaro wallpaper before starting the benchmark test
**URL:** None
**Default options:** None
"""
import lava_android_test.config
import lava_android_test.testdef
test_name = 'install_prep_4bench'
INSTALL_STEPS_ADB_PRE = []
ADB_SHELL_STEPS = ['rm /data/system/wallpaper_info.xml',
"echo install_prep_4bench.wallpaper: PASS"]
PATTERN = "^\s*(?P<test_case_id>[^:]+?):\s+(?P<result>(PASS|FAIL)?)\s*$"
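# Illustrative match for the pattern above (assumes the shell echoes the line
# unchanged): "install_prep_4bench.wallpaper: PASS" yields
#   test_case_id = 'install_prep_4bench.wallpaper', result = 'PASS'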
inst = lava_android_test.testdef.AndroidTestInstaller(
steps_adb_pre=INSTALL_STEPS_ADB_PRE)
run = lava_android_test.testdef.AndroidTestRunner(
adbshell_steps=ADB_SHELL_STEPS)
parser = lava_android_test.testdef.AndroidTestParser(PATTERN)
testobj = lava_android_test.testdef.AndroidTest(testname=test_name,
installer=inst,
runner=run,
parser=parser)
| gpl-3.0 | -8,070,211,828,804,615,000 | 9,100,707,455,780,898,000 | 36.630435 | 72 | 0.667822 | false |
ravenland/ycmWinRepo | python/ycm/client/command_request.py | 10 | 3213 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import vim
from ycm.client.base_request import BaseRequest, BuildRequestData, ServerError
from ycm import vimsupport
from ycmd.utils import ToUtf8IfNeeded
def _EnsureBackwardsCompatibility( arguments ):
if arguments and arguments[ 0 ] == 'GoToDefinitionElseDeclaration':
arguments[ 0 ] = 'GoTo'
return arguments
class CommandRequest( BaseRequest ):
def __init__( self, arguments, completer_target = None ):
super( CommandRequest, self ).__init__()
self._arguments = _EnsureBackwardsCompatibility( arguments )
self._completer_target = ( completer_target if completer_target
else 'filetype_default' )
self._is_goto_command = (
self._arguments and self._arguments[ 0 ].startswith( 'GoTo' ) )
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( {
'completer_target': self._completer_target,
'command_arguments': self._arguments
} )
try:
self._response = self.PostDataToHandler( request_data,
'run_completer_command' )
except ServerError as e:
vimsupport.PostVimMessage( e )
def Response( self ):
return self._response
def RunPostCommandActionsIfNeeded( self ):
if not self._is_goto_command or not self.Done() or not self._response:
return
if isinstance( self._response, list ):
defs = [ _BuildQfListItem( x ) for x in self._response ]
vim.eval( 'setqflist( %s )' % repr( defs ) )
vim.eval( 'youcompleteme#OpenGoToList()' )
else:
vimsupport.JumpToLocation( self._response[ 'filepath' ],
self._response[ 'line_num' ],
self._response[ 'column_num' ] )
def SendCommandRequest( arguments, completer ):
request = CommandRequest( arguments, completer )
# This is a blocking call.
request.Start()
request.RunPostCommandActionsIfNeeded()
return request.Response()
def _BuildQfListItem( goto_data_item ):
qf_item = {}
if 'filepath' in goto_data_item:
qf_item[ 'filename' ] = ToUtf8IfNeeded( goto_data_item[ 'filepath' ] )
if 'description' in goto_data_item:
qf_item[ 'text' ] = ToUtf8IfNeeded( goto_data_item[ 'description' ] )
if 'line_num' in goto_data_item:
qf_item[ 'lnum' ] = goto_data_item[ 'line_num' ]
if 'column_num' in goto_data_item:
qf_item[ 'col' ] = goto_data_item[ 'column_num' ] - 1
return qf_item
| gpl-3.0 | 5,661,661,645,976,253,000 | 5,354,259,670,500,795,000 | 33.548387 | 78 | 0.666978 | false |
bennojoy/ansible | test/units/errors/test_errors.py | 170 | 3108 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.errors import AnsibleError
from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch
class TestErrors(unittest.TestCase):
def setUp(self):
self.message = 'This is the error message'
self.obj = AnsibleBaseYAMLObject()
def tearDown(self):
pass
def test_basic_error(self):
e = AnsibleError(self.message)
self.assertEqual(e.message, 'ERROR! ' + self.message)
self.assertEqual(e.__repr__(), 'ERROR! ' + self.message)
@patch.object(AnsibleError, '_get_error_lines_from_file')
def test_error_with_object(self, mock_method):
self.obj.ansible_pos = ('foo.yml', 1, 1)
mock_method.return_value = ('this is line 1\n', '')
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
def test_get_error_lines_from_file(self):
m = mock_open()
m.return_value.readlines.return_value = ['this is line 1\n']
with patch('{0}.open'.format(BUILTINS), m):
# this line will be found in the file
self.obj.ansible_pos = ('foo.yml', 1, 1)
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
# this line will not be found, as it is out of the index range
self.obj.ansible_pos = ('foo.yml', 2, 1)
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
| gpl-3.0 | -7,253,102,896,623,593,000 | 471,503,877,356,834,000 | 44.705882 | 281 | 0.692728 | false |
bpsinc-native/src_third_party_scons-2.0.1 | engine/SCons/Scanner/Fortran.py | 61 | 14347 | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py 5134 2010/08/16 23:02:40 bdeegan"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove unique items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
    #           ^|; : matches either the start of the line or a semicolon (a semicolon separates multiple statements on one line)
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
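    # Illustrative matches, hand-checked against the pattern above (as used
    # with re.M in F90Scanner.__init__):
    #   "use iso_fortran_env"              -> captures 'iso_fortran_env'
    #   "USE, INTRINSIC :: iso_c_binding"  -> captures 'iso_c_binding'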
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
    #         (?:\w+_)?  : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
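    # Illustrative matches, hand-checked against the pattern above:
    #   "INCLUDE 'mpif.h'"         -> captures 'mpif.h'
    #   'include "params.inc"'     -> captures 'params.inc'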
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
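    # Illustrative behaviour, hand-checked against the pattern above:
    #   "module grid_utils"        -> captures 'grid_utils'
    #   "MODULE PROCEDURE assign"  -> no match (excluded by the negative lookahead)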
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -2,803,407,039,446,795,300 | 998,321,060,039,338,000 | 44.401899 | 125 | 0.599777 | false |
xcyan/models | domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py | 10 | 5624 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Trains the classification/pose baselines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
# Dependency imports
import tensorflow as tf
from domain_adaptation.datasets import dataset_factory
from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess
from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers
flags = tf.app.flags
FLAGS = flags.FLAGS
slim = tf.contrib.slim
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID.')
flags.DEFINE_integer('num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker.')
flags.DEFINE_integer('batch_size', 32, 'The number of samples per batch.')
flags.DEFINE_string('dataset_name', None, 'The name of the dataset.')
flags.DEFINE_string('dataset_dir', None,
'The directory where the data is stored.')
flags.DEFINE_string('split_name', None, 'The name of the train/test split.')
flags.DEFINE_float('learning_rate', 0.001, 'The initial learning rate.')
flags.DEFINE_integer(
'learning_rate_decay_steps', 20000,
'The frequency, in steps, at which the learning rate is decayed.')
flags.DEFINE_float('learning_rate_decay_factor',
0.95,
'The factor with which the learning rate is decayed.')
flags.DEFINE_float('adam_beta1', 0.5, 'The beta1 value for the AdamOptimizer')
flags.DEFINE_float('weight_decay', 1e-5,
'The L2 coefficient on the model weights.')
flags.DEFINE_string(
'logdir', None, 'The location of the logs and checkpoints.')
flags.DEFINE_integer('save_interval_secs', 600,
'How often, in seconds, we save the model to disk.')
flags.DEFINE_integer('save_summaries_secs', 600,
'How often, in seconds, we compute the summaries.')
flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
flags.DEFINE_float(
'moving_average_decay', 0.9999,
'The amount of decay to use for moving averages.')
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
hparams = tf.contrib.training.HParams()
hparams.weight_decay_task_classifier = FLAGS.weight_decay
if FLAGS.dataset_name in ['mnist', 'mnist_m', 'usps']:
hparams.task_tower = 'mnist'
else:
raise ValueError('Unknown dataset %s' % FLAGS.dataset_name)
with tf.Graph().as_default():
with tf.device(
tf.train.replica_device_setter(FLAGS.num_ps_tasks, merge_devices=True)):
dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
FLAGS.split_name, FLAGS.dataset_dir)
num_classes = dataset.num_classes
preprocess_fn = partial(pixelda_preprocess.preprocess_classification,
is_training=True)
images, labels = dataset_factory.provide_batch(
FLAGS.dataset_name,
FLAGS.split_name,
dataset_dir=FLAGS.dataset_dir,
num_readers=FLAGS.num_readers,
batch_size=FLAGS.batch_size,
num_preprocessing_threads=FLAGS.num_readers)
# preprocess_fn=preprocess_fn)
# Define the model
logits, _ = pixelda_task_towers.add_task_specific_model(
images, hparams, num_classes=num_classes, is_training=True)
# Define the losses
if 'classes' in labels:
one_hot_labels = labels['classes']
loss = tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels, logits=logits)
tf.summary.scalar('losses/Classification_Loss', loss)
else:
raise ValueError('Only support classification for now.')
total_loss = tf.losses.get_total_loss()
tf.summary.scalar('losses/Total_Loss', total_loss)
# Setup the moving averages
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, slim.get_or_create_global_step())
tf.add_to_collection(
tf.GraphKeys.UPDATE_OPS,
variable_averages.apply(moving_average_variables))
# Specify the optimization scheme:
learning_rate = tf.train.exponential_decay(
FLAGS.learning_rate,
slim.get_or_create_global_step(),
FLAGS.learning_rate_decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate, beta1=FLAGS.adam_beta1)
train_op = slim.learning.create_train_op(total_loss, optimizer)
slim.learning.train(
train_op,
FLAGS.logdir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | -7,380,751,866,454,453,000 | 7,146,819,407,552,623 | 33.931677 | 80 | 0.670341 | false |
michaelBenin/django-oscar | oscar/apps/catalogue/migrations/0006_auto__add_field_product_is_discountable.py | 18 | 12901 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product.is_discountable'
db.add_column('catalogue_product', 'is_discountable', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Product.is_discountable'
db.delete_column('catalogue_product', 'is_discountable')
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'catalogue.contributor': {
'Meta': {'object_name': 'Contributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'catalogue.contributorrole': {
'Meta': {'object_name': 'ContributorRole'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productcontributor': {
'Meta': {'object_name': 'ProductContributor'},
'contributor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Contributor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ContributorRole']", 'null': 'True', 'blank': 'True'})
},
'catalogue.productimage': {
'Meta': {'ordering': "['display_order']", 'unique_together': "(('product', 'display_order'),)", 'object_name': 'ProductImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['catalogue.Product']"})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
}
}
complete_apps = ['catalogue']
| bsd-3-clause | 2,747,448,799,633,360,000 | 2,546,885,987,570,185,700 | 78.635802 | 222 | 0.562747 | false |
ovnicraft/odoo | addons/crm/crm_phonecall.py | 255 | 14844 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
""" Model for CRM phonecalls """
_name = "crm.phonecall"
_description = "Phonecall"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'create_date': fields.datetime('Creation Date' , readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.'),
'user_id': fields.many2one('res.users', 'Responsible'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection(
[('open', 'Confirmed'),
('cancel', 'Cancelled'),
('pending', 'Pending'),
('done', 'Held')
], string='Status', readonly=True, track_visibility='onchange',
            help='The status is set to Confirmed when a case is created.\n'
                 'When the call is over, the status is set to Held.\n'
                 'If the call is not applicable anymore, the status can be set to Cancelled.'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'date_open': fields.datetime('Opened', readonly=True),
# phonecall fields
'name': fields.char('Call Summary', required=True),
'active': fields.boolean('Active', required=False),
'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',section_id),('section_id','=',False),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_phone': fields.char('Phone'),
'partner_mobile': fields.char('Mobile'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Date'),
'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
}
def _get_default_state(self, cr, uid, context=None):
if context and context.get('default_state'):
return context.get('default_state')
return 'open'
_defaults = {
'date': fields.datetime.now,
'priority': '1',
'state': _get_default_state,
'user_id': lambda self, cr, uid, ctx: uid,
'active': 1
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'partner_phone': partner.phone,
'partner_mobile': partner.mobile,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') == 'done':
values['date_closed'] = fields.datetime.now()
self.compute_duration(cr, uid, ids, context=context)
elif values.get('state') == 'open':
values['date_open'] = fields.datetime.now()
values['duration'] = 0.0
return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)
def compute_duration(self, cr, uid, ids, context=None):
for phonecall in self.browse(cr, uid, ids, context=context):
if phonecall.duration <= 0:
duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
values = {'duration': duration.seconds/float(60)}
self.write(cr, uid, [phonecall.id], values, context=context)
return True
def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
        action: 'schedule' (Schedule a call) or 'log' (Log a call)
"""
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for call in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = call.section_id and call.section_id.id or False
if not user_id:
user_id = call.user_id and call.user_id.id or False
if not schedule_time:
schedule_time = call.date
vals = {
'name' : call_summary,
'user_id' : user_id or False,
'categ_id' : categ_id or False,
'description' : call.description or False,
'date' : schedule_time,
'section_id' : section_id or False,
'partner_id': call.partner_id and call.partner_id.id or False,
'partner_phone' : call.partner_phone,
'partner_mobile' : call.partner_mobile,
'priority': call.priority,
'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
}
new_id = self.create(cr, uid, vals, context=context)
if action == 'log':
self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
phonecall_dict[call.id] = new_id
return phonecall_dict
def _call_create_partner(self, cr, uid, phonecall, context=None):
partner = self.pool.get('res.partner')
partner_id = partner.create(cr, uid, {
'name': phonecall.name,
'user_id': phonecall.user_id.id,
'comment': phonecall.description,
'address': []
})
return partner_id
def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
values = {}
if opportunity_id:
opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
values = {
'section_id' : opportunity.section_id and opportunity.section_id.id or False,
'partner_phone' : opportunity.phone,
'partner_mobile' : opportunity.mobile,
'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
}
return {'value' : values}
def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
self._call_set_partner_send_note(cr, uid, ids, context)
return write_res
def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
address = self.pool.get('res.partner')
return address.create(cr, uid, {
'parent_id': partner_id,
'name': phonecall.name,
'phone': phonecall.partner_phone,
})
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
        Handle partner assignation when processing phonecalls.
        If action is 'create', create a new partner from the call details and assign it to the phonecall.
        Otherwise assign the phonecall to the specified partner_id.
        :param list ids: phonecall ids to process
        :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
        :param int partner_id: partner to assign if any
        :return dict: dictionary organized as follows: {phonecall_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_lead
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for call in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
self._call_create_partner_address(cr, uid, call, partner_id, context=context)
self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
partner_ids[call.id] = partner_id
return partner_ids
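    # Illustrative use of handle_partner_assignation (hypothetical ids, old cr/uid API):
    #   create one partner per phonecall:
    #     phonecall_obj.handle_partner_assignation(cr, uid, call_ids, action='create')
    #   or link every phonecall to an existing partner (any non-'create' action only
    #   assigns the given partner_id):
    #     phonecall_obj.handle_partner_assignation(cr, uid, call_ids, action='exist', partner_id=partner_id)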
def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
model_data = self.pool.get('ir.model.data')
# Select the view
tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
value = {
'name': _('Phone Call'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'crm.phonecall',
'res_id' : int(phonecall_id),
'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False,
}
return value
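    # The dict built above is a plain ir.actions.act_window description; returning it
    # from a button or wizard method makes the client open the phonecall in the
    # declared form/tree/calendar views, e.g. (hypothetical id):
    #   return self.redirect_phonecall_view(cr, uid, new_phonecall_id, context=context)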
def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
partner = self.pool.get('res.partner')
opportunity = self.pool.get('crm.lead')
opportunity_dict = {}
default_contact = False
for call in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = call.partner_id and call.partner_id.id or False
if partner_id:
address_id = partner.address_get(cr, uid, [partner_id])['default']
if address_id:
default_contact = partner.browse(cr, uid, address_id, context=context)
opportunity_id = opportunity.create(cr, uid, {
'name': opportunity_summary or call.name,
'planned_revenue': planned_revenue,
'probability': probability,
'partner_id': partner_id or False,
'mobile': default_contact and default_contact.mobile,
'section_id': call.section_id and call.section_id.id or False,
'description': call.description or False,
'priority': call.priority,
'type': 'opportunity',
'phone': call.partner_phone or False,
'email_from': default_contact and default_contact.email,
})
vals = {
'partner_id': partner_id,
'opportunity_id': opportunity_id,
'state': 'done',
}
self.write(cr, uid, [call.id], vals, context=context)
opportunity_dict[call.id] = opportunity_id
return opportunity_dict
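    # Illustrative conversion (hypothetical ids and figures, old cr/uid API) -- turns a
    # call into an opportunity and marks the call as done:
    #   opp_by_call = phonecall_obj.convert_opportunity(
    #       cr, uid, [call_id], opportunity_summary='New project',
    #       planned_revenue=5000.0, probability=30.0, context=context)
    #   opportunity_id = opp_by_call[call_id]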
def action_make_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule a meeting on current phonecall.
:return dict: dictionary value for created meeting view
"""
partner_ids = []
phonecall = self.browse(cr, uid, ids[0], context)
if phonecall.partner_id and phonecall.partner_id.email:
partner_ids.append(phonecall.partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_ids': partner_ids,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_name': phonecall.name,
}
return res
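    # The calendar action returned above pre-fills the new meeting through context
    # defaults (phonecall, attendees, current user, email, name); the client then
    # opens the standard meeting calendar view for the user to complete.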
def action_button_convert2opportunity(self, cr, uid, ids, context=None):
"""
Convert a phonecall into an opp and then redirect to the opp view.
:param list ids: list of calls ids to convert (typically contains a single id)
:return dict: containing view information
"""
if len(ids) != 1:
raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))
opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)
# ----------------------------------------
# OpenChatter
# ----------------------------------------
def _call_set_partner_send_note(self, cr, uid, ids, context=None):
return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,266,285,847,501,864,000 | 3,064,167,102,674,276,000 | 47.990099 | 145 | 0.568243 | false |
AltSchool/django | django/conf/locale/en/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
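# Illustrative consumption of these constants (not part of the original file; the
# date value below is a stand-in) -- localized formatting goes through
# django.utils.formats, which resolves names such as 'DATE_FORMAT' against this module:
#   from django.utils import formats
#   formats.date_format(some_date, 'DATE_FORMAT')  # e.g. 'Oct. 25, 2006' with DATE_FORMAT = 'N j, Y'
#   formats.sanitize_separators('1,234.5')  # honours THOUSAND_SEPARATOR / DECIMAL_SEPARATOR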
| bsd-3-clause | 3,083,053,286,319,915,500 | 4,869,941,424,852,685,000 | 41.209302 | 81 | 0.516253 | false |