repo_name | ref | path | copies | content
---|---|---|---|---|
giuliov/ansible | refs/heads/devel | lib/ansible/plugins/inventory/directory.py | 93 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from . aggregate import InventoryAggregateParser
class InventoryDirectoryParser(InventoryAggregateParser):
CONDITION="is_dir(%s)"
def __init__(self, inven_directory):
directory = inven_directory
names = os.listdir(inven_directory)
new_names = []
# Clean up the list of filenames
for filename in names:
# Skip files that end with certain extensions or characters
if any(filename.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
continue
# Skip hidden files
if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
continue
# These are things inside of an inventory basedir
if filename in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(directory, filename)
new_names.append(fullpath)
super(InventoryDirectoryParser, self).__init__(new_names)
def parse(self):
return super(InventoryDirectoryParser, self).parse()
|
debianitram/w2p-acreditacion | refs/heads/master | languages/my-mm.py | 85 | # -*- coding: utf-8 -*-
{
'!langcode!': 'my-mm',
'!langname!': 'မြန်မာ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s %%{row} ဖျက်ပြီးပြီ',
'%s %%{row} updated': '%s %%{row} ပြင်ပြီးပြီ',
'%s selected': '%s ခု ရွေးထားသည်',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(something like "it-it")',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occurred, please [[reload %s]] the page',
'About': 'အကြောင်း',
'Access Control': 'အသုံးပြု ခြင်းဆိုင်ရာ ထိန်းချုပ်ရန်',
'Additional code for your application': 'Additional code for your application',
'Admin language': 'Admin language',
'administrative interface': 'administrative interface',
'Administrative Interface': 'စီမံခန့်ခွဲရာ အင်တာဖေ့စ်',
'Administrator Password:': 'Administrator Password:',
'Ajax Recipes': 'Ajax Recipes',
'and rename it:': 'and rename it:',
'appadmin is disabled because insecure channel': 'စိတ်မချရသော လမ်းကြောင်းမှ ဝင်ရောက်သဖြင့် appadmin ကို အသုံးပြု၍ မရပါ',
'Application name:': 'Application name:',
'are not used': 'အသုံးမပြုပါ',
'are not used yet': 'အသုံးမပြုသေးပါ',
'Are you sure you want to delete this object?': 'သင် ဒီအရာ ဖျက်ရန် သေချာပါသလား။',
'Available Databases and Tables': 'အသုံးပြုနိုင်သော ဒေတာဘေစ့်များနှင့် ဇယားများ',
'Buy this book': 'ဒီစာအုပ်ကို ဝယ်ပါ',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'can be a git repo': 'can be a git repo',
'Cannot be empty': 'အလွတ် မဖြစ်ရပါ',
'Change admin password': 'Change admin password',
'Check to delete': 'ဖျက်ရန် စစ်ဆေးပါ',
'Checking for upgrades...': 'အဆင့်မြှင့်တင်မှုများအတွက် စစ်ဆေးနေသည် ...',
'Clean': 'ရှင်းလင်းရန်',
'Clear CACHE?': 'CACHE ကို ရှင်းလင်းမည်မှာ ဟုတ်ပါသလား။',
'Clear DISK': 'DISK ကို ရှင်းလင်းမည်။',
'Clear RAM': 'RAM ကို ရှင်းလင်းမည်။',
'Client IP': 'Client IP',
'collapse/expand all': 'collapse/expand all',
'Community': 'အသိုင်းအဝိုင်း',
'Compile': 'Compile',
'Components and Plugins': 'Components and Plugins',
'Controller': 'ကွန်ထရိုလာ',
'Controllers': 'ကွန်ထရိုလာများ',
'controllers': 'controllers',
'Copyright': 'မူပိုင်ခွင့်',
'Create': 'ဖန်တီးရန်',
'create file with filename:': 'create file with filename:',
'Create/Upload': 'Create/Upload',
'created by': 'ဖန်းတီးသူ',
'Created By': 'ပြုလုပ်ဖန်တီးသူ',
'Created On': 'ပြုလုပ်ဖန်တီးသည့်အချိန်',
'crontab': 'crontab',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'currently running': 'လက်ရှိတွင် လုပ်ဆောင်နေသည်',
'data uploaded': 'data uploaded',
'Database': 'ဒေတာဘေစ့်',
'Database %s select': 'Database %s select',
'database administration': 'ဒေတာဘေ့(စ်) စီမံခန့်ခွဲခြင်း',
'Database Administration (appadmin)': 'ဒေတာဘေစ့် စီမံခန့်ခွဲခြင်း (appadmin)',
'db': 'db',
'DB Model': 'DB Model',
'Debug': 'အမှားရှာရန်',
'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)',
'Delete:': 'Delete:',
'Demo': 'အစမ်း၊ သရုပ်ပြမှုများ',
'Deploy': 'Deploy',
'Deploy on Google App Engine': 'Deploy on Google App Engine',
'Deploy to OpenShift': 'Deploy to OpenShift',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'ဖော်ပြချက်',
'design': 'design',
'direction: ltr': 'direction: ltr',
'Disable': 'ပိတ်ရန်',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk ရှင်းလင်းပြီးပြီ',
'Documentation': 'စာရွက်စာတမ်း အထောက်အကူများ',
"Don't know what to do?": 'ဘာလုပ်ရမည်မသိ ဖြစ်နေပါသလား။',
'done!': 'လုပ်ငန်း ဆောင်ရွက်ပြီးပြီ!',
'Download': 'Download',
'Download layouts from repository': 'Download layouts from repository',
'Download plugins from repository': 'Download plugins from repository',
'E-mail': 'အီးမေးလ်',
'Edit': 'ပြင်ဆင်ရန်',
'Edit application': 'Application ကို ပြင်ရန်',
'Edit current record': 'လက်ရှိ မှတ်တမ်းကို ပြင်ရန်',
'Email and SMS': 'အီးမေးလ်နှင့် SMS',
'Enable': 'ဖွင့်ရန်',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'အမှားများ',
'export as csv file': ' csv file အနေနဲ့ ထုတ်ပေးရန်',
'exposes': 'exposes',
'extends': 'extends',
'FAQ': 'ဖြစ်လေ့ရှိသော ပြဿနာများ',
'filter': 'filter',
'First name': 'အမည်၏ ပထမဆုံး စာလုံး',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'အခမဲ့ Applications',
'graph model': 'graph model',
'Graph Model': 'Graph Model',
'Group ID': 'Group ID',
'Groups': 'အဖွဲ့များ',
'Hello World': 'မင်္ဂလာပါ ကမ္ဘာကြီး။',
'Help': 'အကူအညီ',
'Home': 'မူလသို့',
'How did you get here?': 'သင် ဘယ်လို ရောက်လာခဲ့သလဲ။',
'import': 'သွင်းယူရန်',
'Import/Export': 'သွင်းယူရန်/ထုတ်ယူရန်',
'includes': 'includes',
'Install': 'Install',
'Installed applications': 'ထည့်သွင်းပြီး application များ',
'Internal State': 'Internal State',
'Introduction': 'မိတ်ဆက်',
'Invalid email': 'အီးမေးလ် ဖြည့်သွင်းမှုမှားနေသည်',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Is Active': 'Is Active',
'Key': 'Key',
'Language': 'ဘာသာစကား',
'languages': 'ဘာသာစကားများ',
'Languages': 'ဘာသာစကားများ',
'Last name': 'မျိုးနွယ်အမည်',
'Layout': 'အပြင်အဆင်',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'အပြင်အဆင်များ',
'Live Chat': 'တိုက်ရိုက် ဆက်သွယ် ပြောကြားရန်',
'Login': 'ဝင်ရောက်အသုံးပြုရန်',
'Login to the Administrative Interface': 'Login to the Administrative Interface',
'Logout': 'ထွက်ရန်',
'Lost Password': 'စကားဝှက် မသိတော့ပါ',
'Lost password?': 'စကားဝှက် မသိတော့ဘူးလား။',
'Manage': 'စီမံခန့်ခွဲရန်',
'Manage %(action)s': '%(action)s ကို စီမံရန်',
'Manage Access Control': 'အသုံးပြုခြင်းဆိုင်ရာ ထိန်းချုပ်မှု စီမံခန့်ခွဲရန်',
'Manage Cache': 'Manage Cache',
'Memberships': 'အသင်းဝင်များ',
'Menu Model': 'Menu Model',
'models': 'models',
'Models': 'Models',
'Modified By': 'ပြင်ဆင်မွမ်းမံသူ',
'Modified On': 'ပြင်ဆင်မွမ်းမံသည့် အချိန်',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'ကျွန်ုပ်၏ Site များ',
'Name': 'အမည်',
'New application wizard': 'New application wizard',
'New Record': 'မှတ်တမ်း အသစ်',
'new record inserted': 'မှတ်တမ်း အသစ် ဖြည့်သွင်းပြီးပြီ',
'New simple application': 'ရိုးရိုး application အသစ်',
'next %s rows': 'နောက်အတန်း %s တန်း',
'No databases in this application': 'ဒီ application တွင် မည်သည့် ဒေတာဘေစ့်မှ မရှိပါ',
'no package selected': 'no package selected',
'Object or table name': 'Object or table name',
'Online examples': 'အွန်လိုင်း နမူနာများ',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'or import from csv file',
'Origin': 'မူလ အစ',
'Other Plugins': 'အခြား Plugins',
'Other Recipes': 'အခြား Recipes',
'Overview': 'အပေါ်ယံရှုမြင်ခြင်း',
'Overwrite installed app': 'Overwrite installed app',
'Pack all': 'အားလုံးကို ထုပ်ပိုးရန်',
'Pack custom': 'ရွေးချယ်ထုပ်ပိုးရန်',
'Password': 'စကားဝှက်',
"Password fields don't match": 'စကားဝှက်များ ကိုက်ညီမှု မရှိပါ',
'Permission': 'ခွင့်ပြုချက်',
'Permissions': 'ခွင့်ပြုချက်များ',
'please input your password again': 'ကျေးဇူးပြု၍ စကားဝှက်ကို ထပ်မံ ဖြည့်သွင်းပေးပါ',
'Plugins': 'Plugins',
'plugins': 'plugins',
'Plural-Forms:': 'Plural-Forms:',
'Powered by': 'အားဖြည့်စွမ်းအားပေးသူ',
'Preface': 'နိဒါန်း',
'previous %s rows': 'previous %s rows',
'Private files': 'Private files',
'private files': 'private files',
'pygraphviz library not found': 'pygraphviz library ကို မတွေ့ပါ',
'Python': 'Python',
'Query:': 'Query:',
'Quick Examples': 'အမြန် အသုံးပြုနိုင်သော နမူနာများ',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram ရှင်းလင်းပြီးပြီ',
'Recipes': 'Recipes',
'Record': 'မှတ်တမ်း',
'record does not exist': 'မှတ်တမ်း မရှိပါ',
'Record ID': 'Record ID',
'Record id': 'Record id',
'Register': 'မှတ်ပုံတင်ရန်',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Reload routes': 'Reload routes',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Request reset password': 'စကားဝှက် အသစ် တောင်းဆိုရန်',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Rows in Table',
'Rows selected': 'ရွေးထားသော အတန်းများ',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')",
'Running on %s': 'Running on %s',
'Save model as...': 'Save model as...',
'Semantic': 'Semantic',
'Services': 'Services',
'shell': 'shell',
'Site': 'Site',
'Size of cache:': 'Size of cache:',
'Start wizard': 'Start wizard',
'state': 'state',
'static': 'static',
'Static': 'Static',
'Statistics': 'ကိန်းဂဏန်း အချက်အလက်များ',
'Stylesheet': 'Stylesheet',
'submit': 'ပြုလုပ်ပါ',
'Submit': 'Submit',
'Support': 'အထောက်အပံ့',
'Table': 'ဇယား',
'test': 'test',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The Core': 'The Core',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'The Views': 'The Views',
'There are no plugins': 'There are no plugins',
'There are no private files': 'There are no private files',
'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This App': 'ဒီ App',
'This email already has an account': 'ဒီအီးမေးလ်တွင် အကောင့် ရှိပြီး ဖြစ်ပါသည်',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'To create a plugin, name a file/folder plugin_[name]': 'To create a plugin, name a file/folder plugin_[name]',
'Traceback': 'Traceback',
'Translation strings for the application': 'Translation strings for the application',
'Try the mobile interface': 'Try the mobile interface',
'Twitter': 'Twitter',
'unable to parse csv file': 'unable to parse csv file',
'Uninstall': 'Uninstall',
'update all languages': 'update all languages',
'Update:': 'Update:',
'Upload': 'Upload',
'Upload a package:': 'Upload a package:',
'Upload and install packed application': 'Upload and install packed application',
'upload file:': 'upload file:',
'upload plugin file:': 'upload plugin file:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'အသုံးပြုသူ',
'User ID': 'User ID',
'Users': 'အသုံးပြုသူများ',
'Verify Password': 'စကားဝှက်ကို အတည်ပြုပါ',
'Version': 'Version',
'Versioning': 'Versioning',
'Videos': 'ဗွီဒီယိုများ',
'View': 'ဗျူး',
'views': 'views',
'Views': 'ဗျူးများ',
'Web Framework': 'Web Framework',
'Welcome': 'ကြိုဆိုပါ၏',
'Welcome to web2py!': 'web2py မှ ကြိုဆိုပါသည်။',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'Working...': 'ဆောင်ရွက်နေပါသည် ။ ။ ။',
'You are successfully running web2py': 'သင်သည် web2py ကို အောင်မြင်စွာ လည်ပတ်မောင်းနှင်စေပါသည်။',
'You can modify this application and adapt it to your needs': 'သင် ဒီ application ကို ပြုပြင်မွမ်းမံနိုင်ပါသည်။ ထို့အပြင် သင့်လိုအပ်ချက်များနှင့် ကိုက်ညီစေရန် ပြုလုပ်နိုင်ပါသည်။',
'You visited the url %s': 'သင် လည်ပတ်ခဲ့သော URL %s',
'စကားဝှက် အသစ် တောင်းဆိုရန်': 'စကားဝှက် အသစ် တောင်းဆိုရန်',
'မှတ်ပုံတင်ရန်': 'မှတ်ပုံတင်ရန်',
'ဝင်ရောက်အသုံးပြုရန်': 'ဝင်ရောက်အသုံးပြုရန်',
}
|
cailloumajor/home-web | refs/heads/master | backend/heating/serializers.py | 1 | # -*- coding: utf-8 -*-
import operator
from datetime import datetime, time, timedelta
from functools import reduce
from django.db.models import Q
from rest_framework import serializers
from .models import Zone, Slot, Derogation, PilotwireLog
TIME_FORMAT = '%H:%M'
def validate_quarter_hour(value):
if value.minute % 15 != 0:
raise serializers.ValidationError(
"Seules les valeurs 00, 15, 30 et 45 "
"sont autorisées pour les minutes"
)
class OffsetTimeField(serializers.TimeField):
def to_internal_value(self, data):
super_time = super().to_internal_value(data)
validate_quarter_hour(super_time)
dt = datetime(1, 1, 2, super_time.hour, super_time.minute)
return (dt - timedelta(minutes=1)).time()
def to_representation(self, obj):
if isinstance(obj, time): # pragma: no cover
dt = datetime(1, 1, 1, obj.hour, obj.minute)
obj = (dt + timedelta(minutes=1)).time()
return super().to_representation(obj)
class CustomDateTimeField(serializers.DateTimeField):
def __init__(self, with_offset=False, *args, **kwargs):
self.with_offset = with_offset
super().__init__(*args, **kwargs)
def to_internal_value(self, data):
super_dt = super().to_internal_value(data)
super_dt = super_dt.replace(second=0, microsecond=0)
validate_quarter_hour(super_dt)
if self.with_offset:
super_dt = super_dt - timedelta(minutes=1)
return super_dt
def to_representation(self, obj):
if isinstance(obj, datetime) and self.with_offset:
obj = obj + timedelta(minutes=1)
return super().to_representation(obj)
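# --- Illustrative sketch (not part of the original module) -------------------
# The offset fields store an exclusive end time as an inclusive one: a slot
# entered as ending at 12:00 is persisted as 11:59 and rendered back as 12:00.
# Assuming standard DRF field behaviour, a round trip looks like this
# (hypothetical helper, shown for illustration only):
def _offset_time_example():
    field = OffsetTimeField(format=TIME_FORMAT, input_formats=[TIME_FORMAT])
    stored = field.to_internal_value('12:00')    # -> datetime.time(11, 59)
    rendered = field.to_representation(stored)   # -> '12:00'
    return stored, rendered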
class ZoneSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Zone
fields = ('url', 'num', 'desc')
extra_kwargs = {
'url': {'view_name': 'heating:zone-detail'},
}
class SlotSerializer(serializers.HyperlinkedModelSerializer):
end_time = OffsetTimeField(format=TIME_FORMAT, input_formats=[TIME_FORMAT])
class Meta:
model = Slot
fields = ('url', 'id', 'zone', 'mode', 'start_time', 'end_time',
'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
extra_kwargs = {
'start_time': {
'format': TIME_FORMAT, 'input_formats': [TIME_FORMAT]
},
'url': {'view_name': 'heating:slot-detail'},
'zone': {'view_name': 'heating:zone-detail'},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['start_time'].validators += [validate_quarter_hour]
def validate(self, data):
days_on = [d for d in
['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
if data.get(d)]
q_objects = [Q(t) for t in [(d, True) for d in days_on]]
s_time = data.get('start_time')
e_time = data.get('end_time')
zone = data.get('zone')
if not days_on:
raise serializers.ValidationError("Aucun jour sélectionné")
if not s_time < e_time:
raise serializers.ValidationError(
"L'heure de fin doit être supérieure à l'heure de début"
)
instance_pk = getattr(self.instance, 'pk', None)
queryset = Slot.objects.exclude(pk=instance_pk).filter(zone=zone)
queryset = queryset.filter(reduce(operator.or_, q_objects))
queryset = queryset.filter(
(Q(start_time__lte=s_time) & Q(end_time__gte=s_time)) |
(Q(start_time__lte=e_time) & Q(end_time__gte=e_time)) |
(Q(start_time__gte=s_time) & Q(end_time__lte=e_time)))
if queryset.exists():
raise serializers.ValidationError(
"Les horaires sont en conflit avec un créneau existant"
)
return data
class DerogationSerializer(serializers.HyperlinkedModelSerializer):
start_dt = CustomDateTimeField()
end_dt = CustomDateTimeField(with_offset=True)
start_initial = serializers.DateTimeField(write_only=True)
class Meta:
model = Derogation
fields = ('url', 'id', 'mode', 'creation_dt', 'start_dt', 'end_dt',
'zones', 'start_initial')
extra_kwargs = {
'url': {'view_name': 'heating:derogation-detail'},
'zones': {'view_name': 'heating:zone-detail'},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if isinstance(self.instance, Derogation):
del self.fields['start_initial']
def create(self, validated_data):
del validated_data['start_initial']
return super().create(validated_data)
def validate(self, data):
start_initial = data.get('start_initial')
start_dt = data['start_dt']
end_dt = data['end_dt']
if start_initial and start_dt < start_initial:
raise serializers.ValidationError({
'start_dt':
"La prise d'effet ne doit pas se situer dans le passé"
})
if not start_dt < end_dt:
raise serializers.ValidationError({
'end_dt':
"La fin d'effet doit être ultérieure à la prise d'effet"
})
instance_pk = getattr(self.instance, 'pk', None)
queryset = Derogation.objects.exclude(pk=instance_pk)
queryset = queryset.filter(zones__in=data['zones'])
queryset = queryset.filter(
(Q(start_dt__lte=start_dt) & Q(end_dt__gte=start_dt)) |
(Q(start_dt__lte=end_dt) & Q(end_dt__gte=end_dt)) |
(Q(start_dt__gte=start_dt) & Q(end_dt__lte=end_dt))
)
if queryset.exists():
raise serializers.ValidationError(
"Les horaires sont en conflit avec une dérogation existante"
)
return data
class PilotwireLogSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = PilotwireLog
fields = ('url', 'id', 'timestamp', 'level', 'message')
extra_kwargs = {
'url': {'view_name': 'heating:pilotwirelog-detail'},
}
|
yangleo/cloud-github | refs/heads/master | horizon/templatetags/sizeformat.py | 65 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for displaying sizes
"""
from oslo_utils import units
from django import template
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
register = template.Library()
def int_format(value):
return int(value)
def float_format(value):
rounded_value = round(value, 1)
if rounded_value.is_integer():
decimal_pos = 0
else:
decimal_pos = 1
return formats.number_format(rounded_value, decimal_pos)
def filesizeformat(bytes, filesize_number_format):
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
return ungettext_lazy("%(size)d Byte",
"%(size)d Bytes", 0) % {'size': 0}
if bytes < units.Ki:
bytes = int(bytes)
return ungettext_lazy("%(size)d Byte",
"%(size)d Bytes", bytes) % {'size': bytes}
if bytes < units.Mi:
return _("%s KB") % filesize_number_format(bytes / units.Ki)
if bytes < units.Gi:
return _("%s MB") % filesize_number_format(bytes / units.Mi)
if bytes < units.Ti:
return _("%s GB") % filesize_number_format(bytes / units.Gi)
if bytes < units.Pi:
return _("%s TB") % filesize_number_format(bytes / units.Ti)
return _("%s PB") % filesize_number_format(bytes / units.Pi)
def float_cast_filesizeformat(value, multiplier=1, format=int_format):
try:
value = float(value)
value = filesizeformat(value * multiplier, format).replace(' ', '')
except (TypeError, ValueError):
value = value or _('0 Bytes')
return value
@register.filter(name='mbformat')
def mbformat(mb):
return float_cast_filesizeformat(mb, units.Mi, int_format)
@register.filter(name='mb_float_format')
def mb_float_format(mb):
return float_cast_filesizeformat(mb, units.Mi, float_format)
@register.filter(name='diskgbformat')
def diskgbformat(gb):
return float_cast_filesizeformat(gb, units.Gi, float_format)
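# Illustrative usage sketch (not part of the original module). In a template,
# assuming context variables holding sizes in MB and GB respectively:
#
#     {% load sizeformat %}
#     {{ flavor.ram|mbformat }}          {# 512 -> "512MB", 2048 -> "2GB" #}
#     {{ flavor.ram|mb_float_format }}   {# 1536 -> "1.5GB" #}
#     {{ flavor.disk|diskgbformat }}     {# 80 -> "80GB" #}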
|
kvr777/deep-learning | refs/heads/master | transfer-learning/tensorflow_vgg/vgg16.py | 153 | import inspect
import os
import numpy as np
import tensorflow as tf
import time
VGG_MEAN = [103.939, 116.779, 123.68]
class Vgg16:
def __init__(self, vgg16_npy_path=None):
if vgg16_npy_path is None:
path = inspect.getfile(Vgg16)
path = os.path.abspath(os.path.join(path, os.pardir))
path = os.path.join(path, "vgg16.npy")
vgg16_npy_path = path
print(path)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
print("npy file loaded")
def build(self, rgb):
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
"""
start_time = time.time()
print("build model started")
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [224, 224, 1]
assert green.get_shape().as_list()[1:] == [224, 224, 1]
assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
assert self.fc6.get_shape().as_list()[1:] == [4096]
self.relu6 = tf.nn.relu(self.fc6)
self.fc7 = self.fc_layer(self.relu6, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
self.fc8 = self.fc_layer(self.relu7, "fc8")
self.prob = tf.nn.softmax(self.fc8, name="prob")
self.data_dict = None
print(("build model finished: %ds" % (time.time() - start_time)))
def avg_pool(self, bottom, name):
return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def max_pool(self, bottom, name):
return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
def conv_layer(self, bottom, name):
with tf.variable_scope(name):
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def fc_layer(self, bottom, name):
with tf.variable_scope(name):
shape = bottom.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
x = tf.reshape(bottom, [-1, dim])
weights = self.get_fc_weight(name)
biases = self.get_bias(name)
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
return fc
def get_conv_filter(self, name):
return tf.constant(self.data_dict[name][0], name="filter")
def get_bias(self, name):
return tf.constant(self.data_dict[name][1], name="biases")
def get_fc_weight(self, name):
return tf.constant(self.data_dict[name][0], name="weights")
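# --- Illustrative usage sketch (not part of the original file) ---------------
# Assuming TensorFlow 1.x and a vgg16.npy weights file next to this module,
# the network is typically built once on a placeholder and its activations
# (e.g. the relu6 "codes" or prob) read out afterwards:
def _example_usage():
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # RGB scaled to [0, 1]
    vgg = Vgg16()
    with tf.name_scope("content_vgg"):
        vgg.build(images)
    return vgg.relu6, vgg.prob  # bottleneck codes and class probabilities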
|
carlosefr/quicklook | refs/heads/master | templates/processes/__init__.py | 1 | #!/usr/bin/env python
# -*- coding: iso8859-1 -*-
#
# Copyright (c) 2005-2007, Carlos Rodrigues <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (version 2) as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__all__ = ["index", "load", "forks"]
# EOF - __init__.py
|
senthil10/scilifelab | refs/heads/master | scripts/demultiplex_mctag.py | 4 | """
Demultiplex haloplex data including molecular tags.
"""
from __future__ import print_function
import argparse
import collections
import datetime
import fcntl
import gzip
import itertools
import os
import random
import re
import resource
import shlex
import shutil
import subprocess
import sys
import time
#from Bio import Seq, pairwise2
from scilifelab.utils.fastq_utils import FastQParser, FastQWriter
# TODO memoize sequence corrections? optimize somehow if possible
# TODO ensure read 1,2 files are paired (SciLifeLab code)
# TODO add directory processing
def main(read_one, read_two, read_index, data_directory, read_index_num, output_directory,
index_file, max_mismatches=1, force_overwrite=False, progress_interval=1000):
check_input(read_one, read_two, read_index, data_directory, read_index_num,\
output_directory, index_file, max_mismatches, progress_interval)
output_directory = create_output_dir(output_directory, force_overwrite)
index_dict = load_index_file(index_file)
check_index_distances(index_dict.keys(), max_mismatches)
start_time = datetime.datetime.now()
if read_one and read_two and read_index:
reads_processed, num_match, num_ambigmatch, num_nonmatch, num_corrected = \
parse_readset_byindexdict(read_one, read_two, read_index, index_dict, \
output_directory, max_mismatches, progress_interval)
else:
parse_directory() # not yet implemented
elapsed_time = time.strftime('%H:%M:%S', time.gmtime((datetime.datetime.now() - start_time).total_seconds()))
print( "\nProcessing complete in {elapsed_time}:\n\t" \
"{reads_processed} reads processed\n\t" \
"{num_match:>{pad_length}} ({num_match_percent:>6.2f}%) matched to supplied indexes\n\t" \
"{num_corrected:>{pad_length}} ({num_corrected_percent:>6.2f}%) corrected indexes\n\t" \
"{num_ambigmatch:>{pad_length}} ({num_ambigmatch_percent:>6.2f}%) matches to more than one supplied index.\n\t" \
"{num_nonmatch:>{pad_length}} ({num_nonmatch_percent:>6.2f}%) unmatched to supplied indexes".format(
elapsed_time=elapsed_time,
reads_processed=reads_processed, num_match=num_match, num_nonmatch=num_nonmatch, num_ambigmatch=num_ambigmatch, num_corrected=num_corrected,
num_match_percent = (100.0 * num_match)/reads_processed,
num_corrected_percent = (100.0 * num_corrected)/reads_processed,
num_ambigmatch_percent = (100.0 * num_ambigmatch)/reads_processed,
num_nonmatch_percent = (100.0 * num_nonmatch)/reads_processed,
pad_length = len(str(reads_processed))), file=sys.stdout)
def check_input(read_one, read_two, read_index, data_directory, read_index_num, output_directory, index_file, max_mismatches, progress_interval):
"""
Check user-supplied inputs for validity, completeness.
"""
if not output_directory:
raise SyntaxError("Must specify output directory.")
if not index_file:
raise SyntaxError("Must specify file containing indexes.")
if not (read_one and read_index):
raise SyntaxError("Must speify both data and index reads.")
if (read_one or read_two or read_index) and (data_directory or read_index_num):
raise SyntaxError("Ambiguous: too many options specified. Specify either file paths " \
"or directory and read index number.")
if not ((read_one and read_two and read_index) or (data_directory and read_index_num)):
raise SyntaxError("Insufficient information: either a directory and read index number " \
"or explicit paths to sequencing files must be specified.")
try:
assert(type(progress_interval) == int and progress_interval > 0)
except AssertionError:
raise SyntaxError("Progress interval must be a positive integer.")
try:
assert(type(max_mismatches) == int and max_mismatches >= 0)
except AssertionError:
raise SyntaxError("Maximum mismatches in error correction must be >= 0.")
def parse_directory():
"""
Searches the directory for fastq file sets and calls parse_readset() on them.
"""
raise NotImplementedError("I haven't implemented this yet, so don't go using it.")
# possibly implement as generator, calling parse_readset_byindexdict in a for loop from the calling loop
def parse_readset_byindexdict(read_1_fq, read_2_fq, read_index_fq, index_dict, output_directory, max_mismatches=1, progress_interval=1000):
"""
Parse input fastq files, searching for matches to each index.
"""
print("Processing read set associated with \"{}\" using user-supplied indexes.".format(read_1_fq), file=sys.stderr)
print("Maximum number of mismatches for error correction is {}.".format(max_mismatches), file=sys.stderr)
reads_processed, num_match, num_ambigmatch, num_nonmatch, num_corrected = 0, 0, 0, 0, 0
fqp_1, fqp_2, fqp_ind = map(FastQParser, (read_1_fq, read_2_fq, read_index_fq))
print("Counting total number of lines in fastq files...", file=sys.stderr, end="")
# I think du -k * 16 / 1.024 should give approximately the right number for any number of reads greater than 1000 or so
total_lines_in_file = int(subprocess.check_output(shlex.split("wc -l {}".format(read_1_fq))).split()[0])
print(" complete.", file=sys.stderr)
if not progress_interval: progress_interval = 1000
if progress_interval > (total_lines_in_file / 4): progress_interval = (total_lines_in_file / 4)
index_fh_dict = collections.defaultdict(list)
print("Demultiplexing...", file=sys.stderr)
time_started = datetime.datetime.now()
for read_1, read_2, read_ind in itertools.izip(fqp_1, fqp_2, fqp_ind):
read_ind_seq = read_ind[1]
matches_dict = collections.defaultdict(list)
# Sort indexes by descending length to match longer indexes first
for supplied_index in sorted(index_dict.keys(), key=lambda x: (-len(x))):
mismatches = find_dist(supplied_index, read_ind_seq, max_mismatches)
matches_dict[mismatches].append(supplied_index)
if mismatches == 0:
break
for x in range(0, max_mismatches+1):
if matches_dict.get(x):
if len(matches_dict.get(x)) == 1:
# Single unambiguous match
index_seq = matches_dict[x][0]
index_len = len(index_seq)
molecular_tag = read_ind_seq[index_len:]
modify_reads( (read_1, read_2), index_seq, molecular_tag)
sample_name = index_dict[index_seq] if index_dict[index_seq] else index_seq
data_write_loop(read_1, read_2, sample_name, output_directory, index_fh_dict, index_seq)
num_match += 1
if not x == 0:
num_corrected += 1
break
else:
# Ambiguous match
sample_name = "Ambiguous"
index_seq_list = ",".join(matches_dict.get(x))
modify_reads( (read_1, read_2), index_seq_list, read_ind_seq)
data_write_loop(read_1, read_2, sample_name, output_directory, index_fh_dict, sample_name)
num_ambigmatch += 1
break
else:
# No match
sample_name = "Undetermined"
modify_reads( (read_1, read_2), "", read_ind_seq)
data_write_loop(read_1, read_2, sample_name, output_directory, index_fh_dict, sample_name)
num_nonmatch += 1
reads_processed += 1
if reads_processed % progress_interval == 0:
print_progress(reads_processed, (total_lines_in_file / 4), time_started=time_started)
return reads_processed, num_match, num_ambigmatch, num_nonmatch, num_corrected
def data_write_loop(read_1, read_2, sample_name, output_directory, index_fh_dict, index):
"""
Writes data using FastQAppender, closing files if we open too many.
"""
for read_num, read in enumerate([read_1, read_2]):
try:
index_fh_dict[index][read_num].write(read)
except IndexError:
file_path = os.path.join(output_directory, "{sample_name}_R{read_num}.fastq".format( \
sample_name=sample_name, read_num=read_num+1))
try:
index_fh_dict[index].append(FastQAppender(file_path))
except IOError as e:
# Too many open filehandles
if e.errno == 24:
for fh1, fh2 in index_fh_dict.values():
map(file.close, [ fh1, fh2 ] )
index_fh_dict[index].append(FastQAppender(file_path))
index_fh_dict[index][read_num].write(read)
else:
raise IOError(e)
except ValueError:
# File was closed previously
index_fh_dict[index][read_num].reopen()
# TODO make this faster
# TODO compare to Bio.align.pairwise2 for speed
# TODO possibly @memoize somehow
def find_dist(str_01, str_02, max_mismatches=None, approach="shorten"):
"""
Find the number of mismatches between two strings. The longer string is truncated
to the length of the shorter unless approach is "lengthen".
"""
if len(str_01) > len(str_02):
if approach == "lengthen":
str_02 = "{:<{length}}".format(str_02, length=len(str_01))
else:
str_01 = str_01[:len(str_02)]
elif len(str_02) > len(str_01):
if approach == "lengthen":
str_01 = "{:<{length}}".format(str_01, length=len(str_02))
else:
str_02 = str_02[:len(str_01)]
mismatches = 0
for a, b in itertools.izip(str_01, str_02):
if a != b:
mismatches += 1
if max_mismatches and mismatches > max_mismatches:
break
return mismatches
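# Illustration (not part of the original script): by default the longer string
# is truncated to the shorter one, so an index that prefixes the index read
# matches exactly; counting stops early once max_mismatches is exceeded.
#     find_dist("ACGTAC", "ACGTACTTT")                      # -> 0
#     find_dist("ACGTAC", "AGGAACTTT", max_mismatches=1)    # -> 2 (early stop)
#     find_dist("ACG", "ACGTT", approach="lengthen")        # -> 2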
def print_progress(processed, total, type='text', time_started=None, leading_text=""):
"""
Prints the progress, either in text or in visual form.
"""
percentage_complete = float(processed) / total
if time_started:
completion_time = estimate_completion_time(time_started, percentage_complete)
else:
time_started = "-"
_, term_cols = os.popen('stty size', 'r').read().split()
term_cols = int(term_cols) - 10 - len(leading_text)
sys.stderr.write('\r')
if type == 'bar' and term_cols > 10:
progress = int(term_cols * percentage_complete)
sys.stderr.write("{leading_text}[{progress:<{cols}}] {percentage:0.2f}%".format(
leading_text=leading_text, progress="="*progress, cols=term_cols, percentage=percentage_complete*100))
else:
sys.stderr.write("{leading_text}{processed}/{total} items processed ({percentage_complete:0.2f}% finished) ETA: {completion_time}".format(
leading_text=leading_text, processed=processed, total=total, percentage_complete=percentage_complete*100, completion_time=completion_time))
sys.stderr.flush()
def estimate_completion_time(start_time, percent_complete):
"""
http://xkcd.com/612/
"""
if not type(start_time) == datetime.datetime:
return None
seconds_elapsed = (datetime.datetime.now() - start_time).total_seconds()
seconds_total = seconds_elapsed / percent_complete
seconds_left = seconds_total - seconds_elapsed
# More than a day remaining
if seconds_left > 86400:
days = int(seconds_left // 86400)
seconds = seconds_left - (days * 86400)
return "{}:{}".format(days, time.strftime('%H:%M:%S', time.gmtime(seconds)))
else:
return time.strftime('%H:%M:%S', time.gmtime(seconds_left))
def modify_reads(read_list, index, molecular_tag):
"""
Add the extra tags to the header of each read
"""
for read in read_list:
read[0] = read[0] + "{}:{}:".format(index, molecular_tag)
def create_output_dir(output_directory, force_overwrite):
"""
Create the output directory, forcing overwrite if the -f flag is passed; otherwise, fail if the directory already exists.
"""
output_directory = os.path.abspath(output_directory)
if os.path.exists(output_directory):
if force_overwrite:
print("Warning: removing/overwriting output directory \"{}\"...".format(output_directory), file=sys.stderr, end="")
shutil.rmtree(output_directory)
print(" removed.", file=sys.stderr)
os.makedirs(output_directory)
return output_directory
def load_index_file(csv_file, usecols=(0,1)):
"""
Load known indexes from a csv file.
csv file should be in format:
sequence[,index_name]
where index_name is optional.
Returns a dict of sequence->name pairs.
"""
if not len(usecols) == 2:
raise ValueError("Warning: only two columns (index, sample_name) can be selected.", file=sys.stderr)
if not usecols == (0,1):
print("Warning: using non-standard columns for index, sample_name ({} instead of (0,1))".format(usecols), file=sys.stderr)
index_column, name_column = usecols
index_dict = {}
with open(csv_file, 'r') as f:
for line in f:
# could also use csv.sniffer to dynamically determine delimiter
index_line = re.split(r'[\t,;]', line.strip())
try:
index_dict[ index_line[index_column] ] = index_line[name_column]
except IndexError:
index_dict[ index_line[index_column] ] = None
return index_dict
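# Illustration (not part of the original script): given an index file such as
#     CGCTGATC,Sample_A
#     AAGGTACC;Sample_B
#     TTCCAGGT
# load_index_file() returns
#     {'CGCTGATC': 'Sample_A', 'AAGGTACC': 'Sample_B', 'TTCCAGGT': None}
# and reads matching the unnamed index are written out under the sequence itself.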
def check_index_distances(index_list, max_mismatches):
"""
Determines if too many mismatches are allowed for this set of indexes to resolve unambiguously.
"""
for i1, i2 in itertools.combinations(index_list, r=2):
if find_dist(i1, i2, max_mismatches, approach="lengthen") <= max_mismatches:
print("Warning: indexes \"{}\" and \"{}\" are insufficiently different for the specified number of mismatches ({}). Reads matching either index will be classified as ambiguous.".format(i1, i2, max_mismatches), file=sys.stderr)
# TODO This doesn't really belong here and should probably be its own module
def count_top_indexes(count_num, index_file, index_length, progress_interval):
"""
Determine the most common indexes, sampling at most 200,000 reads.
"""
assert(type(count_num) == int and count_num > 0), "Number passed must be a positive integer."
fqp_ind = FastQParser(index_file)
# This should perhaps be added to the FastQParser class
print("Counting total number of lines in fastq file...", file=sys.stderr, end="")
total_lines = int(subprocess.check_output(shlex.split("wc -l {}".format(index_file))).split()[0])
total_reads = total_lines / 4
print(" complete.", file=sys.stderr)
index_tally = collections.defaultdict(int)
reads_processed = 0
# Subsample if file is large
if (total_reads) > 200000:
print("Subsampling 200,000 reads from index file...", file=sys.stderr)
fqp_ind = iter_sample_fast(fqp_ind, 200000, total_reads)
print("Complete.", file=sys.stderr)
total_reads = 200000
print("Tallying indexes in {} records...".format(total_reads), file=sys.stderr)
start_time = datetime.datetime.now()
for index in fqp_ind:
index_read_seq = index[1]
index_seq = index_read_seq[:index_length]
index_tally[index_seq] += 1
reads_processed += 1
if reads_processed % progress_interval == 0:
print_progress(reads_processed, total_reads, time_started=start_time)
print("\n", file=sys.stderr)
if count_num > len(index_tally.keys()):
print("Number of indexes found ({}) is fewer than those requested ({}). Printing all indexes found.".format(len(index_tally.keys()), count_num), file=sys.stderr)
print("Printing indexes...", file=sys.stderr)
count_num = len(index_tally.keys())
print("{:<20} {:>20} {:>11}".format("Index", "Occurences", "Percentage"))
for index, _ in sorted(index_tally.items(), key=(lambda x: x[1]), reverse=True)[:count_num]:
percentage = (100.0 * index_tally[index] ) / total_reads
print("{:<20} {:>20,} {:>10.2f}%".format(index, index_tally[index], percentage))
def iter_sample_fast(iterable, samplesize, total_size):
"""
http://stackoverflow.com/questions/12581437/python-random-sample-with-a-generator/12583436#12583436
"""
results = []
iterator = iter(iterable)
# Fill in the first samplesize elements:
try:
for _ in xrange(samplesize):
results.append(iterator.next())
print_progress(len(results), 200000)
except StopIteration:
raise ValueError("Sample larger than population.")
random.shuffle(results) # Randomize their positions
for i, v in enumerate(iterator, samplesize):
r = random.randint(0, i)
if r < samplesize:
results[r] = v # at a decreasing rate, replace random items
print_progress(i, total_size)
return results
class FastQAppender(FastQWriter):
"""
A good deal like the FastQWriter but appends instead of writing.
Also allows file re-opening.
"""
def __init__(self, file):
self.fname = file
fh = open(file,"ab")
if file.endswith(".gz"):
self._fh = gzip.GzipFile(fileobj=fh)
else:
self._fh = fh
self._records_written = 0
def reopen(self):
_records_written = self._records_written
self.__init__(self.fname)
self._records_written = _records_written
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output-directory",
help="The directory to be used for storing output data. Required.")
parser.add_argument("-i", "--index-file",
help="File containing haloplex indexes (one per line, optional name " \
"in second column separated by tab, comma, or semicolon).")
parser.add_argument("-1", "--read-one",
help="Read 1 fastq file.")
parser.add_argument("-2", "--read-two",
help="Read 2 fastq file.")
parser.add_argument("-r", "--read-index",
help="Index read fastq file.")
parser.add_argument("-d", "--data-directory",
help="Directory containing fastq read data. Requires specifying read number (-n).")
parser.add_argument("-n", "--read-index-num", type=int,
help="Which read is the index (e.g. 1, 2, 3).")
parser.add_argument("-f", "--force-overwrite", action="store_true",
help="Force overwrite to output directory.")
parser.add_argument("-t", "--top-indexes", type=int,
help="Find the n most common indexes. Pair with -l (index length) and -r (read index file). Does not perform any demultiplexing.")
parser.add_argument("-l", "--index-length", type=int,
help="The length of the index.")
parser.add_argument("-m", "--mismatches", type=int, dest="max_mismatches", default=1,
help="The maximum number of mismatches allowed when performing error correction. Default is 1; set to 0 for max speed.")
parser.add_argument("-p", "--progress-interval", type=int, default=1000,
help="Update progress, estimated completion time every N reads (default 1000).")
arg_vars = vars(parser.parse_args())
# It's my namespace and I'll clobber it if I want to
locals().update(arg_vars)
if arg_vars['top_indexes']:
if not arg_vars['index_length']:
raise SyntaxError("Must indicate index length to tally.")
if not arg_vars['read_index']:
raise SyntaxError("Must indicate file to parse for indexes.")
else:
count_top_indexes(top_indexes, read_index, index_length, progress_interval)
else:
main(read_one, read_two, read_index, data_directory, read_index_num, output_directory, index_file, max_mismatches, force_overwrite, progress_interval)
|
AOSPA-L/android_external_skia | refs/heads/lollipop-mr1 | gm/rebaseline_server/download_actuals_test.py | 66 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test download.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
# System-level imports
import os
import shutil
import tempfile
import urllib
# Imports from within Skia
import fix_pythonpath # must do this first
from pyutils import url_utils
import base_unittest
import download_actuals
class DownloadTest(base_unittest.TestCase):
def test_fetch(self):
"""Tests fetch() of GM results from actual-results.json ."""
downloader = download_actuals.Download(
actuals_base_url=url_utils.create_filepath_url(
os.path.join(self._input_dir, 'gm-actuals')),
gm_actuals_root_url=url_utils.create_filepath_url(
os.path.join(self._input_dir, 'fake-gm-imagefiles')))
downloader.fetch(
builder_name='Test-Android-GalaxyNexus-SGX540-Arm7-Release',
dest_dir=self._output_dir_actual)
def main():
base_unittest.main(DownloadTest)
if __name__ == '__main__':
main()
|
mozilla/mwc | refs/heads/master | vendor-local/packages/requests/requests/async.py | 4 | # -*- coding: utf-8 -*-
"""
requests.async
~~~~~~~~~~~~~~
This module contains an asynchronous replica of ``requests.api``, powered
by gevent. All API methods return a ``Request`` instance (as opposed to
``Response``). A list of requests can be sent with ``map()``.
"""
try:
import gevent
from gevent import monkey as curious_george
from gevent.pool import Pool
except ImportError:
raise RuntimeError('Gevent is required for requests.async.')
# Monkey-patch.
curious_george.patch_all(thread=False)
from . import api
__all__ = (
'map',
'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'request'
)
def patched(f):
"""Patches a given API function to not send."""
def wrapped(*args, **kwargs):
kwargs['return_response'] = False
kwargs['prefetch'] = True
config = kwargs.get('config', {})
config.update(safe_mode=True)
kwargs['config'] = config
return f(*args, **kwargs)
return wrapped
def send(r, pool=None, prefetch=False):
"""Sends the request object using the specified pool. If a pool isn't
specified this method blocks. Pools are useful because you can specify size
and can hence limit concurrency."""
if pool != None:
return pool.spawn(r.send, prefetch=prefetch)
return gevent.spawn(r.send, prefetch=prefetch)
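# Illustrative usage sketch (not part of the original module), assuming the
# pre-1.0 requests API that this vendored copy ships with:
#
#     from requests import async
#     urls = ['http://example.org/', 'http://example.com/']
#     rs = [async.get(u) for u in urls]   # unsent Request objects
#     responses = async.map(rs, size=5)   # send at most 5 concurrently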
# Patched requests.api functions.
get = patched(api.get)
options = patched(api.options)
head = patched(api.head)
post = patched(api.post)
put = patched(api.put)
patch = patched(api.patch)
delete = patched(api.delete)
request = patched(api.request)
def map(requests, prefetch=True, size=None):
"""Concurrently converts a list of Requests to Responses.
:param requests: a collection of Request objects.
:param prefetch: If False, the content will not be downloaded immediately.
:param size: Specifies the number of requests to make at a time. If None, no throttling occurs.
"""
requests = list(requests)
pool = Pool(size) if size else None
jobs = [send(r, pool, prefetch=prefetch) for r in requests]
gevent.joinall(jobs)
return [r.response for r in requests]
|
mdclyburn/ardupilot | refs/heads/master | mk/PX4/Tools/genmsg/src/genmsg/msg_loader.py | 215 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
"""
Loader for messages and :class:`MsgContext` that assumes a
dictionary-based search path scheme (keys are the package/namespace,
values are the paths). Compatible with ROS package system and other
possible layouts.
"""
import os
import sys
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . base import InvalidMsgSpec, log, SEP, COMMENTCHAR, CONSTCHAR, IODELIM, EXT_MSG, EXT_SRV
from . msgs import MsgSpec, TIME, TIME_MSG, DURATION, DURATION_MSG, HEADER, HEADER_FULL_NAME, \
is_builtin, is_valid_msg_field_name, is_valid_msg_type, bare_msg_type, is_valid_constant_type, \
Field, Constant, resolve_type
from . names import normalize_package_context, package_resource_name
from . srvs import SrvSpec
class MsgNotFound(Exception):
pass
def get_msg_file(package, base_type, search_path, ext=EXT_MSG):
"""
Determine the file system path for the specified ``.msg`` on
*search_path*.
:param package: name of package file is in, ``str``
:param base_type: type name of message, e.g. 'Point2DFloat32', ``str``
:param search_path: dictionary mapping message namespaces to a directory locations
:param ext: msg file extension. Override with EXT_SRV to search for services instead.
:returns: filesystem path of requested file, ``str``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("msg_file(%s, %s, %s)" % (package, base_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
if not package in search_path:
raise MsgNotFound("Cannot locate message [%s]: unknown package [%s] on search path [%s]" \
% (base_type, package, search_path))
else:
for path_tmp in search_path[package]:
path = os.path.join(path_tmp, "%s%s"%(base_type, ext))
if os.path.isfile(path):
return path
raise MsgNotFound("Cannot locate message [%s] in package [%s] with paths [%s]"%
(base_type, package, str(search_path[package])))
def get_srv_file(package, base_type, search_path):
"""
Determine the file system path for the specified .srv on path.
:param package: name of package ``.srv`` file is in, ``str``
:param base_type: type name of service, e.g. 'Empty', ``str``
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: file path of ``.srv`` file in specified package, ``str``
:raises: :exc:`MsgNotFound` If service file cannot be located.
"""
return get_msg_file(package, base_type, search_path, ext=EXT_SRV)
def load_msg_by_type(msg_context, msg_type, search_path):
"""
Load message specification for specified type.
NOTE: this will register the message in the *msg_context*.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
:param msg_type: relative or full message type.
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: :class:`MsgSpec` instance, ``(str, MsgSpec)``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("load_msg_by_type(%s, %s)" % (msg_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
if msg_type == HEADER:
msg_type = HEADER_FULL_NAME
package_name, base_type = package_resource_name(msg_type)
file_path = get_msg_file(package_name, base_type, search_path)
log("file_path", file_path)
spec = load_msg_from_file(msg_context, file_path, msg_type)
msg_context.set_file(msg_type, file_path)
return spec
def load_srv_by_type(msg_context, srv_type, search_path):
"""
Load service specification for specified type.
NOTE: services are *never* registered in a :class:`MsgContext`.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
:param srv_type: relative or full message type.
:param search_path: dictionary mapping message namespaces to a directory locations
:returns: :class:`MsgSpec` instance, ``(str, MsgSpec)``
:raises: :exc:`MsgNotFound` If message cannot be located.
"""
log("load_srv_by_type(%s, %s)" % (srv_type, str(search_path)))
if not isinstance(search_path, dict):
raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
package_name, base_type = package_resource_name(srv_type)
file_path = get_srv_file(package_name, base_type, search_path)
log("file_path", file_path)
return load_srv_from_file(msg_context, file_path, srv_type)
def convert_constant_value(field_type, val):
"""
Convert constant value declaration to python value. Does not do
type-checking, so ValueError or other exceptions may be raised.
:param field_type: ROS field type, ``str``
:param val: string representation of constant, ``str``
:raises: :exc:`ValueError` If unable to convert to python representation
:raises: :exc:`InvalidMsgSpec` If value exceeds specified integer width
"""
if field_type in ['float32','float64']:
return float(val)
elif field_type in ['string']:
return val.strip() #string constants are always stripped
elif field_type in ['int8', 'uint8', 'int16','uint16','int32','uint32','int64','uint64', 'char', 'byte']:
# bounds checking
bits = [('int8', 8), ('uint8', 8), ('int16', 16),('uint16', 16),\
('int32', 32),('uint32', 32), ('int64', 64),('uint64', 64),\
('byte', 8), ('char', 8)]
b = [b for t, b in bits if t == field_type][0]
import math
if field_type[0] == 'u' or field_type == 'char':
lower = 0
upper = int(math.pow(2, b)-1)
else:
upper = int(math.pow(2, b-1)-1)
lower = -upper - 1 #two's complement min
val = int(val) #python will autocast to long if necessary
if val > upper or val < lower:
raise InvalidMsgSpec("cannot coerce [%s] to %s (out of bounds)"%(val, field_type))
return val
elif field_type == 'bool':
# TODO: need to nail down constant spec for bool
return True if eval(val) else False
raise InvalidMsgSpec("invalid constant type: [%s]"%field_type)
def _load_constant_line(orig_line):
"""
:raises: :exc:`InvalidMsgSpec`
"""
clean_line = _strip_comments(orig_line)
line_splits = [s for s in [x.strip() for x in clean_line.split(" ")] if s] #split type/name, filter out empties
field_type = line_splits[0]
if not is_valid_constant_type(field_type):
raise InvalidMsgSpec("%s is not a legal constant type"%field_type)
if field_type == 'string':
# strings contain anything to the right of the equals sign, there are no comments allowed
idx = orig_line.find(CONSTCHAR)
name = orig_line[orig_line.find(' ')+1:idx]
val = orig_line[idx+1:]
else:
line_splits = [x.strip() for x in ' '.join(line_splits[1:]).split(CONSTCHAR)] #resplit on '='
if len(line_splits) != 2:
raise InvalidMsgSpec("Invalid constant declaration: %s"%l)
name = line_splits[0]
val = line_splits[1]
try:
val_converted = convert_constant_value(field_type, val)
except Exception as e:
raise InvalidMsgSpec("Invalid constant value: %s"%e)
return Constant(field_type, name, val_converted, val.strip())
def _load_field_line(orig_line, package_context):
"""
:returns: (field_type, name) tuple, ``(str, str)``
:raises: :exc:`InvalidMsgSpec`
"""
#log("_load_field_line", orig_line, package_context)
clean_line = _strip_comments(orig_line)
line_splits = [s for s in [x.strip() for x in clean_line.split(" ")] if s] #split type/name, filter out empties
if len(line_splits) != 2:
raise InvalidMsgSpec("Invalid declaration: %s"%(orig_line))
field_type, name = line_splits
if not is_valid_msg_field_name(name):
raise InvalidMsgSpec("%s is not a legal message field name"%name)
if not is_valid_msg_type(field_type):
raise InvalidMsgSpec("%s is not a legal message field type"%field_type)
if package_context and not SEP in field_type:
if field_type == HEADER:
field_type = HEADER_FULL_NAME
elif not is_builtin(bare_msg_type(field_type)):
field_type = "%s/%s"%(package_context, field_type)
elif field_type == HEADER:
field_type = HEADER_FULL_NAME
return field_type, name
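# Editor's note (illustrative, editor-added): with package_context='geometry_msgs',
# a field line "Point32 p" resolves to ('geometry_msgs/Point32', 'p'),
# "Header header" expands to HEADER_FULL_NAME ('std_msgs/Header' in genmsg),
# and builtins such as "float64 x" pass through unchanged as ('float64', 'x').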
def _strip_comments(line):
return line.split(COMMENTCHAR)[0].strip() #strip comments
def load_msg_from_string(msg_context, text, full_name):
"""
Load message specification from a string.
NOTE: this will register the message in the *msg_context*.
:param msg_context: :class:`MsgContext` for finding loaded dependencies
    :param text: .msg text, ``str``
    :param full_name: full type name of the message, e.g. 'std_msgs/String', ``str``
    :returns: :class:`MsgSpec` specification
:raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
log("load_msg_from_string", full_name)
package_name, short_name = package_resource_name(full_name)
types = []
names = []
constants = []
for orig_line in text.split('\n'):
clean_line = _strip_comments(orig_line)
if not clean_line:
continue #ignore empty lines
if CONSTCHAR in clean_line:
constants.append(_load_constant_line(orig_line))
else:
field_type, name = _load_field_line(orig_line, package_name)
types.append(field_type)
names.append(name)
spec = MsgSpec(types, names, constants, text, full_name, package_name)
msg_context.register(full_name, spec)
return spec
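# Editor's note: minimal usage sketch (editor-added, not part of genmsg); the
# message text and type name below are invented for illustration.
def _example_load_point_from_string():
    msg_context = MsgContext.create_default()
    text = "float64 x\nfloat64 y\nint32 COUNT=2  # a constant\n"
    spec = load_msg_from_string(msg_context, text, 'demo_pkg/Point2D')
    # spec.types == ['float64', 'float64'], spec.names == ['x', 'y'], and
    # spec.constants holds the COUNT declaration; the spec is now registered
    # in msg_context under 'demo_pkg/Point2D'.
    return spec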
def load_msg_from_file(msg_context, file_path, full_name):
"""
Convert the .msg representation in the file to a :class:`MsgSpec` instance.
NOTE: this will register the message in the *msg_context*.
:param file_path: path of file to load from, ``str``
:returns: :class:`MsgSpec` instance
:raises: :exc:`InvalidMsgSpec`: if syntax errors or other problems are detected in file
"""
log("Load spec from", file_path)
with open(file_path, 'r') as f:
text = f.read()
try:
return load_msg_from_string(msg_context, text, full_name)
except InvalidMsgSpec as e:
raise InvalidMsgSpec('%s: %s'%(file_path, e))
def load_msg_depends(msg_context, spec, search_path):
"""
    Compute the list of message types that *spec* depends on and register them in *msg_context*.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: message to compute dependencies for, :class:`MsgSpec`/:class:`SrvSpec`
    :param search_path: dictionary mapping message namespaces to directory locations
:returns: list of dependency names, ``[str]``
:raises: :exc:`MsgNotFound` If dependency cannot be located.
"""
package_context = spec.package
log("load_msg_depends <spec>", spec.full_name, package_context)
depends = []
# Iterate over each field, loading as necessary
for unresolved_type in spec.types:
bare_type = bare_msg_type(unresolved_type)
resolved_type = resolve_type(bare_type, package_context)
if is_builtin(resolved_type):
continue
# Retrieve the MsgSpec instance of the field
if msg_context.is_registered(resolved_type):
depspec = msg_context.get_registered(resolved_type)
else:
# load and register on demand
depspec = load_msg_by_type(msg_context, resolved_type, search_path)
msg_context.register(resolved_type, depspec)
# Update dependencies
depends.append(resolved_type)
        # - check whether the dependencies of this field type have already been computed
dep_dependencies = msg_context.get_depends(resolved_type)
if dep_dependencies is None:
load_msg_depends(msg_context, depspec, search_path)
assert spec.full_name, "MsgSpec must have a properly set full name"
msg_context.set_depends(spec.full_name, depends)
    # have to copy array in order to prevent inadvertent mutation (we've stored this list via set_depends)
return depends[:]
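# Editor's note (illustrative, editor-added): for a spec like nav_msgs/Odometry,
# load_msg_depends records its direct field dependencies (e.g. std_msgs/Header,
# geometry_msgs/PoseWithCovariance, ...) via msg_context.set_depends(), loading
# and recursing into any field type not yet registered so that transitive
# dependencies also end up in the same MsgContext.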
def load_depends(msg_context, spec, msg_search_path):
"""
Compute dependencies of *spec* and load their MsgSpec dependencies
into *msg_context*.
NOTE: *msg_search_path* is only for finding .msg files. ``.srv``
files have a separate and distinct search path. As services
cannot depend on other services, it is not necessary to provide
the srv search path here.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` or :class:`SrvSpec` instance to load dependencies for.
    :param msg_search_path: dictionary mapping message namespaces to directory locations.
:raises: :exc:`MsgNotFound` If dependency cannot be located.
"""
if isinstance(spec, MsgSpec):
return load_msg_depends(msg_context, spec, msg_search_path)
elif isinstance(spec, SrvSpec):
depends = load_msg_depends(msg_context, spec.request, msg_search_path)
depends.extend(load_msg_depends(msg_context, spec.response, msg_search_path))
return depends
else:
raise ValueError("spec does not appear to be a message or service")
class MsgContext(object):
"""
Context object for storing :class:`MsgSpec` instances and related
metadata.
NOTE: All APIs work on :class:`MsgSpec` instance information.
Thus, for services, there is information for the request and
response messages, but there is no direct information about the
:class:`SrvSpec` instance.
"""
def __init__(self):
self._registered_packages = {}
self._files = {}
self._dependencies = {}
def set_file(self, full_msg_type, file_path):
self._files[full_msg_type] = file_path
def get_file(self, full_msg_type):
return self._files.get(full_msg_type, None)
def set_depends(self, full_msg_type, dependencies):
"""
:param dependencies: direct first order
dependencies for *full_msg_type*
"""
log("set_depends", full_msg_type, dependencies)
self._dependencies[full_msg_type] = dependencies
def get_depends(self, full_msg_type):
"""
:returns: List of dependencies for *full_msg_type*,
only first order dependencies
"""
return self._dependencies.get(full_msg_type, None)
def get_all_depends(self, full_msg_type):
all_deps = []
depends = self.get_depends(full_msg_type)
if depends is None:
raise KeyError(full_msg_type)
for d in depends:
all_deps.extend([d])
all_deps.extend(self.get_all_depends(d))
return all_deps
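    # Editor's note (illustrative, editor-added): get_depends() returns only the
    # first-order dependencies recorded via set_depends(), whereas get_all_depends()
    # flattens them recursively; for a hypothetical chain pkg/A -> pkg/B -> pkg/C
    # it would return ['pkg/B', 'pkg/C'], and it raises KeyError as soon as any
    # level has not had its dependencies computed yet.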
@staticmethod
def create_default():
msg_context = MsgContext()
# register builtins (needed for serialization). builtins have no package.
load_msg_from_string(msg_context, TIME_MSG, TIME)
load_msg_from_string(msg_context, DURATION_MSG, DURATION)
return msg_context
def register(self, full_msg_type, msgspec):
full_msg_type = bare_msg_type(full_msg_type)
package, base_type = package_resource_name(full_msg_type)
if package not in self._registered_packages:
self._registered_packages[package] = {}
self._registered_packages[package][base_type] = msgspec
def is_registered(self, full_msg_type):
"""
        :param full_msg_type: Fully resolved message type, ``str``
:returns: ``True`` if :class:`MsgSpec` instance has been loaded for the requested type.
"""
full_msg_type = bare_msg_type(full_msg_type)
package, base_type = package_resource_name(full_msg_type)
if package in self._registered_packages:
return base_type in self._registered_packages[package]
else:
return False
def get_registered(self, full_msg_type):
"""
:raises: :exc:`KeyError` If not registered
"""
full_msg_type = bare_msg_type(full_msg_type)
if self.is_registered(full_msg_type):
package, base_type = package_resource_name(full_msg_type)
return self._registered_packages[package][base_type]
else:
raise KeyError(full_msg_type)
def __str__(self):
return str(self._registered_packages)
def load_srv_from_string(msg_context, text, full_name):
"""
Load :class:`SrvSpec` from the .srv file.
:param msg_context: :class:`MsgContext` instance to load request/response messages into.
:param text: .msg text , ``str``
    :param full_name: full type name of the service, e.g. 'std_srvs/Empty', ``str``
    :returns: :class:`SrvSpec` instance
    :raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
text_in = StringIO()
text_out = StringIO()
accum = text_in
for l in text.split('\n'):
l = l.split(COMMENTCHAR)[0].strip() #strip comments
if l.startswith(IODELIM): #lenient, by request
accum = text_out
else:
accum.write(l+'\n')
# create separate MsgSpec objects for each half of file
msg_in = load_msg_from_string(msg_context, text_in.getvalue(), '%sRequest'%(full_name))
msg_out = load_msg_from_string(msg_context, text_out.getvalue(), '%sResponse'%(full_name))
return SrvSpec(msg_in, msg_out, text, full_name)
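# Editor's note: hedged usage sketch (editor-added); the service text and name
# below are invented. IODELIM is the '---' request/response separator in genmsg.
def _example_load_add_two_ints():
    msg_context = MsgContext.create_default()
    text = "int64 a\nint64 b\n---\nint64 sum\n"
    srv = load_srv_from_string(msg_context, text, 'demo_pkg/AddTwoInts')
    # srv.request is registered as 'demo_pkg/AddTwoIntsRequest' and
    # srv.response as 'demo_pkg/AddTwoIntsResponse'.
    return srv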
def load_srv_from_file(msg_context, file_path, full_name):
"""
Convert the .srv representation in the file to a :class:`SrvSpec` instance.
:param msg_context: :class:`MsgContext` instance to load request/response messages into.
    :param file_path: path of file to load from, ``str``
    :returns: :class:`SrvSpec` instance
    :raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
"""
log("Load spec from %s %s\n"%(file_path, full_name))
with open(file_path, 'r') as f:
text = f.read()
spec = load_srv_from_string(msg_context, text, full_name)
msg_context.set_file('%sRequest'%(full_name), file_path)
msg_context.set_file('%sResponse'%(full_name), file_path)
return spec
|
geerlingguy/ansible | refs/heads/devel | hacking/build_library/build_ansible/command_plugins/collection_meta.py | 5 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pathlib
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
from ..jinja2.filters import documented_type, rst_ify # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
def normalize_options(options):
"""Normalize the options to make for easy templating"""
for opt in options:
if isinstance(opt['description'], string_types):
opt['description'] = [opt['description']]
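# Editor's note: tiny illustrative sketch (editor-added); the option key below is
# an assumption. normalize_options() just wraps scalar descriptions in a list so
# the Jinja2 template can always iterate over them:
#
#   opts = [{'key': 'namespace', 'description': 'Namespace of the collection'}]
#   normalize_options(opts)
#   # opts[0]['description'] == ['Namespace of the collection']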
class DocumentCollectionMeta(Command):
name = 'collection-meta'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE,
help="Jinja2 template to use for the config")
parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
default=str(DEFAULT_TEMPLATE_DIR),
help="directory containing Jinja2 templates")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
help="Output directory for rst files")
parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
help="Source for collection metadata option docs")
@staticmethod
def main(args):
output_dir = os.path.abspath(args.output_dir)
template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(template_file_full_path)
with open(args.collection_defs) as f:
options = yaml.safe_load(f)
normalize_options(options)
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True)
env.filters['documented_type'] = documented_type
env.filters['rst_ify'] = rst_ify
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'options': options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
|
zakki/openhsp | refs/heads/master | hsp3ll/llvm/utils/lint/common_lint.py | 147 | #!/usr/bin/python
#
# Common lint functions applicable to multiple types of files.
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(line_number, msg), ...] with any violations
found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
if tab_re.match(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
if trailing_whitespace_re.match(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
  def RunOnFile(self, filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
    linter: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
file = open(filename, 'r')
if not file:
      print('Could not open %s' % filename)
continue
lines = file.readlines()
lint.extend(linter.RunOnFile(filename, lines))
return lint
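# Editor's note: minimal usage sketch (editor-added, not part of the LLVM
# sources); the MaxLengthLint class and the 80-column limit are assumptions
# chosen for illustration.
class MaxLengthLint(BaseLint):
  def RunOnFile(self, filename, lines):
    return VerifyLineLength(filename, lines, 80)

# findings = RunLintOverAllFiles(MaxLengthLint(), ['common_lint.py'])
# for filename, line_num, msg in findings:
#   print('%s:%d: %s' % (filename, line_num, msg))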
|
quasiben/bokeh | refs/heads/master | examples/plotting/server/fourier_animated.py | 6 | # You must first run "bokeh serve" to view this example
#
# Example inspired by:
#
# https://www.youtube.com/watch?v=LznjC4Lo7lE
from __future__ import division
from collections import OrderedDict
from math import pi
import numpy as np
from bokeh.client import push_session
from bokeh.driving import repeat
from bokeh.io import vplot
from bokeh.models.sources import ColumnDataSource as CDS
from bokeh.plotting import figure, curdoc
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
return dict(curve=CDS(), lines=CDS(), circle_point=CDS(), circleds=CDS())
def create_circle_glyphs(p, color, sources):
p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
if y_range is None:
y_range=[-2, 2]
# create new figure
p = figure(title=title, width=800, height=300, x_range=[-2.5, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
cx, cy = 0, 0
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)
cp = sources['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i==0:
# compute the full fourier eq
full_y = sum([foo(x) for foo in foos])
# replace the foo curve with the full fourier eq
sources['curve'] = CDS(dict(x=x, base_x=base_x, y=full_y))
# draw the line
p.line('base_x','y', color="orange", line_width=2, source=sources['curve'],
legend="4sin(x)/pi + 4sin(3x)/3pi + 4sin(5x)/5pi + 4sin(7x)/7pi")
if i==len(foos)-1:
# if it's the last foo let's draw a circle on the head of the curve
sources['floating_point'] = CDS({'x':[shift], 'y': [cy]})
p.line('line_x', 'line_y', color=palette[i], line_width=2, source=sources['lines'])
p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
        # draw the circle, radius and circle point related to foo domain
create_circle_glyphs(p, palette[i], sources)
_sources.append(sources)
return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
if compute_curve:
ys = foo(xs)
sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
r = foo(period)
y = foo(xs[0]) + cy
x = cfoo(xs[0]) + cx
sources['lines'].data = {
'line_x': [x, shift], 'line_y': [y, y],
'radius_x': [0, x], 'radius_y': [0, y]
}
sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
sources['circleds'].data=dict(
x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
)
def update_sources(sources, foos, newx, ind, cfoos):
cx, cy = 0, 0
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
compute_curve = i != 0)
if i == 0:
full_y = sum([foo(newx) for foo in foos])
sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
cp = sources[i]['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i == len(foos)-1:
sources[i]['floating_point'].data['x'] = [shift]
sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
p = figure(title=title, width=800, height=300, x_range=[-1.5, 10.5], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i])
_sources.append(sources)
if i:
legend = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
else:
legend = "4sin(x)/pi"
p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
p.line('line_x', 'line_y', color=palette[i], line_width=2,
source=sources['lines'], legend=legend)
create_circle_glyphs(p, palette[i], sources)
return p, _sources
# create the series partials
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
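# Editor's note (explanatory, editor-added): f1..f4 are the first four terms of the
# Fourier series of a square wave, (4/pi) * sum_k sin((2k+1)x)/(2k+1) for k = 0..3,
# and cf1..cf4 are the matching cosine terms used to place each rotating-radius
# point on its harmonic circle.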
fourier = OrderedDict(
fourier_4 = {
'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
'fs': [f1, f2, f3, f4],
'cfs': [cf1, cf2, cf3, cf4]
},
)
for k, p in fourier.items():
p['plot'], p['sources'] = create_plot(
p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
)
for k, p in fourier.items():
p['cplot'], p['csources'] = create_centric_plot(
p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
)
layout = vplot(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
# open a session to keep our local document in sync with server
session = push_session(curdoc())
@repeat(range(N))
def cb(gind):
global newx
oldx = np.delete(newx, 0)
newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
for k, p in fourier.items():
update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
session.show(layout) # open the document in a browser
session.loop_until_closed() # run forever
|
mxngyn/django-s3direct | refs/heads/master | example/cat/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
Weihonghao/ECM | refs/heads/master | Vpy34/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3130 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# Entries are sorted by frequency order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
afdnlw/dnf | refs/heads/master | tests/test_i18n.py | 3 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.pycomp import PY3
from dnf.i18n import fill_exact_width, textwrap_fill
from tests.support import TestCase
from tests.support import mock
import unittest
import dnf.i18n
import sys
UC_TEXT = 'Šířka' # means 'Width' in Czech
UC_TEXT_OSERROR = 'Soubor již existuje' # 'File already exists'
STR_TEXT_OSERROR = 'Soubor již existuje'
@mock.patch('locale.setlocale')
class TestLocale(TestCase):
def test_setup_locale(self, mock_setlocale):
dnf.i18n.setup_locale()
self.assertTrue(2 <= mock_setlocale.call_count <= 3)
class TestStdout(TestCase):
def test_setup_stdout(self):
# No stdout output can be seen when sys.stdout is patched, debug msgs,
# etc. included.
with mock.patch('sys.stdout', spec=('write', 'isatty')):
retval = dnf.i18n.setup_stdout()
self.assertFalse(retval)
with mock.patch('sys.stdout') as mock_stdout:
mock_stdout.encoding = None
retval = dnf.i18n.setup_stdout()
self.assertFalse(retval)
with mock.patch('sys.stdout') as mock_stdout:
mock_stdout.encoding = 'UTF-8'
retval = dnf.i18n.setup_stdout()
self.assertTrue(retval)
with mock.patch('sys.stdout') as mock_stdout:
mock_stdout.encoding = 'ISO-8859-2'
retval = dnf.i18n.setup_stdout()
self.assertFalse(retval)
def test_stream(self):
fileobj = dnf.pycomp.StringIO()
stream = dnf.i18n.UnicodeStream(fileobj, "ISO-8859-2")
stream.write(UC_TEXT)
output = fileobj.getvalue()
self.assertEqual(output, u'\u0160\xed\u0159ka' if PY3 else b'\xa9\xed\xf8ka')
self.assertEqual(len(output), len(UC_TEXT))
class TestInput(TestCase):
@unittest.skipIf(PY3, "builtin input accepts unicode and bytes")
def test_assumption(self):
""" Test that raw_input() always fails on a unicode string with accented
characters. If this is not the case we might not need i18n.input()
as a raw_input() wrapper.
"""
if sys.stdout.isatty():
# Only works when stdout is a terminal (and not captured in some
# way, for instance when nosetests is run without the -s switch).
self.assertRaises(UnicodeEncodeError, raw_input, UC_TEXT)
@unittest.skipIf(PY3, "in python3 there's no conversion in dnf.i18n.input")
@mock.patch('sys.stdout')
@mock.patch('__builtin__.raw_input', lambda x: x)
def test_input(self, stdout):
stdout.encoding = None
s = dnf.i18n.ucd_input(UC_TEXT)
self.assertEqual(s, UC_TEXT.encode('utf8'))
stdout.encoding = 'iso-8859-2'
s = dnf.i18n.ucd_input(UC_TEXT)
self.assertEqual(s, UC_TEXT.encode('iso-8859-2'))
self.assertRaises(TypeError, dnf.i18n.ucd_input, b"string")
class TestConversion(TestCase):
@mock.patch('dnf.i18n._guess_encoding', return_value='utf-8')
def test_ucd(self, _unused):
s = UC_TEXT.encode('utf8')
# the assumption is this string can't be simply converted back to
# unicode:
u = dnf.i18n.ucd(s)
self.assertEqual(u, UC_TEXT)
# test a sample OSError, typically constructed with an error code and a
# utf-8 encoded string:
obj = OSError(17, 'Soubor již existuje')
expected = u"[Errno 17] %s" % UC_TEXT_OSERROR
self.assertEqual(dnf.i18n.ucd(obj), expected)
# ucd() should return unicode unmodified
self.assertEqual(dnf.i18n.ucd(expected), expected)
def test_download_error_unicode(self):
err_map = {"e1": ["x", "y"]}
err = dnf.exceptions.DownloadError(err_map)
self.assertEqual("e1: x\ne1: y", str(err))
self.assertEqual("e1: x\ne1: y", dnf.i18n.ucd(err))
@mock.patch('locale.getpreferredencoding', return_value='ANSI_X3.4-1968')
def test_ucd_acii(self, _unused):
s = UC_TEXT.encode('utf8')
# ascii coding overridden by utf8
u = dnf.i18n.ucd(s)
self.assertEqual(u, UC_TEXT)
@mock.patch('dnf.i18n._guess_encoding', return_value='utf-8')
def test_ucd_skip(self, _unused):
s = UC_TEXT.encode('iso-8859-2')
# not decoded chars are skipped
u = dnf.i18n.ucd(s)
self.assertEqual(u, "ka")
class TestFormatedOutput(TestCase):
def test_fill_exact_width(self):
msg = "message"
pre = "<"
suf = ">"
self.assertEqual("%-*.*s" % (5, 10, msg), fill_exact_width(msg, 5, 10))
self.assertEqual("重uř ", fill_exact_width("重uř", 5, 10))
self.assertEqual("%10.5s" % msg,
fill_exact_width(msg, 10, 5, left=False))
self.assertEqual("%s%.5s%s" % (pre, msg, suf),
fill_exact_width(msg, 0, 5, prefix=pre, suffix=suf))
def test_exact_width(self):
self.assertEqual(dnf.i18n.exact_width("重uř"), 4)
def test_textwrap_fill(self):
msg = "12345 67890"
one_line = textwrap_fill(msg, 12)
self.assertEqual(one_line, "12345 67890")
two_lines = textwrap_fill(msg, 7, subsequent_indent=">>")
self.assertEqual(two_lines,
"12345\n>>67890")
asian_msg = "重重 uř"
self.assertEqual(textwrap_fill(asian_msg, 7), asian_msg)
asian_two_lines = textwrap_fill("重重\nuř", 5, subsequent_indent=">>")
self.assertEqual(asian_two_lines, "重重\n>>uř")
|
Spoken-tutorial/spoken-website | refs/heads/master | events/migrations/0046_auto_20210318_1807.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2021-03-18 12:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0045_auto_20201111_1648'),
]
operations = [
migrations.AlterField(
model_name='academicpaymentstatus',
name='college_type',
field=models.CharField(choices=[('', '-----'), ('Engg', 'Engg'), ('ASC', 'ASC'), ('Polytechnic', 'Polytechnic'), ('University', 'University'), ('School', 'School')], max_length=50),
),
]
|
IptvBrasilGroup/Cleitonleonelcreton.repository | refs/heads/master | plugin.video.iptvbrondemand.PC/mechanize/_gzip.py | 137 | from cStringIO import StringIO
import _response
import _urllib2_fork
# GzipConsumer was taken from Fredrik Lundh's effbot.org-0.1-20041009 library
class GzipConsumer:
def __init__(self, consumer):
self.__consumer = consumer
self.__decoder = None
self.__data = ""
def __getattr__(self, key):
return getattr(self.__consumer, key)
def feed(self, data):
if self.__decoder is None:
# check if we have a full gzip header
data = self.__data + data
try:
i = 10
flag = ord(data[3])
if flag & 4: # extra
x = ord(data[i]) + 256*ord(data[i+1])
i = i + 2 + x
if flag & 8: # filename
while ord(data[i]):
i = i + 1
i = i + 1
if flag & 16: # comment
while ord(data[i]):
i = i + 1
i = i + 1
if flag & 2: # crc
i = i + 2
if len(data) < i:
raise IndexError("not enough data")
if data[:3] != "\x1f\x8b\x08":
raise IOError("invalid gzip data")
data = data[i:]
except IndexError:
self.__data = data
return # need more data
import zlib
self.__data = ""
self.__decoder = zlib.decompressobj(-zlib.MAX_WBITS)
data = self.__decoder.decompress(data)
if data:
self.__consumer.feed(data)
def close(self):
if self.__decoder:
data = self.__decoder.flush()
if data:
self.__consumer.feed(data)
self.__consumer.close()
# --------------------------------------------------------------------
# the rest of this module is John Lee's stupid code, not
# Fredrik's nice code :-)
class stupid_gzip_consumer:
def __init__(self): self.data = []
def feed(self, data): self.data.append(data)
class stupid_gzip_wrapper(_response.closeable_response):
def __init__(self, response):
self._response = response
c = stupid_gzip_consumer()
gzc = GzipConsumer(c)
gzc.feed(response.read())
self.__data = StringIO("".join(c.data))
def read(self, size=-1):
return self.__data.read(size)
def readline(self, size=-1):
return self.__data.readline(size)
def readlines(self, sizehint=-1):
return self.__data.readlines(sizehint)
def __getattr__(self, name):
# delegate unknown methods/attributes
return getattr(self._response, name)
class HTTPGzipProcessor(_urllib2_fork.BaseHandler):
handler_order = 200 # response processing before HTTPEquivProcessor
def http_request(self, request):
request.add_header("Accept-Encoding", "gzip")
return request
def http_response(self, request, response):
# post-process response
enc_hdrs = response.info().getheaders("Content-encoding")
for enc_hdr in enc_hdrs:
if ("gzip" in enc_hdr) or ("compress" in enc_hdr):
return stupid_gzip_wrapper(response)
return response
https_response = http_response
|
jhawkesworth/ansible | refs/heads/devel | test/runner/lib/sanity/validate_modules.py | 14 | """Sanity test using validate-modules."""
from __future__ import absolute_import, print_function
import collections
import json
import os
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
display,
run_command,
read_lines_without_comments,
)
from lib.ansible_util import (
ansible_environment,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
calculate_confidence,
calculate_best_confidence,
)
VALIDATE_SKIP_PATH = 'test/sanity/validate-modules/skip.txt'
VALIDATE_IGNORE_PATH = 'test/sanity/validate-modules/ignore.txt'
UNSUPPORTED_PYTHON_VERSIONS = (
'2.6',
'2.7',
)
class ValidateModulesTest(SanitySingleVersion):
"""Sanity test using validate-modules."""
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: TestResult
"""
if args.python_version in UNSUPPORTED_PYTHON_VERSIONS:
display.warning('Skipping validate-modules on unsupported Python version %s.' % args.python_version)
return SanitySkipped(self.name)
skip_paths = read_lines_without_comments(VALIDATE_SKIP_PATH)
skip_paths_set = set(skip_paths)
env = ansible_environment(args, color=False)
paths = sorted([i.path for i in targets.include if i.module and i.path not in skip_paths_set])
if not paths:
return SanitySkipped(self.name)
cmd = [
args.python_executable,
'test/sanity/validate-modules/validate-modules',
'--format', 'json',
'--arg-spec',
] + paths
invalid_ignores = []
ignore_entries = read_lines_without_comments(VALIDATE_IGNORE_PATH)
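        # Each non-empty line of ignore.txt is expected to be "<path> <error code>",
        # for example (illustrative entry, not taken from the real file):
        #   lib/ansible/modules/system/ping.py E322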
ignore = collections.defaultdict(dict)
line = 0
for ignore_entry in ignore_entries:
line += 1
if not ignore_entry:
continue
if ' ' not in ignore_entry:
invalid_ignores.append((line, 'Invalid syntax'))
continue
path, code = ignore_entry.split(' ', 1)
ignore[path][code] = line
if args.base_branch:
cmd.extend([
'--base-branch', args.base_branch,
])
else:
display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.')
try:
stdout, stderr = run_command(args, cmd, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status not in (0, 3):
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
messages = json.loads(stdout)
errors = []
for filename in messages:
output = messages[filename]
for item in output['errors']:
errors.append(SanityMessage(
path=filename,
line=int(item['line']) if 'line' in item else 0,
column=int(item['column']) if 'column' in item else 0,
level='error',
code='E%s' % item['code'],
message=item['msg'],
))
filtered = []
for error in errors:
if error.code in ignore[error.path]:
ignore[error.path][error.code] = None # error ignored, clear line number of ignore entry to track usage
else:
filtered.append(error) # error not ignored
errors = filtered
for invalid_ignore in invalid_ignores:
errors.append(SanityMessage(
code='A201',
message=invalid_ignore[1],
path=VALIDATE_IGNORE_PATH,
line=invalid_ignore[0],
column=1,
confidence=calculate_confidence(VALIDATE_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None,
))
line = 0
for path in skip_paths:
line += 1
if not path:
continue
if not os.path.exists(path):
# Keep files out of the list which no longer exist in the repo.
errors.append(SanityMessage(
code='A101',
message='Remove "%s" since it does not exist' % path,
path=VALIDATE_SKIP_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((VALIDATE_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
for path in paths:
if path not in ignore:
continue
for code in ignore[path]:
line = ignore[path][code]
if not line:
continue
errors.append(SanityMessage(
code='A102',
message='Remove since "%s" passes "%s" test' % (path, code),
path=VALIDATE_IGNORE_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((VALIDATE_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
|
bkahlert/seqan-research | refs/heads/master | raw/pmsb13/pmsb13-data-20130530/trunk/misc/trac_plugins/IncludeMacro/setup.py | 7 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import os.path
from setuptools import setup
setup(
name = 'TracIncludeMacro',
version = '2.1fub3',
packages = ['includemacro'],
author = 'Manuel Holtgrewe',
author_email = '[email protected]',
description = 'Include the contents of external URLs and other Trac objects in a wiki page. Extended version that supports fragments.',
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
license = 'BSD',
keywords = 'trac 0.11 plugin wiki include macro',
url = 'http://trac-hacks.org/wiki/IncludeMacro',
classifiers = [
'Framework :: Trac',
],
install_requires = ['Trac'],
entry_points = {
'trac.plugins': [
'includemacro.macros = includemacro.macros',
]
}
)
|
jasonharrison/Solus | refs/heads/master | main.py | 1 | import asynchat, asyncore, socket, sys, os, time, subprocess
try:
import config
except ImportError:
print("Please edit config.py.dist - rename it to config.py when you're done")
exit()
class asynchat_bot(asynchat.async_chat):
def __init__(self, host, port):
asynchat.async_chat.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_terminator('\r\n')
self.data = ''
self.remote = (host, port)
self.connect(self.remote)
# set vars
self.label = subprocess.check_output(["git", "describe"]) # get current git hash
self.version = "Solus " + str(self.label) + ". "
self.modules = {}
# stuff to be set on a rehash
self.remotehost = config.remotehost
self.remoteport = config.remoteport
self.protocolname = config.protocolname
self.loglevel = config.loglevel
self.reportchannel = config.reportchannel
# end of stuff to be set on a rehash
self.servername = config.servername
self.serverdesc = config.serverdesc
self.mysid = config.sid
self.debugmode = debugmode
self.firstping = True
self.ignored = []
self.myclients = []
try:
__import__("modules.protocol." + config.protocolname)
self.protocol = sys.modules["modules.protocol." + config.protocolname]
self.protocol.modinit(self)
except ImportError:
print("Error: protocol \"" + config.protocolname + "\" does not exist.")
exit()
# api
def sendLine(self, data):
if self.debugmode:
print("{" + str(time.time()) + "} Send: " + data)
self.push(data + "\r\n")
def modexists(module, modname):
modparts = modname.split(".")
modparts.pop(0)
assert modparts != []
currentpart = ""
for modpart in modparts:
currentpart = currentpart + "." + modpart
if hasattr(module, currentpart):
pass
else:
return False
return True
def load(self, modname):
module = __import__(modname)
if "." in modname:
modparts = modname.split(".")[1:]
for part in modparts:
module = getattr(module, part)
module.modinit(self)
self.modules[modname] = module
def unload(self, modname):
self.modules[modname].moddeinit(self)
del self.modules[modname]
def modreload(self, modname):
reload(self.modules[modname])
def modfullreload(self, modname):
self.modules[modname].moddeinit(self)
reload(self.modules[modname])
self.modules[modname].modinit(self)
def rehash(self):
try:
reload(config)
self.remotehost = config.remotehost
self.remoteport = config.remoteport
self.protocolname = config.protocolname
self.loglevel = config.loglevel
self.reportchannel = config.reportchannel
except Exception, e:
            print("Error: " + str(e))
def sendNotice(self, sender, target, message):
self.protocol.sendNotice(self, sender, target, message)
def sendPrivmsg(self, sender, target, message):
self.protocol.sendPrivmsg(self, sender, target, message)
def log(self, level, data):
if level.lower() in self.loglevel.lower():
if self.myclients == []:
self.sendNotice("server", self.reportchannel, data)
else:
self.sendNotice(self.myclients[0], self.reportchannel, data)
def add_kline(self, kliner, time, user, host, reason):
self.protocol.add_kline(kliner, time, user, host, reason)
def getVersion(self):
version = self.version + self.servername+" "+self.protocolname
return version
def createClient(self, cnick, cuser, chost, cgecos):
c = self.protocol.createClient(self, cnick, cuser, chost, cgecos)
self.myclients.append(c)
return c
def destroyClient(self, client, reason):
self.protocol.destroyClient(self, client, reason)
def joinChannel(self, client, channel):
self.protocol.joinChannel(self, client, channel)
def partChannel(self, client, channel):
self.protocol.partChannel(self, client, channel)
def kill(self, client, killee, reason):
self.protocol.kill(client, killee, reason)
def getUserList(self):
if self.protocolname == "TS6":
return self.uidstore.items()
def find_user(self, client):
if type(client) == str:
return self.protocol.find_user(self, client)
elif type(client) == dict:
return client
def kill_user(self, killer, killed, reason):
self.protocol.kill_user(self, killer, killed, reason)
def getMask(self, client):
if type(client) == str:
client = self.find_user(client)
hostmask = client['nick'] + "!" + client['user'] + "@" + client['host']
return hostmask
# end of api
# begin hooks
def getConnect(self, user):
for modname, module in self.modules.items():
if hasattr(module, "onConnect"):
module.onConnect(self, user)
def getQuit(self, user, reason):
for modname, module in self.modules.items():
if hasattr(module, "onQuit"):
module.onQuit(self, user, reason)
def getPrivmsg(self, user, target, message):
for modname, module in self.modules.items():
if hasattr(module, "onPrivmsg"):
module.onPrivmsg(self, user, target, message)
def getChannelMessage(self, user, channel, message):
for modname, module in self.modules.items():
if hasattr(module, "onChannelPrivmsg"):
module.onChannelPrivmsg(self, user, channel, message)
# end hooks
def handle_connect(self):
self.protocol.handle_connect(self, config)
f = open("modules.conf", "r")
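        # modules.conf is read one module name per line; blank lines and lines
        # containing '#' are skipped. Illustrative (hypothetical) entry:
        #   modules.commands.core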
for line in f.read().split("\n"):
if "#" in line or line == "":
pass
else:
self.load(line)
self.startts = time.time()
def handle_error(self):
raise
def get_data(self):
r = self.data
self.data = ''
return r
def collect_incoming_data(self, data):
self.data += data
def found_terminator(self):
data = self.get_data()
if self.debugmode:
print("{" + str(time.time()) + "} Recv: " + data)
self.protocol.handle_data(self, data)
if __name__ == '__main__':
debugmode = False
for arg in sys.argv[1:]:
if " " not in arg and "python" not in arg and "main.py" not in arg:
if arg == "-d":
debugmode = True
print "Starting in debug mode."
else:
                print("Unknown argument: " + arg + " - ignoring")
print("Solus started. PID: " + str(os.getpid()))
asynchat_bot(config.remotehost, int(config.remoteport))
asyncore.loop()
|
mach6/selenium | refs/heads/master | py/selenium/webdriver/opera/webdriver.py | 11 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.chrome.webdriver import WebDriver as ChromiumDriver
from .options import Options
class OperaDriver(ChromiumDriver):
"""Controls the new OperaDriver and allows you
to drive the Opera browser based on Chromium."""
def __init__(self, executable_path=None, port=0,
options=None, service_args=None,
desired_capabilities=None, service_log_path=None,
opera_options=None):
"""
Creates a new instance of the operadriver.
Starts the service and then creates new instance of operadriver.
:Args:
- executable_path - path to the executable. If the default is used
it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0,
a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- options: this takes an instance of ChromeOptions
"""
if opera_options:
warnings.warn('use options instead of opera_options', DeprecationWarning)
options = opera_options
executable_path = (executable_path if executable_path is not None
else "operadriver")
ChromiumDriver.__init__(self,
executable_path=executable_path,
port=port,
options=options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
def create_options(self):
return Options()
class WebDriver(OperaDriver):
class ServiceType:
CHROMIUM = 2
def __init__(self,
desired_capabilities=None,
executable_path=None,
port=0,
service_log_path=None,
service_args=None,
options=None):
OperaDriver.__init__(self, executable_path=executable_path,
port=port, options=options,
service_args=service_args,
desired_capabilities=desired_capabilities,
service_log_path=service_log_path)
|
bonitadecker77/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/sample_doctest.py | 203 | """This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
"""
>>> 2+2
5
>>> 2+2
4
"""
def bar():
"""
>>> 2+2
4
"""
def test_silly_setup():
"""
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
True
"""
def w_blank():
"""
>>> if 1:
... print('a')
... print()
... print('b')
a
<BLANKLINE>
b
"""
x = 1
def x_is_one():
"""
>>> x
1
"""
def y_is_one():
"""
>>> y
1
"""
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
import doctest
return doctest.DocTestSuite()
|
dexterx17/nodoSocket | refs/heads/master | clients/Python-2.7.6/Lib/test/test_defaultdict.py | 108 | """Unit tests for collections.defaultdict."""
import os
import copy
import tempfile
import unittest
from test import test_support
from collections import defaultdict
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError, err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<type 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print >>f, d1
print >>f, d2
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError, err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def test_recursive_repr(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertTrue(repr(d).startswith(
"defaultdict(<bound method sub._factory of defaultdict(..."))
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print >>f, d
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_main():
test_support.run_unittest(TestDefaultDict)
if __name__ == "__main__":
test_main()
|
eustislab/horton | refs/heads/master | data/examples/hamiltonian/even_tempered_li.py | 1 | #!/usr/bin/env python
import numpy as np
from horton import *
# specify the even tempered basis set
alpha_low = 5e-3
alpha_high = 5e2
nbasis = 30
lnratio = (np.log(alpha_high) - np.log(alpha_low))/(nbasis-1)
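# The exponents are meant to form the usual even-tempered geometric progression,
#   alpha_i = alpha_low * exp(i * lnratio),   i = 0 .. nbasis-1,
# so that alpha_0 == alpha_low and alpha_(nbasis-1) == alpha_high.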
# build a list of "contractions". These aren't real contractions as every
# contraction only contains one basis function.
bcs = []
for ibasis in xrange(nbasis):
    alpha = alpha_low * np.exp(ibasis * lnratio)
# arguments of GOBasisContraction:
# shell_type, list of exponents, list of contraction coefficients
bcs.append(GOBasisContraction(0, np.array([alpha]), np.array([1.0])))
# Finish setting up the basis set:
ba = GOBasisAtom(bcs)
obasis = get_gobasis(np.array([[0.0, 0.0, 0.0]]), np.array([3]), default=ba)
|
infoelliex/addons-yelizariev | refs/heads/8.0 | gamification_extra/__init__.py | 16 | import gamification_extra_models
|
tinloaf/home-assistant | refs/heads/dev | homeassistant/components/notify/sendgrid.py | 6 | """
SendGrid notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.sendgrid/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (
CONF_API_KEY, CONF_SENDER, CONF_RECIPIENT, CONTENT_TYPE_TEXT_PLAIN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['sendgrid==5.6.0']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SENDER): vol.Email(),
vol.Required(CONF_RECIPIENT): vol.Email(),
})
def get_service(hass, config, discovery_info=None):
"""Get the SendGrid notification service."""
api_key = config.get(CONF_API_KEY)
sender = config.get(CONF_SENDER)
recipient = config.get(CONF_RECIPIENT)
return SendgridNotificationService(api_key, sender, recipient)
class SendgridNotificationService(BaseNotificationService):
"""Implementation the notification service for email via Sendgrid."""
def __init__(self, api_key, sender, recipient):
"""Initialize the service."""
from sendgrid import SendGridAPIClient
self.api_key = api_key
self.sender = sender
self.recipient = recipient
self._sg = SendGridAPIClient(apikey=self.api_key)
def send_message(self, message='', **kwargs):
"""Send an email to a user via SendGrid."""
subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = {
"personalizations": [
{
"to": [
{
"email": self.recipient
}
],
"subject": subject
}
],
"from": {
"email": self.sender
},
"content": [
{
"type": CONTENT_TYPE_TEXT_PLAIN,
"value": message
}
]
}
response = self._sg.client.mail.send.post(request_body=data)
if response.status_code != 202:
_LOGGER.error("Unable to send notification")
|
jmartinm/invenio-workflows | refs/heads/master | invenio_workflows/tasks/sample_tasks.py | 5 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Collection of tasks used for tests."""
import time
from functools import wraps
def add_data(data_param):
"""Add data_param to obj.data."""
@wraps(add_data)
def _add_data(obj, eng):
# due to python 2 way of managing closure
data = data_param
obj.data += data
return _add_data
def generate_error(obj, eng):
"""Generate a ZeroDevisionError."""
call_a()
def call_a():
"""Used in order to test deep stack trace output."""
call_b()
def call_b():
"""Used in order to test deep stack trace output."""
call_c()
def call_c():
"""Used in order to test deep stack trace output."""
raise ZeroDivisionError
def halt_if_data_less_than(threshold):
"""Static task to halt if data is lesser than threshold.
Halt workflow execution for this object if its value is less than given
threshold.
"""
@wraps(halt_if_data_less_than)
def _halt_if_data_less_than(obj, eng):
if obj.data < threshold:
eng.halt("Value of data is too small.")
return _halt_if_data_less_than
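# Illustrative usage (not part of the original file): halt_if_data_less_than(10)
# returns a task callable that halts the workflow engine whenever obj.data is below 10.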
def set_data(data):
"""Task using closure to allow parameters and change data."""
@wraps(set_data)
def _set_data(obj, eng):
obj.data = data
return _set_data
def reduce_data_by_one(times):
"""Task to substract one to data."""
@wraps(reduce_data_by_one)
def _reduce_data_by_one(obj, eng):
a = times
while a > 0:
obj.data -= 1
a -= 1
return _reduce_data_by_one
def add_metadata():
"""Task to add metadata."""
@wraps(add_metadata)
def _add_metadata(obj, eng):
if obj['content_type'] == 'book':
obj.add_field("meta1", "elefant")
else:
obj.add_field("meta1", "hippo")
return _add_metadata
def task_b(obj, eng):
"""Function task_b docstring."""
if obj.data < 20:
eng.log.info("data < 20")
obj.add_task_result("task_b", {'a': 12, 'b': 13, 'c': 14})
def sleep_task(t):
"""Task to wait t seconds."""
@wraps(sleep_task)
def _sleep_task(dummy_obj, eng):
time.sleep(t)
return _sleep_task
def lower_than_20(obj, eng):
"""Function checks if variable is lower than 20."""
if obj.data < 20:
eng.halt("Value of filed: a in object is lower than 20.")
def halt_if_higher_than_20(obj, eng):
"""Function checks if variable is higher than 20."""
if obj.data > 20:
eng.halt("Value of filed: a in object is higher than 20.")
def subtract(value):
"""Function subtract value from variable."""
@wraps(subtract)
def _subtract(obj, dummy_eng):
obj.data -= value
return _subtract
def halt_whatever(obj, eng):
"""Task to stop processing in halted status."""
eng.halt("halt!", None)
def task_reduce_and_halt(obj, eng):
"""Task to substract one to data and stop."""
if obj.data > 0:
obj.data -= 1
obj.save()
eng.halt("test halt")
else:
return None
|
harveywwu/vnpy | refs/heads/master | vnpy/trader/app/dataRecorder/drEngine.py | 1 | # encoding: UTF-8
'''
This module implements the market data recording engine: it aggregates TICK data and
generates K-line (bar) data, inserting both into the database.
DR_setting.json configures which contracts to record and the active (dominant) contract symbols.
'''
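# A minimal illustrative DR_setting.json (structure inferred from loadSetting() below;
# the symbol and gateway names are placeholders, not taken from the original project):
# {
#     "working": true,
#     "tick": [["IF1604", "CTP"]],
#     "bar": [["IF1604", "CTP"]],
#     "active": {"IF0000": "IF1604"}
# }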
import json
import csv
import os
import copy
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread
from pymongo.errors import DuplicateKeyError
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtFunction import todayDate, getJsonPath
from vnpy.trader.vtObject import VtSubscribeReq, VtLogData, VtBarData, VtTickData
from vnpy.trader.app.ctaStrategy.ctaTemplate import BarGenerator
from .drBase import *
from .language import text
########################################################################
class DrEngine(object):
"""数据记录引擎"""
settingFileName = 'DR_setting.json'
settingFilePath = getJsonPath(settingFileName, __file__)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Current date
self.today = todayDate()
        # Active contract mapping dict: key is the specific contract symbol (e.g. IF1604), value is the active contract symbol (e.g. IF0000)
self.activeSymbolDict = {}
        # Set of tick symbols to record
self.tickSymbolSet = set()
        # K-line (bar) generator dictionary
self.bgDict = {}
        # Settings dictionary
self.settingDict = OrderedDict()
        # State for the dedicated database-insertion thread
        self.active = False                     # working state
        self.queue = Queue()                    # queue
        self.thread = Thread(target=self.run)   # thread
        # Load settings and subscribe to market data
self.loadSetting()
        # Start the database insertion thread
self.start()
        # Register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def loadSetting(self):
"""加载配置"""
with open(self.settingFilePath) as f:
drSetting = json.load(f)
        # If 'working' is set to False, do not start market data recording
working = drSetting['working']
if not working:
return
        # Tick recording configuration
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
symbol = setting[0]
gateway = setting[1]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = setting[0]
                # For the LTS and IB interfaces, subscribing to market data requires the exchange code
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
                # For the IB interface, subscribing also requires the currency and product class
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, gateway)
                #tick = VtTickData()  # this tick instance could be used to cache some data (currently unused)
#self.tickDict[vtSymbol] = tick
self.tickSymbolSet.add(vtSymbol)
                # Save to the settings dictionary
if vtSymbol not in self.settingDict:
d = {
'symbol': symbol,
'gateway': gateway,
'tick': True
}
self.settingDict[vtSymbol] = d
else:
d = self.settingDict[vtSymbol]
d['tick'] = True
        # Minute bar recording configuration
if 'bar' in drSetting:
l = drSetting['bar']
for setting in l:
symbol = setting[0]
gateway = setting[1]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = symbol
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, gateway)
                # Save to the settings dictionary
if vtSymbol not in self.settingDict:
d = {
'symbol': symbol,
'gateway': gateway,
'bar': True
}
self.settingDict[vtSymbol] = d
else:
d = self.settingDict[vtSymbol]
d['bar'] = True
                # Create the BarGenerator object
self.bgDict[vtSymbol] = BarGenerator(self.onBar)
        # Active contract recording configuration
if 'active' in drSetting:
d = drSetting['active']
self.activeSymbolDict = {vtSymbol:activeSymbol for activeSymbol, vtSymbol in d.items()}
#----------------------------------------------------------------------
def getSetting(self):
"""获取配置"""
return self.settingDict, self.activeSymbolDict
#----------------------------------------------------------------------
def procecssTickEvent(self, event):
"""处理行情事件"""
tick = event.dict_['data']
vtSymbol = tick.vtSymbol
        # Create the datetime object
if not tick.datetime:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
self.onTick(tick)
bm = self.bgDict.get(vtSymbol, None)
if bm:
bm.updateTick(tick)
#----------------------------------------------------------------------
def onTick(self, tick):
"""Tick更新"""
vtSymbol = tick.vtSymbol
if vtSymbol in self.tickSymbolSet:
self.insertData(TICK_DB_NAME, vtSymbol, tick)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(TICK_DB_NAME, activeSymbol, tick)
self.writeDrLog(text.TICK_LOGGING_MESSAGE.format(symbol=tick.vtSymbol,
time=tick.time,
last=tick.lastPrice,
bid=tick.bidPrice1,
ask=tick.askPrice1))
#----------------------------------------------------------------------
def onBar(self, bar):
"""分钟线更新"""
vtSymbol = bar.vtSymbol
self.insertData(MINUTE_DB_NAME, vtSymbol, bar)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(MINUTE_DB_NAME, activeSymbol, bar)
self.writeDrLog(text.BAR_LOGGING_MESSAGE.format(symbol=bar.vtSymbol,
time=bar.time,
open=bar.open,
high=bar.high,
low=bar.low,
close=bar.close))
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是VtTickData或者VtBarData)"""
self.queue.put((dbName, collectionName, data.__dict__))
#----------------------------------------------------------------------
def run(self):
"""运行插入线程"""
while self.active:
try:
dbName, collectionName, d = self.queue.get(block=True, timeout=1)
                # Updating via MongoDB's update mode here queries the database so frequently
                # when recording tick data that CPU usage and disk I/O spike and the system
                # can freeze, so that approach is not recommended.
#flt = {'datetime': d['datetime']}
#self.mainEngine.dbUpdate(dbName, collectionName, d, flt, True)
                # Insert mode is used instead; duplicate timestamps may occur and must be cleaned up by the user
try:
self.mainEngine.dbInsert(dbName, collectionName, d)
except DuplicateKeyError:
                    self.writeDrLog(u'Duplicate key, insert failed. Error: %s' % traceback.format_exc())
except Empty:
pass
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.active = True
self.thread.start()
#----------------------------------------------------------------------
def stop(self):
"""退出"""
if self.active:
self.active = False
self.thread.join()
#----------------------------------------------------------------------
def writeDrLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_DATARECORDER_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
|
cedi4155476/QGIS | refs/heads/master | python/plugins/processing/algs/gdal/tri.py | 11 | # -*- coding: utf-8 -*-
"""
***************************************************************************
tri.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
class tri(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('TRI (Terrain Ruggedness Index)')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Terrain Ruggedness Index')))
def getConsoleCommands(self):
arguments = ['TRI']
arguments.append(unicode(self.getParameterValue(self.INPUT)))
arguments.append(unicode(self.getOutputValue(self.OUTPUT)))
arguments.append('-b')
arguments.append(unicode(self.getParameterValue(self.BAND)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
|
flying-circus/asq | refs/heads/master | asq/queryables.py | 5 | '''Classes which support the Queryable interface.'''
# Copyright (c) 2011 Robert Smallshire.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'Robert Smallshire'
import heapq
import itertools
import operator
from asq.selectors import make_selector
from .selectors import identity
from .extension import extend
from ._types import (is_iterable, is_type)
from ._portability import (imap, ifilter, irange, izip, izip_longest,
fold, is_callable, OrderedDict, has_unicode_type,
itervalues, iteritems, totally_ordered)
# A sentinel singleton used to identify default argument values.
default = object()
class OutOfRangeError(ValueError):
'''A subclass of ValueError for signalling out of range values.'''
pass
class Queryable(object):
'''Queries over iterables executed serially.
Queryable objects are constructed from iterables.
'''
def __init__(self, iterable):
'''Construct a Queryable from any iterable.
Args:
iterable: Any object supporting the iterator protocol.
Raises:
TypeError: if iterable does not support the iterator protocol.
'''
if not is_iterable(iterable):
raise TypeError("Cannot construct Queryable from non-iterable {0}"
.format(str(type(iterable))[7: -2]))
self._iterable = iterable
def __iter__(self):
'''Support for the iterator protocol.
Allows Queryable instances to be used anywhere an iterable is required.
Returns:
An iterator over the values in the query result.
Raises:
ValueError: If the Queryable has been closed().
'''
if self.closed():
raise ValueError("Attempt to use closed() Queryable")
return self._iter()
def _iter(self):
'''Return an unsorted iterator over the iterable.
Useful in subclasses to obtain a raw iterator over the iterable where
__iter__ has been overridden.
'''
return iter(self._iterable)
def _create(self, iterable):
'''Create a Queryable using the the supplied iterable.
This method exists to allow it to be overridden by subclasses of
Queryable.
Args:
iterable: An iterable.
Returns:
A Queryable constructed using the supplied iterable.
Raises:
TypeError: If the argument is not in fact iterable.
'''
return Queryable(iterable)
def _create_ordered(self, iterable, direction, func):
'''Create an ordered iterable using the supplied iterable.
This method exists to allow it to be overridden by subclasses of
Queryable.
Args:
iterable: The iterable sequence to be ordered.
            direction: +1 for ascending, -1 for descending.
func: The function to select the sorting key.
'''
return OrderedQueryable(iterable, direction, func)
def __enter__(self):
'''Support for the context manager protocol.'''
return self
def __exit__(self, type, value, traceback):
'''Support for the context manager protocol.
Ensures that close() is called on the Queryable.
'''
self.close()
return False
def closed(self):
'''Determine whether the Queryable has been closed.
Returns:
True if closed, otherwise False.
'''
return self._iterable is None
def close(self):
'''Closes the queryable.
The Queryable should not be used following a call to close. This method
is idempotent. Other calls to a Queryable following close() will raise
ValueError.
'''
self._iterable = None
def select(self, selector):
'''Transforms each element of a sequence into a new form.
Each element of the source is transformed through a selector function
        to produce a corresponding element in the result sequence.
If the selector is identity the method will return self.
Note: This method uses deferred execution.
Args:
selector: A unary function mapping a value in the source sequence
                to the corresponding value in the generated sequence.
The single positional argument to the selector function is the
element value. The return value of the selector function
should be the corresponding element of the result sequence.
Returns:
A Queryable over generated sequence whose elements are the result
of invoking the selector function on each element of the source
sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call select() on a closed Queryable.")
try:
selector = make_selector(selector)
except ValueError:
raise TypeError("select() parameter selector={selector} cannot be"
"converted into a callable "
"selector".format(selector=repr(selector)))
if selector is identity:
return self
return self._create(imap(selector, self))
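    # A minimal usage sketch (the values below are illustrative, not from the
    # original documentation): select() maps each element through the
    # selector, and nothing is consumed until iteration begins.
    #
    #     Queryable([1, 2, 3]).select(lambda x: x * 10).to_list()
    #     # -> [10, 20, 30]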
def select_with_index(self, selector=lambda index, element: (index,
element)):
'''Transforms each element of a sequence into a new form, incorporating
the index of the element.
Each element is transformed through a selector function which accepts
the element value and its zero-based index in the source sequence. The
generated sequence is lazily evaluated.
Note: This method uses deferred execution.
Args:
selector: A binary function mapping the index of a value in
the source sequence and the element value itself to the
corresponding value in the generated sequence. The two
positional arguments of the selector function are the zero-
based index of the current element and the value of the current
element. The return value should be the corresponding value in
the result sequence. The default selector produces a 2-tuple
containing the index and the element giving this function
similar behaviour to the built-in enumerate().
Returns:
A Queryable whose elements are the result of invoking the selector
function on each element of the source sequence
Raises:
ValueError: If this Queryable has been closed.
TypeError: If selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call select_with_index() on a "
"closed Queryable.")
if not is_callable(selector):
raise TypeError("select_with_index() parameter selector={0} is "
"not callable".format(repr(selector)))
return self._create(itertools.starmap(selector, enumerate(iter(self))))
def select_many(self, collection_selector=identity,
result_selector=identity):
'''Projects each element of a sequence to an intermediate new sequence,
flattens the resulting sequences into one sequence and optionally
transforms the flattened sequence using a selector function.
Note: This method uses deferred execution.
Args:
collection_selector: A unary function mapping each element of the
source iterable into an intermediate sequence. The single
argument of the collection_selector is the value of an element
from the source sequence. The return value should be an
iterable derived from that element value. The default
collection_selector, which is the identity function, assumes
that each element of the source sequence is itself iterable.
result_selector: An optional unary function mapping the elements in
the flattened intermediate sequence to corresponding elements
of the result sequence. The single argument of the
result_selector is the value of an element from the flattened
intermediate sequence. The return value should be the
corresponding value in the result sequence. The default
result_selector is the identity function.
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If either collection_selector or result_selector are not
callable.
'''
if self.closed():
raise ValueError("Attempt to call select_many() on a closed "
"Queryable.")
if not is_callable(collection_selector):
raise TypeError("select_many() parameter projector={0} is not "
"callable".format(repr(collection_selector)))
if not is_callable(result_selector):
raise TypeError("select_many() parameter selector={selector} is "
" not callable".format(selector=repr(result_selector)))
sequences = self.select(collection_selector)
chained_sequence = itertools.chain.from_iterable(sequences)
return self._create(chained_sequence).select(result_selector)
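    # A minimal usage sketch (illustrative values; the default identity
    # collection_selector assumes each source element is itself iterable):
    #
    #     Queryable([[1, 2], [3]]).select_many().to_list()
    #     # -> [1, 2, 3]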
def select_many_with_index(self,
collection_selector=lambda index, source_element: (index,
source_element),
result_selector=lambda source_element,
collection_element: collection_element):
'''Projects each element of a sequence to an intermediate new sequence,
incorporating the index of the element, flattens the resulting sequence
into one sequence and optionally transforms the flattened sequence
using a selector function.
Note: This method uses deferred execution.
Args:
collection_selector: A binary function mapping each element of the
source sequence into an intermediate sequence, by incorporating
its index in the source sequence. The two positional arguments
to the function are the zero-based index of the source element
and the value of the element. The result of the function
should be an iterable derived from the index and element value.
If no collection_selector is provided, the elements of the
intermediate sequence will consist of tuples of (index,
element) from the source sequence.
result_selector:
An optional binary function mapping the elements in the
flattened intermediate sequence together with their
corresponding source elements to elements of the result
sequence. The two positional arguments of the result_selector
are, first the source element corresponding to an element from
the intermediate sequence, and second the actual element from
the intermediate sequence. The return value should be the
corresponding value in the result sequence. If no
result_selector function is provided, the elements of the
flattened intermediate sequence are returned untransformed.
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence which incorporates both the index and value of
the source element, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
            TypeError: If collection_selector or result_selector is not
                callable.
'''
if self.closed():
raise ValueError("Attempt to call select_many_with_index() on a "
"closed Queryable.")
        if not is_callable(collection_selector):
            raise TypeError("select_many_with_index() parameter "
                "collection_selector={0} is not callable".format(repr(collection_selector)))
        if not is_callable(result_selector):
            raise TypeError("select_many_with_index() parameter "
                "result_selector={0} is not callable".format(repr(result_selector)))
return self._create(
self._generate_select_many_with_index(collection_selector,
result_selector))
def _generate_select_many_with_index(self, collection_selector,
result_selector):
for index, source_element in enumerate(self):
collection = collection_selector(index, source_element)
for collection_element in collection:
value = result_selector(source_element, collection_element)
yield value
def select_many_with_correspondence(self, collection_selector=identity,
result_selector=lambda source_element,
collection_element: (source_element,
collection_element)):
'''Projects each element of a sequence to an intermediate new sequence,
        flattens the resulting sequences into one sequence, and uses a
selector function to incorporate the corresponding source for each item
in the result sequence.
Note: This method uses deferred execution.
Args:
collection_selector: A unary function mapping each element of the
source iterable into an intermediate sequence. The single
argument of the collection_selector is the value of an element
from the source sequence. The return value should be an
iterable derived from that element value. The default
collection_selector, which is the identity function, assumes
that each element of the source sequence is itself iterable.
result_selector:
An optional binary function mapping the elements in the
flattened intermediate sequence together with their
corresponding source elements to elements of the result
sequence. The two positional arguments of the result_selector
are, first the source element corresponding to an element from
the intermediate sequence, and second the actual element from
the intermediate sequence. The return value should be the
corresponding value in the result sequence. If no
result_selector function is provided, the elements of the
result sequence are 2-tuple pairs of the form (source_element,
intermediate_element).
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector which incorporates the corresponding source element
into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
            TypeError: If collection_selector or result_selector is not
                callable.
'''
if self.closed():
raise ValueError("Attempt to call "
"select_many_with_correspondence() on a closed Queryable.")
if not is_callable(collection_selector):
raise TypeError("select_many_with_correspondence() parameter "
"projector={0} is not callable".format(repr(collection_selector)))
if not is_callable(result_selector):
raise TypeError("select_many_with_correspondence() parameter "
"selector={0} is not callable".format(repr(result_selector)))
return self._create(
self._generate_select_many_with_correspondence(collection_selector,
result_selector))
def _generate_select_many_with_correspondence(self, collection_selector,
result_selector):
for source_element in self:
intermediate_sequence = collection_selector(source_element)
for intermediate_item in intermediate_sequence:
value = result_selector(source_element, intermediate_item)
yield value
def group_by(self, key_selector=identity,
element_selector=identity,
result_selector=lambda key, grouping: grouping):
'''Groups the elements according to the value of a key extracted by a
selector function.
Note: This method has different behaviour to itertools.groupby in the
Python standard library because it aggregates all items with the
same key, rather than returning groups of consecutive items of the
same key.
Note: This method uses deferred execution, but consumption of a single
result will lead to evaluation of the whole source sequence.
Args:
key_selector: An optional unary function used to extract a key from
each element in the source sequence. The default is the
identity function.
            element_selector: An optional unary function to map elements in the
source sequence to elements in a resulting Grouping. The
default is the identity function.
result_selector: An optional binary function to create a result
from each group. The first positional argument is the key
identifying the group. The second argument is a Grouping object
containing the members of the group. The default is a function
which simply returns the Grouping.
Returns:
            A Queryable sequence of elements where each element represents a
            group. If the default result_selector is relied upon, each
            element is a Grouping object.
Raises:
ValueError: If the Queryable is closed().
TypeError: If key_selector is not callable.
TypeError: If element_selector is not callable.
TypeError: If result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call select_with_index() on a closed "
"Queryable.")
if not is_callable(key_selector):
raise TypeError("group_by() parameter key_selector={0} is not "
"callable".format(repr(key_selector)))
if not is_callable(element_selector):
raise TypeError("group_by() parameter element_selector={0} is not "
"callable".format(repr(element_selector)))
if not is_callable(result_selector):
raise TypeError("group_by() parameter result_selector={0} is not "
"callable".format(repr(result_selector)))
return self._create(self._generate_group_by_result(key_selector,
element_selector, result_selector))
def _generate_group_by_result(self, key_selector, element_selector,
result_selector):
lookup = self.to_lookup(key_selector, element_selector)
for grouping in lookup:
yield result_selector(grouping.key, grouping)
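    # A minimal usage sketch for group_by() (illustrative values; assumes the
    # Lookup built by to_lookup() preserves the order in which keys are first
    # seen, as elsewhere in this module):
    #
    #     Queryable([1, 2, 3, 4, 5]).group_by(
    #         lambda x: x % 2,
    #         result_selector=lambda key, group: (key, list(group))).to_list()
    #     # -> [(1, [1, 3, 5]), (0, [2, 4])]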
def where(self, predicate):
'''Filters elements according to whether they match a predicate.
Note: This method uses deferred execution.
Args:
predicate: A unary function which is applied to each element in the
source sequence. Source elements for which the predicate
returns True will be present in the result.
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call where() on a closed Queryable.")
if not is_callable(predicate):
raise TypeError("where() parameter predicate={predicate} is not "
"callable".format(predicate=repr(predicate)))
return self._create(ifilter(predicate, self))
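    # A minimal usage sketch (illustrative values): where() keeps only the
    # elements for which the predicate returns True.
    #
    #     Queryable([1, 2, 3, 4]).where(lambda x: x % 2 == 0).to_list()
    #     # -> [2, 4]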
def of_type(self, classinfo):
'''Filters elements according to whether they are of a certain type.
Note: This method uses deferred execution.
Args:
classinfo: If classinfo is neither a class object nor a type object
it may be a tuple of class or type objects, or may recursively
contain other such tuples (other sequence types are not
accepted).
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If classinfo is not a class, type, or tuple of classes,
types, and such tuples.
'''
if self.closed():
raise ValueError("Attempt to call of_type() on a closed "
"Queryable.")
if not is_type(classinfo):
raise TypeError("of_type() parameter classinfo={0} is not a class "
"object or a type objector a tuple of class or "
"type objects.".format(classinfo))
return self.where(lambda x: isinstance(x, classinfo))
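    # A minimal usage sketch (illustrative values): of_type() is simply a
    # where() over isinstance().
    #
    #     Queryable([1, 'a', 2.5, 'b']).of_type(str).to_list()
    #     # -> ['a', 'b']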
def order_by(self, key_selector=identity):
'''Sorts by a key in ascending order.
Introduces a primary sorting order to the sequence. Additional sort
criteria should be specified by subsequent calls to then_by() and
then_by_descending(). Calling order_by() or order_by_descending() on
the results of a call to order_by() will introduce a new primary
ordering which will override any already established ordering.
This method performs a stable sort. The order of two elements with the
same key will be preserved.
Note: This method uses deferred execution.
Args:
key_selector: A unary function which extracts a key from each
element using which the result will be ordered.
Returns:
An OrderedQueryable over the sorted elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the key_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call order_by() on a "
"closed Queryable.")
if not is_callable(key_selector):
raise TypeError("order_by() parameter key_selector={key_selector} "
"is not callable".format(key_selector=repr(key_selector)))
return self._create_ordered(iter(self), -1, key_selector)
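    # A minimal usage sketch (illustrative values; assumes the
    # OrderedQueryable returned here sorts as described in the docstring):
    #
    #     Queryable([3, 1, 2]).order_by().to_list()
    #     # -> [1, 2, 3]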
def order_by_descending(self, key_selector=identity):
'''Sorts by a key in descending order.
Introduces a primary sorting order to the sequence. Additional sort
criteria should be specified by subsequent calls to then_by() and
then_by_descending(). Calling order_by() or order_by_descending() on
the results of a call to order_by() will introduce a new primary
ordering which will override any already established ordering.
This method performs a stable sort. The order of two elements with the
same key will be preserved.
Note: This method uses deferred execution.
Args:
key_selector: A unary function which extracts a key from each
element using which the result will be ordered.
Returns:
An OrderedQueryable over the sorted elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the key_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call order_by_descending() on a "
"closed Queryable.")
if not is_callable(key_selector):
raise TypeError("order_by_descending() parameter key_selector={0} "
"is not callable".format(repr(key_selector)))
return self._create_ordered(iter(self), +1, key_selector)
def take(self, count=1):
'''Returns a specified number of elements from the start of a sequence.
If the source sequence contains fewer elements than requested only the
available elements will be returned and no exception will be raised.
Note: This method uses deferred execution.
Args:
count: An optional number of elements to take. The default is one.
Returns:
A Queryable over the first count elements of the source sequence,
            or all of the elements in the source, whichever is fewer.
Raises:
ValueError: If the Queryable is closed()
'''
if self.closed():
raise ValueError("Attempt to call take() on a closed Queryable.")
count = max(0, count)
return self._create(itertools.islice(self, count))
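    # A minimal usage sketch (illustrative values): take() truncates, and a
    # shorter source simply yields fewer elements.
    #
    #     Queryable([1, 2, 3, 4]).take(2).to_list()   # -> [1, 2]
    #     Queryable([1]).take(3).to_list()            # -> [1]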
def take_while(self, predicate):
'''Returns elements from the start while the predicate is True.
Note: This method uses deferred execution.
Args:
predicate: A function returning True or False with which elements
will be tested.
Returns:
A Queryable over the elements from the beginning of the source
sequence for which predicate is True.
Raises:
ValueError: If the Queryable is closed()
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call take_while() on a closed "
"Queryable.")
if not is_callable(predicate):
raise TypeError("take_while() parameter predicate={0} is "
"not callable".format(repr(predicate)))
        # Implemented with an explicit generator rather than itertools.takewhile
return self._create(self._generate_take_while_result(predicate))
def _generate_take_while_result(self, predicate):
for x in self:
if predicate(x):
yield x
else:
break
def skip(self, count=1):
'''Skip the first count contiguous elements of the source sequence.
If the source sequence contains fewer than count elements returns an
empty sequence and does not raise an exception.
Note: This method uses deferred execution.
Args:
count: The number of elements to skip from the beginning of the
sequence. If omitted defaults to one. If count is less than one
the result sequence will be empty.
Returns:
A Queryable over the elements of source excluding the first count
elements.
Raises:
ValueError: If the Queryable is closed().
'''
if self.closed():
raise ValueError("Attempt to call skip() on a closed Queryable.")
count = max(0, count)
if count == 0:
return self
# Try an optimised version
if hasattr(self._iterable, "__getitem__"):
try:
stop = len(self._iterable)
return self._create(self._generate_optimized_skip_result(count,
stop))
except TypeError:
pass
# Fall back to the unoptimized version
return self._create(self._generate_skip_result(count))
def _generate_optimized_skip_result(self, count, stop):
for i in irange(count, stop):
yield self._iterable[i]
def _generate_skip_result(self, count):
for i, item in enumerate(self):
if i < count:
continue
yield item
def skip_while(self, predicate):
'''Omit elements from the start for which a predicate is True.
Note: This method uses deferred execution.
Args:
predicate: A single argument predicate function.
Returns:
A Queryable over the sequence of elements beginning with the first
element for which the predicate returns False.
Raises:
ValueError: If the Queryable is closed().
TypeError: If predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call take_while() on a "
"closed Queryable.")
if not is_callable(predicate):
raise TypeError("skip_while() parameter predicate={0} is "
"not callable".format(repr(predicate)))
return self._create(itertools.dropwhile(predicate, self))
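    # A minimal usage sketch for skip()/skip_while() (illustrative values):
    #
    #     Queryable([1, 2, 3, 4]).skip(2).to_list()                      # -> [3, 4]
    #     Queryable([1, 2, 3, 1]).skip_while(lambda x: x < 3).to_list()  # -> [3, 1]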
def concat(self, second_iterable):
'''Concatenates two sequences.
Note: This method uses deferred execution.
Args:
            second_iterable: The sequence to concatenate onto the source
                sequence.
Returns:
A Queryable over the concatenated sequences.
Raises:
ValueError: If the Queryable is closed().
TypeError: If second_iterable is not in fact iterable.
'''
if self.closed():
raise ValueError("Attempt to call concat() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute concat() with second_iterable of "
"non-iterable {0}".format(str(type(second_iterable))[7: -1]))
return self._create(itertools.chain(self, second_iterable))
def reverse(self):
'''Returns the sequence reversed.
Note: This method uses deferred execution, but the whole source
sequence is consumed once execution commences.
Returns:
The source sequence in reverse order.
Raises:
ValueError: If the Queryable is closed().
'''
if self.closed():
raise ValueError("Attempt to call reverse() on a "
"closed Queryable.")
# Attempt an optimised version
try:
r = reversed(self._iterable)
return self._create(r)
except TypeError:
pass
# Fall through to a sequential version
return self._create(self._generate_reverse_result())
def _generate_reverse_result(self):
lst = list(iter(self))
lst.reverse()
for item in lst:
yield item
def element_at(self, index):
'''Return the element at ordinal index.
Note: This method uses immediate execution.
Args:
index: The index of the element to be returned.
Returns:
The element at ordinal index in the source sequence.
Raises:
ValueError: If the Queryable is closed().
ValueError: If index is out of range.
'''
if self.closed():
raise ValueError("Attempt to call element_at() on a "
"closed Queryable.")
if index < 0:
raise OutOfRangeError("Attempt to use negative index.")
# Attempt to use __getitem__
try:
return self._iterable[index]
except IndexError:
raise OutOfRangeError("Index out of range.")
except TypeError:
pass
# Fall back to iterating
for i, item in enumerate(self):
if i == index:
return item
raise OutOfRangeError("element_at(index) out of range.")
def count(self, predicate=None):
'''Return the number of elements (which match an optional predicate).
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function used to identify
elements which will be counted. The single positional argument
of the function is the element value. The function should
return True or False.
Returns:
The number of elements in the sequence if the predicate is None
(the default), or if the predicate is supplied the number of
elements for which the predicate evaluates to True.
Raises:
ValueError: If the Queryable is closed().
TypeError: If predicate is neither None nor a callable.
'''
if self.closed():
raise ValueError("Attempt to call element_at() on a "
"closed Queryable.")
return self._count() if predicate is None else self._count_predicate(predicate)
def _count(self):
# Attempt to use len()
try:
return len(self._iterable)
except TypeError:
pass
# Fall back to iterating
index = -1
for index, item in enumerate(self):
pass
return index + 1
def _count_predicate(self, predicate):
if not is_callable(predicate):
raise TypeError("count() parameter predicate={0} is "
"not callable".format(repr(predicate)))
return self.where(predicate).count()
def any(self, predicate=None):
'''Determine if the source sequence contains any elements which satisfy
the predicate.
Only enough of the sequence to satisfy the predicate once is consumed.
Note: This method uses immediate execution.
Args:
predicate: An optional single argument function used to test each
element. If omitted, or None, this method returns True if there
is at least one element in the source.
Returns:
True if the sequence contains at least one element which satisfies
the predicate, otherwise False.
Raises:
ValueError: If the Queryable is closed()
'''
if self.closed():
raise ValueError("Attempt to call any() on a closed Queryable.")
if predicate is None:
predicate = lambda x: True
if not is_callable(predicate):
raise TypeError("any() parameter predicate={predicate} is not callable".format(predicate=repr(predicate)))
for item in self.select(predicate):
if item:
return True
return False
def all(self, predicate=bool):
'''Determine if all elements in the source sequence satisfy a condition.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
predicate (callable): An optional single argument function used to
                test each element. If omitted, the bool() function is used
resulting in the elements being tested directly.
Returns:
True if all elements in the sequence meet the predicate condition,
otherwise False.
Raises:
ValueError: If the Queryable is closed()
TypeError: If predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call all() on a closed Queryable.")
if not is_callable(predicate):
raise TypeError("all() parameter predicate={0} is "
"not callable".format(repr(predicate)))
return all(self.select(predicate))
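    # A minimal usage sketch for any()/all() (illustrative values):
    #
    #     Queryable([1, 2, 3]).any(lambda x: x > 2)   # -> True
    #     Queryable([1, 2, 3]).all(lambda x: x > 1)   # -> False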
def min(self, selector=identity):
'''Return the minimum value in a sequence.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
selector: An optional single argument function which will be used
to project the elements of the sequence. If omitted, the
identity function is used.
Returns:
The minimum value of the projected sequence.
Raises:
ValueError: If the Queryable has been closed.
ValueError: If the sequence is empty.
'''
if self.closed():
raise ValueError("Attempt to call min() on a closed Queryable.")
if not is_callable(selector):
raise TypeError("min() parameter selector={0} is "
"not callable".format(repr(selector)))
return min(self.select(selector))
def max(self, selector=identity):
'''Return the maximum value in a sequence.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
selector: An optional single argument function which will be used
to project the elements of the sequence. If omitted, the
identity function is used.
Returns:
The maximum value of the projected sequence.
Raises:
ValueError: If the Queryable has been closed.
ValueError: If the sequence is empty.
'''
if self.closed():
raise ValueError("Attempt to call max() on a closed Queryable.")
if not is_callable(selector):
raise TypeError("max() parameter selector={0} is "
"not callable".format(repr(selector)))
return max(self.select(selector))
def sum(self, selector=identity):
        '''Return the arithmetic sum of the values in the sequence.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
selector: An optional single argument function which will be used
to project the elements of the sequence. If omitted, the
identity function is used.
Returns:
The total value of the projected sequence, or zero for an empty
sequence.
Raises:
ValueError: If the Queryable has been closed.
'''
if self.closed():
raise ValueError("Attempt to call sum() on a closed Queryable.")
if not is_callable(selector):
raise TypeError("sum() parameter selector={0} is "
"not callable".format(repr(selector)))
return sum(self.select(selector))
def average(self, selector=identity):
        '''Return the arithmetic mean of the values in the sequence.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
selector: An optional single argument function which will be used
to project the elements of the sequence. If omitted, the
identity function is used.
Returns:
The arithmetic mean value of the projected sequence.
Raises:
ValueError: If the Queryable has been closed.
            ValueError: If the source sequence is empty.
'''
if self.closed():
raise ValueError("Attempt to call average() on a "
"closed Queryable.")
if not is_callable(selector):
raise TypeError("average() parameter selector={0} is "
"not callable".format(repr(selector)))
total = 0
count = 0
for item in self.select(selector):
total += item
count += 1
if count == 0:
raise ValueError("Cannot compute average() of an empty sequence.")
return total / count
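    # A minimal usage sketch (illustrative values; the division above is true
    # division only under Python 3):
    #
    #     Queryable([1, 2, 3, 4]).average()   # -> 2.5 on Python 3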
def contains(self, value, equality_comparer=operator.eq):
'''Determines whether the sequence contains a particular value.
        Execution is immediate. Depending on the type of the sequence, all,
        part, or none of the sequence may be consumed by this operation.
Note: This method uses immediate execution.
Args:
value: The value to test for membership of the sequence
Returns:
True if value is in the sequence, otherwise False.
Raises:
ValueError: If the Queryable has been closed.
'''
if self.closed():
raise ValueError("Attempt to call contains() on a "
"closed Queryable.")
if not is_callable(equality_comparer):
raise TypeError("contains() parameter equality_comparer={0} is "
"not callable".format(repr(equality_comparer)))
if equality_comparer is operator.eq:
return value in self._iterable
for item in self:
if equality_comparer(value, item):
return True
return False
def default_if_empty(self, default):
'''If the source sequence is empty return a single element sequence
containing the supplied default value, otherwise return the source
sequence unchanged.
Note: This method uses deferred execution.
Args:
default: The element to be returned if the source sequence is empty.
Returns:
            The source sequence, or, if the source sequence is empty, a sequence
containing a single element with the supplied default value.
Raises:
ValueError: If the Queryable has been closed.
'''
if self.closed():
raise ValueError("Attempt to call default_if_empty() on a "
"closed Queryable.")
return self._create(self._generate_default_if_empty_result(default))
def _generate_default_if_empty_result(self, default):
# Try to get an element from the iterator, if we succeed, the sequence
# is non-empty. We store the extracted value in a generator and chain
# it to the tail of the sequence in order to recreate the original
# sequence.
try:
items = iter(self)
head = next(items)
yield head
for item in items:
yield item
except StopIteration:
yield default
def distinct(self, selector=identity):
'''Eliminate duplicate elements from a sequence.
Note: This method uses deferred execution.
Args:
selector: An optional single argument function the result of which
is the value compared for uniqueness against elements already
consumed. If omitted, the element value itself is compared for
uniqueness.
Returns:
Unique elements of the source sequence as determined by the
selector function. Note that it is unprojected elements that are
returned, even if a selector was provided.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call distinct() on a "
"closed Queryable.")
if not is_callable(selector):
raise TypeError("distinct() parameter selector={0} is "
"not callable".format(repr(selector)))
return self._create(self._generate_distinct_result(selector))
def _generate_distinct_result(self, selector):
seen = set()
for item in self:
t_item = selector(item)
if t_item in seen:
continue
seen.add(t_item)
yield item
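    # A minimal usage sketch for distinct() (illustrative values): the first
    # occurrence of each value is kept, in source order.
    #
    #     Queryable([1, 2, 2, 3, 1]).distinct().to_list()   # -> [1, 2, 3]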
def difference(self, second_iterable, selector=identity):
'''Returns those elements which are in the source sequence which are not
in the second_iterable.
This method is equivalent to the Except() LINQ operator, renamed to a
valid Python identifier.
Note: This method uses deferred execution, but as soon as execution
commences the entirety of the second_iterable is consumed;
therefore, although the source sequence may be infinite the
second_iterable must be finite.
Args:
second_iterable: Elements from this sequence are excluded from the
returned sequence. This sequence will be consumed in its
entirety, so must be finite.
            selector: An optional single argument function which selects from the
elements of both sequences the values which will be
compared for equality. If omitted the identity function will
be used.
Returns:
A sequence containing all elements in the source sequence except
those which are also members of the second sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the second_iterable is not in fact iterable.
TypeError: If the selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call difference() on a "
"closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute difference() with second_iterable"
"of non-iterable {0}".format(str(type(second_iterable))[7: -2]))
if not is_callable(selector):
raise TypeError("difference() parameter selector={0} is "
"not callable".format(repr(selector)))
return self._create(self._generate_difference_result(second_iterable,
selector))
def _generate_difference_result(self, second_iterable, selector):
seen_elements = self._create(second_iterable).select(selector) \
.distinct().to_set()
for item in self:
sitem = selector(item)
            if sitem not in seen_elements:
seen_elements.add(sitem)
yield item
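    # A minimal usage sketch for difference() (illustrative values):
    #
    #     Queryable([1, 2, 3, 4]).difference([2, 4]).to_list()   # -> [1, 3]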
def intersect(self, second_iterable, selector=identity):
'''Returns those elements which are both in the source sequence and in
the second_iterable.
Note: This method uses deferred execution.
Args:
            second_iterable: Elements of the source sequence are returned
                only if they are also present in this sequence.
selector: An optional single argument function which is used to
project the elements in the source and second_iterables prior
to comparing them. If omitted the identity function will be
used.
Returns:
A sequence containing all elements in the source sequence which
are also members of the second sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the second_iterable is not in fact iterable.
TypeError: If the selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call intersect() on a "
"closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute intersect() with second_iterable "
"of non-iterable {0}".format(str(type(second_iterable))[7: -1]))
if not is_callable(selector):
raise TypeError("intersect() parameter selector={0} is "
"not callable".format(repr(selector)))
return self._create(self._generate_intersect_result(second_iterable,
selector))
def _generate_intersect_result(self, second_iterable, selector):
second_set = self._create(second_iterable).select(selector) \
.distinct().to_set()
for item in self:
sitem = selector(item)
if sitem in second_set:
second_set.remove(sitem)
yield item
def union(self, second_iterable, selector=identity):
'''Returns those elements which are either in the source sequence or in
the second_iterable, or in both.
Note: This method uses deferred execution.
Args:
            second_iterable: Elements from this sequence are returned if they
are not also in the source sequence.
selector: An optional single argument function which is used to
project the elements in the source and second_iterables prior
to comparing them. If omitted the identity function will be
used.
Returns:
A sequence containing all elements in the source sequence and second
sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the second_iterable is not in fact iterable.
TypeError: If the selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call union() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute union() with second_iterable of "
"non-iterable {0}".format(str(type(second_iterable))[7: -1]))
return self._create(itertools.chain(self, second_iterable)).distinct(selector)
def join(self, inner_iterable, outer_key_selector=identity,
inner_key_selector=identity,
result_selector=lambda outer, inner: (outer, inner)):
'''Perform an inner join with a second sequence using selected keys.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
                argument of the function should accept inner elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from two matching elements of the outer and inner. If
omitted the result elements will be a 2-tuple pair of the
matching outer and inner elements.
Returns:
A Queryable whose elements are the result of performing an inner-
join on two sequences.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call join() on a closed Queryable.")
if not is_iterable(inner_iterable):
raise TypeError("Cannot compute join() with inner_iterable of "
"non-iterable {0}".format(str(type(inner_iterable))[7: -1]))
if not is_callable(outer_key_selector):
raise TypeError("join() parameter outer_key_selector={0} is not "
"callable".format(repr(outer_key_selector)))
if not is_callable(inner_key_selector):
raise TypeError("join() parameter inner_key_selector={0} is not "
"callable".format(repr(inner_key_selector)))
if not is_callable(result_selector):
raise TypeError("join() parameter result_selector={0} is not "
"callable".format(repr(result_selector)))
return self._create(self._generate_join_result(inner_iterable, outer_key_selector,
inner_key_selector, result_selector))
def _generate_join_result(self, inner_iterable, outer_key_selector, inner_key_selector, result_selector):
lookup = self._create(inner_iterable).to_lookup(inner_key_selector)
result = self.select_many_with_correspondence(lambda outer_element: lookup[outer_key_selector(outer_element)],
result_selector)
for item in result:
yield item
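    # A minimal usage sketch for join() (illustrative values; assumes the
    # Lookup used above yields an empty group for unmatched keys, so only
    # matching pairs appear in the result):
    #
    #     Queryable(['apple', 'avocado', 'banana']).join(
    #         ['a', 'b'],
    #         outer_key_selector=lambda word: word[0],
    #         inner_key_selector=lambda letter: letter).to_list()
    #     # -> [('apple', 'a'), ('avocado', 'a'), ('banana', 'b')]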
def group_join(self, inner_iterable, outer_key_selector=identity, inner_key_selector=identity,
result_selector=lambda outer, grouping: grouping):
'''Match elements of two sequences using keys and group the results.
The group_join() query produces a hierarchical result, with all of the
inner elements in the result grouped against the matching outer
element.
The order of elements from outer is maintained. For each of these the
order of elements from inner is also preserved.
Note: This method uses deferred execution.
Args:
inner_iterable: The sequence to join with the outer sequence.
outer_key_selector: An optional unary function to extract keys from
elements of the outer (source) sequence. The first positional
argument of the function should accept outer elements and the
result value should be the key. If omitted, the identity
function is used.
inner_key_selector: An optional unary function to extract keys
from elements of the inner_iterable. The first positional
                argument of the function should accept inner elements and the
result value should be the key. If omitted, the identity
function is used.
result_selector: An optional binary function to create a result
element from an outer element and the Grouping of matching
                inner elements. The first positional argument is the outer
                element and the second is the Grouping of inner elements
which match the outer element according to the key selectors
used. If omitted, the result elements will be the Groupings
directly.
Returns:
A Queryable over a sequence with one element for each group in the
result as returned by the result_selector. If the default result
selector is used, the result is a sequence of Grouping objects.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If the inner_iterable is not in fact iterable.
TypeError: If the outer_key_selector is not callable.
TypeError: If the inner_key_selector is not callable.
TypeError: If the result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call group_join() on a closed Queryable.")
if not is_iterable(inner_iterable):
raise TypeError("Cannot compute group_join() with inner_iterable of non-iterable {type}".format(
type=str(type(inner_iterable))[7: -1]))
if not is_callable(outer_key_selector):
raise TypeError("group_join() parameter outer_key_selector={outer_key_selector} is not callable".format(
outer_key_selector=repr(outer_key_selector)))
if not is_callable(inner_key_selector):
raise TypeError("group_join() parameter inner_key_selector={inner_key_selector} is not callable".format(
inner_key_selector=repr(inner_key_selector)))
if not is_callable(result_selector):
raise TypeError("group_join() parameter result_selector={result_selector} is not callable".format(
result_selector=repr(result_selector)))
return self._create(self._generate_group_join_result(inner_iterable, outer_key_selector,
inner_key_selector, result_selector))
def _generate_group_join_result(self, inner_iterable, outer_key_selector, inner_key_selector, result_selector):
lookup = self._create(inner_iterable).to_lookup(inner_key_selector)
for outer_element in self:
outer_key = outer_key_selector(outer_element)
yield result_selector(outer_element, lookup[outer_key])
def first(self, predicate=None):
'''The first element in a sequence (optionally satisfying a predicate).
If the predicate is omitted or is None this query returns the first
element in the sequence; otherwise, it returns the first element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is no such element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the first element of the source sequence will
be returned.
Returns:
The first element of the sequence if predicate is None, otherwise
the first element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
ValueError: If the source sequence is empty.
ValueError: If there are no elements matching the predicate.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call first() on a closed Queryable.")
return self._first() if predicate is None else self._first_predicate(predicate)
def _first(self):
try:
return next(iter(self))
except StopIteration:
raise ValueError("Cannot return first() from an empty sequence.")
def _first_predicate(self, predicate):
for item in self:
if predicate(item):
return item
raise ValueError("No elements matching predicate in call to first()")
def first_or_default(self, default, predicate=None):
'''The first element (optionally satisfying a predicate) or a default.
If the predicate is omitted or is None this query returns the first
element in the sequence; otherwise, it returns the first element in
the sequence for which the predicate evaluates to True. If there is no
such element the value of the default argument is returned.
Note: This method uses immediate execution.
Args:
default: The value which will be returned if either the sequence is
empty or there are no elements matching the predicate.
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the first element of the source sequence will
be returned.
Returns:
The first element of the sequence if predicate is None, otherwise
the first element for which the predicate returns True. If there is
no such element, the default argument is returned.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call first_or_default() on a "
"closed Queryable.")
return self._first_or_default(default) if predicate is None else self._first_or_default_predicate(default, predicate)
def _first_or_default(self, default):
try:
return next(iter(self))
except StopIteration:
return default
def _first_or_default_predicate(self, default, predicate):
for item in self:
if predicate(item):
return item
return default
def single(self, predicate=None):
'''The only element (which satisfies a condition).
If the predicate is omitted or is None this query returns the only
element in the sequence; otherwise, it returns the only element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is either no such element or more than one such
element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the only element of the source sequence will
be returned.
Returns:
The only element of the sequence if predicate is None, otherwise
the only element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
            ValueError: If, when predicate is None, the source sequence
                contains more than one element.
            ValueError: If there are no elements matching the predicate or
                more than one element matching the predicate.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call single() on a closed Queryable.")
return self._single() if predicate is None else self._single_predicate(predicate)
def _single(self):
p = iter(self)
try:
result = next(p)
except StopIteration:
raise ValueError("Cannot return single() from an empty sequence.")
try:
next(p)
except StopIteration:
return result
raise ValueError("Sequence for single() contains multiple elements")
def _single_predicate(self, predicate):
found = False
for item in self:
if predicate(item):
                if found:
raise ValueError("Sequence contains more than one value matching single() predicate.")
result = item
found = True
        if not found:
raise ValueError("Sequence for single() contains no items matching the predicate.")
return result
def single_or_default(self, default, predicate=None):
'''The only element (which satisfies a condition) or a default.
If the predicate is omitted or is None this query returns the only
element in the sequence; otherwise, it returns the only element in
the sequence for which the predicate evaluates to True. A default value
is returned if there is no such element. An exception is raised if
there is more than one such element.
Note: This method uses immediate execution.
Args:
default: The value which will be returned if either the sequence is
empty or there are no elements matching the predicate.
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the only element of the source sequence will
be returned.
Returns:
The only element of the sequence if predicate is None, otherwise
the only element for which the predicate returns True. If there are
            no such elements the default value will be returned.
Raises:
ValueError: If the Queryable is closed.
            ValueError: If, when predicate is None, the source sequence
                contains more than one element.
            ValueError: If there is more than one element matching the
predicate.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call single_or_default() on a closed Queryable.")
return self._single_or_default(default) if predicate is None else self._single_or_default_predicate(default, predicate)
def _single_or_default(self, default):
p = iter(self)
try:
result = next(p)
except StopIteration:
return default
try:
next(p)
except StopIteration:
return result
raise ValueError("Sequence for single_or_default() contains multiple elements.")
def _single_or_default_predicate(self, default, predicate):
found = False
result = default
for item in self:
if predicate(item):
                if found:
raise ValueError("Sequence contains more than one value matching single_or_default() predicate.")
result = item
found = True
return result
def last(self, predicate=None):
'''The last element in a sequence (optionally satisfying a predicate).
If the predicate is omitted or is None this query returns the last
element in the sequence; otherwise, it returns the last element in
the sequence for which the predicate evaluates to True. Exceptions are
raised if there is no such element.
Note: This method uses immediate execution.
Args:
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the last element of the source sequence will
be returned.
Returns:
The last element of the sequence if predicate is None, otherwise
the last element for which the predicate returns True.
Raises:
ValueError: If the Queryable is closed.
ValueError: If the source sequence is empty.
ValueError: If there are no elements matching the predicate.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call last() on a closed Queryable.")
return self._last() if predicate is None else self._last_predicate(predicate)
def _last(self):
# Attempt an optimised version
try:
return self._iterable[-1]
except IndexError:
raise ValueError("Cannot return last() from an empty sequence.")
except TypeError:
pass
sentinel = object()
result = sentinel
for item in self:
result = item
if result is sentinel:
raise ValueError("Cannot return last() from an empty sequence.")
return result
def _last_predicate(self, predicate):
# Attempt an optimised version
try:
r = reversed(self._iterable)
            return self._create(r).first(predicate)
except TypeError:
pass
# Fall through to the sequential version
sentinel = object()
result = sentinel
for item in self:
if predicate(item):
result = item
if result is sentinel:
raise ValueError("No item matching predicate in call to last().")
return result
def last_or_default(self, default, predicate=None):
'''The last element (optionally satisfying a predicate) or a default.
If the predicate is omitted or is None this query returns the last
element in the sequence; otherwise, it returns the last element in
the sequence for which the predicate evaluates to True. If there is no
such element the value of the default argument is returned.
Note: This method uses immediate execution.
Args:
default: The value which will be returned if either the sequence is
empty or there are no elements matching the predicate.
predicate: An optional unary predicate function, the only argument
to which is the element. The return value should be True for
matching elements, otherwise False. If the predicate is
omitted or None the last element of the source sequence will
be returned.
Returns:
The last element of the sequence if predicate is None, otherwise
the last element for which the predicate returns True. If there is
no such element, the default argument is returned.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call last_or_default() on a "
"closed Queryable.")
return self._last_or_default(default) if predicate is None else self._last_or_default_predicate(default, predicate)
def _last_or_default(self, default):
# Attempt an optimised version
try:
return self._iterable[-1]
except IndexError:
return default
except TypeError:
pass
# Fall through to the sequential version
sentinel = object()
result = sentinel
for item in iter(self):
result = item
if result is sentinel:
return default
return result
def _last_or_default_predicate(self, default, predicate):
try:
r = reversed(self._iterable)
return self._create(r).first_or_default(default, predicate)
except TypeError:
# Fall through to the sequential version
pass
sentinel = object()
result = sentinel
for item in iter(self):
if predicate(item):
result = item
if result is sentinel:
return default
return result
def aggregate(self, reducer, seed=default, result_selector=identity):
'''Apply a function over a sequence to produce a single result.
Apply a binary function cumulatively to the elements of the source
sequence so as to reduce the iterable to a single value.
Note: This method uses immediate execution.
Args:
reducer: A binary function the first positional argument of which
is an accumulated value and the second is the update value from
the source sequence. The return value should be the new
accumulated value after the update value has been incorporated.
seed: An optional value used to initialise the accumulator before
                iteration over the source sequence. If seed is omitted and
                the source sequence contains only one item, then that item
                is returned.
result_selector: An optional unary function applied to the final
accumulator value to produce the result. If omitted, defaults
to the identity function.
Raises:
ValueError: If called on an empty sequence with no seed value.
TypeError: If reducer is not callable.
TypeError: If result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call aggregate() on a "
"closed Queryable.")
if not is_callable(reducer):
raise TypeError("aggregate() parameter reducer={0} is "
"not callable".format(repr(reducer)))
if not is_callable(result_selector):
raise TypeError("aggregate() parameter result_selector={0} is "
"not callable".format(repr(result_selector)))
if seed is default:
try:
return result_selector(fold(reducer, self))
            except TypeError as e:
                if 'empty sequence' in str(e):
                    raise ValueError("Cannot aggregate() empty sequence with "
                                     "no seed value")
                # Any other TypeError is unrelated to an empty sequence, so
                # re-raise it rather than falling through to the seeded fold.
                raise
return result_selector(fold(reducer, self, seed))
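    # A minimal usage sketch (illustrative values): without a seed the first
    # element initialises the accumulator; with a seed the fold starts there.
    #
    #     Queryable([1, 2, 3, 4]).aggregate(lambda acc, x: acc + x)          # -> 10
    #     Queryable([1, 2, 3, 4]).aggregate(lambda acc, x: acc * x, seed=1)  # -> 24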
def zip(self, second_iterable, result_selector=lambda x, y: (x, y)):
'''Elementwise combination of two sequences.
The source sequence and the second iterable are merged element-by-
element using a function to combine them into the single corresponding
element of the result sequence. The length of the result sequence is
equal to the length of the shorter of the two input sequences.
Note: This method uses deferred execution.
Args:
second_iterable: The second sequence to be combined with the source
sequence.
result_selector: An optional binary function for combining
corresponding elements of the source sequences into an
element of the result sequence. The first and second positional
arguments are the elements from the source sequences. The
result should be the result sequence element. If omitted, the
result sequence will consist of 2-tuple pairs of corresponding
elements from the source sequences.
Returns:
A Queryable over the merged elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If result_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call zip() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute zip() with second_iterable of "
"non-iterable {0}".format(str(type(second_iterable))[7: -1]))
if not is_callable(result_selector):
raise TypeError("zip() parameter result_selector={0} is "
"not callable".format(repr(result_selector)))
return self._create(result_selector(*t) for t in izip(self, second_iterable))
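    # A minimal usage sketch (illustrative values): the result is as long as
    # the shorter of the two sequences.
    #
    #     Queryable([1, 2, 3]).zip(['a', 'b']).to_list()
    #     # -> [(1, 'a'), (2, 'b')]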
def to_list(self):
'''Convert the source sequence to a list.
Note: This method uses immediate execution.
'''
if self.closed():
raise ValueError("Attempt to call to_list() on a closed Queryable.")
# Maybe use with closable(self) construct to achieve this.
if isinstance(self._iterable, list):
return self._iterable
lst = list(self)
# Ideally we would close here. Why can't we - what is the problem?
#self.close()
return lst
def to_tuple(self):
'''Convert the source sequence to a tuple.
Note: This method uses immediate execution.
'''
if self.closed():
raise ValueError("Attempt to call to_tuple() on a closed Queryable.")
if isinstance(self._iterable, tuple):
return self._iterable
tup = tuple(self)
# Ideally we would close here
#self.close()
return tup
def to_set(self):
'''Convert the source sequence to a set.
Note: This method uses immediate execution.
Raises:
            ValueError: If duplicate items are in the source sequence.
ValueError: If the Queryable is closed().
'''
if self.closed():
raise ValueError("Attempt to call to_set() on a closed Queryable.")
if isinstance(self._iterable, set):
return self._iterable
s = set()
for item in self:
if item in s:
raise ValueError("Duplicate item value {0} in sequence "
"during to_set()".format(repr(item)))
s.add(item)
# Ideally we would close here
#self.close()
return s
def to_lookup(self, key_selector=identity, value_selector=identity):
'''Returns a Lookup object, using the provided selector to generate a
key for each item.
Note: This method uses immediate execution.
'''
if self.closed():
raise ValueError("Attempt to call to_lookup() on a closed Queryable.")
if not is_callable(key_selector):
raise TypeError("to_lookup() parameter key_selector={key_selector} is not callable".format(
key_selector=repr(key_selector)))
if not is_callable(value_selector):
raise TypeError("to_lookup() parameter value_selector={value_selector} is not callable".format(
value_selector=repr(value_selector)))
key_value_pairs = self.select(lambda item: (key_selector(item), value_selector(item)))
lookup = Lookup(key_value_pairs)
# Ideally we would close here
#self.close()
return lookup
def to_dictionary(self, key_selector=identity, value_selector=identity):
'''Build a dictionary from the source sequence.
Note: This method uses immediate execution.
Raises:
ValueError: If the Queryable is closed.
ValueError: If duplicate keys are in the projected source sequence.
TypeError: If key_selector is not callable.
TypeError: If value_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call to_dictionary() on a closed Queryable.")
if not is_callable(key_selector):
raise TypeError("to_dictionary() parameter key_selector={key_selector} is not callable".format(
key_selector=repr(key_selector)))
if not is_callable(value_selector):
raise TypeError("to_dictionary() parameter value_selector={value_selector} is not callable".format(
value_selector=repr(value_selector)))
dictionary = {}
for key, value in self.select(lambda x: (key_selector(x), value_selector(x))):
if key in dictionary:
raise ValueError("Duplicate key value {key} in sequence during to_dictionary()".format(key=repr(key)))
dictionary[key] = value
return dictionary
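    # A hedged usage sketch (assumes the query() initiator from this package):
    #
    #   >>> d = query(['a', 'bb', 'ccc']).to_dictionary(key_selector=len,
    #   ...                                             value_selector=str.upper)
    #   >>> d[2]
    #   'BB'
    #
    # A key_selector which produces the same key twice raises ValueError, as
    # documented above.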
def to_str(self, separator=''):
'''Build a string from the source sequence.
        The elements of the query result will each be coerced to a string and then
the resulting strings concatenated to return a single string. This
allows the natural processing of character sequences as strings. An
optional separator which will be inserted between each item may be
specified.
Note: this method uses immediate execution.
Args:
separator: An optional separator which will be coerced to a string
and inserted between each source item in the resulting string.
Returns:
A single string which is the result of stringifying each element
and concatenating the results into a single string.
Raises:
TypeError: If any element cannot be coerced to a string.
TypeError: If the separator cannot be coerced to a string.
ValueError: If the Queryable is closed.
'''
if self.closed():
raise ValueError("Attempt to call to_str() on a closed Queryable.")
return str(separator).join(self.select(str))
def sequence_equal(self, second_iterable, equality_comparer=operator.eq):
'''
Determine whether two sequences are equal by elementwise comparison.
Sequence equality is defined as the two sequences being equal length
and corresponding elements being equal as determined by the equality
comparer.
Note: This method uses immediate execution.
Args:
second_iterable: The sequence which will be compared with the
source sequence.
equality_comparer: An optional binary predicate function which is
used to compare corresponding elements. Should return True if
the elements are equal, otherwise False. The default equality
comparer is operator.eq which calls __eq__ on elements of the
source sequence with the corresponding element of the second
sequence as a parameter.
Returns:
True if the sequences are equal, otherwise False.
Raises:
ValueError: If the Queryable is closed.
TypeError: If second_iterable is not in fact iterable.
TypeError: If equality_comparer is not callable.
'''
if self.closed():
            raise ValueError("Attempt to call sequence_equal() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute sequence_equal() with second_iterable of non-iterable {type}".format(
type=str(type(second_iterable))[7: -1]))
if not is_callable(equality_comparer):
            raise TypeError("sequence_equal() parameter equality_comparer={equality_comparer} is not callable".format(
equality_comparer=repr(equality_comparer)))
# Try to check the lengths directly as an optimization
try:
if len(self._iterable) != len(second_iterable):
return False
except TypeError:
pass
sentinel = object()
for first, second in izip_longest(self, second_iterable, fillvalue=sentinel):
if first is sentinel or second is sentinel:
return False
if not equality_comparer(first, second):
return False
return True
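    # A hedged usage sketch (assumes the query() initiator from this package):
    #
    #   >>> query([1, 2, 3]).sequence_equal([1, 2, 3])
    #   True
    #   >>> query([1, 2, 3]).sequence_equal([1, 2])
    #   False
    #   >>> query(['a', 'B']).sequence_equal(['A', 'b'], lambda x, y: x.lower() == y.lower())
    #   True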
def __eq__(self, rhs):
'''Determine value equality with another iterable.
Args:
rhs: Any iterable collection.
Returns:
True if the sequences are equal in value, otherwise False.
'''
return self.sequence_equal(rhs)
def __ne__(self, rhs):
'''Determine value inequality with another iterable.
Args:
rhs: Any iterable collection.
Returns:
            True if the sequences are unequal in value, otherwise False.
'''
return not (self == rhs)
def log(self, logger=None, label=None, eager=False):
'''
Log query result consumption details to a logger.
Args:
logger: Any object which supports a debug() method which accepts a
str, such as a Python standard library logger object from the
logging module. If logger is not provided or is None, this
method has no logging side effects.
label: An optional label which will be inserted into each line of
                logging output produced by this particular use of log.
eager: An optional boolean which controls how the query result will
be consumed. If True, the sequence will be consumed and logged
in its entirety. If False (the default) the sequence will be
                evaluated and logged lazily as it is consumed.
Warning: Use of eager=True requires use of sufficient memory to
hold the entire sequence which is obviously not possible with
infinite sequences. Use with care!
Returns:
A queryable over the unaltered source sequence.
Raises:
AttributeError: If logger does not support a debug() method.
ValueError: If the Queryable has been closed.
'''
if self.closed():
raise ValueError("Attempt to call log() on a closed Queryable.")
if logger is None:
return self
if label is None:
label = repr(self)
if eager:
return self._create(self._eager_log_result(logger, label))
return self._create(self._generate_lazy_log_result(logger, label))
def _eager_log_result(self, logger, label):
seq1, seq2 = itertools.tee(self)
logger.debug(label + " : BEGIN (EAGER)")
for index, element in enumerate(seq1):
logger.debug(label + ' : [' + str(index) + '] = ' + repr(element))
logger.debug(label + " : END (EAGER)")
return seq2
def _generate_lazy_log_result(self, logger, label):
logger.debug(label + " : BEGIN (DEFERRED)")
for index, element in enumerate(self):
logger.debug(label + ' : [' + str(index) + '] yields ' + repr(element))
yield element
logger.debug(label + " : END (DEFERRED)")
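    # A hedged usage sketch (assumes the query() initiator from this package
    # and the standard library logging module):
    #
    #   >>> import logging
    #   >>> logging.basicConfig(level=logging.DEBUG)
    #   >>> log_to = logging.getLogger('asq')
    #   >>> query([1, 2, 3]).log(log_to, label='source').select(lambda x: x * 2).to_list()
    #   [2, 4, 6]
    #
    # With the default eager=False each element is logged only as the final
    # to_list() call pulls it through the pipeline.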
def as_parallel(self, pool=None):
'''Return a ParallelQueryable for parallel execution of queries.
Warning: This feature should be considered experimental alpha quality.
Args:
pool: An optional multiprocessing pool which will provide execution
                resources for parallel processing. If omitted, a pool will be
created if necessary and managed internally.
Returns:
A ParallelQueryable on which all the standard query operators may
be called.
'''
from .parallel_queryable import ParallelQueryable
return ParallelQueryable(self, pool)
# More operators
def scan(self, func=operator.add):
'''
An inclusive prefix sum which returns the cumulative application of the
        supplied function up to and including the current element.
Args:
func: An optional binary function which is commutative - that is,
the order of the arguments is unimportant. Defaults to a
summing operator.
Returns:
A Queryable such that the nth element is the sum of the first n
elements of the source sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If func is not callable.
'''
if self.closed():
raise ValueError("Attempt to call scan() on a "
"closed Queryable.")
if not is_callable(func):
raise TypeError("scan() parameter func={0} is "
"not callable".format(repr(func)))
return self._create(self._generate_scan_result(func))
def _generate_scan_result(self, func):
i = iter(self)
try:
item = next(i)
yield item
accumulator = item
except StopIteration:
return
for item in i:
accumulator = func(accumulator, item)
yield accumulator
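    # A hedged usage sketch (assumes the query() initiator from this package;
    # the operator module is already imported at module level):
    #
    #   >>> query([1, 2, 3, 4]).scan().to_list()           # running totals
    #   [1, 3, 6, 10]
    #   >>> query([1, 2, 3, 4]).scan(operator.mul).to_list()
    #   [1, 2, 6, 24]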
def pre_scan(self, func=operator.add, seed=0):
'''
An exclusive prefix sum which returns the cumulative application of the
supplied function up to but excluding the current element.
Args:
func: An optional binary function which is commutative - that is,
the order of the arguments is unimportant. Defaults to a
summing operator.
seed: The first element of the prefix sum and therefore also the
first element of the returned sequence.
Returns:
A Queryable such that the nth element is the sum of the first n-1
elements of the source sequence.
Raises:
ValueError: If the Queryable has been closed.
TypeError: If func is not callable.
'''
if self.closed():
raise ValueError("Attempt to call pre_scan() on a "
"closed Queryable.")
if not is_callable(func):
raise TypeError("pre_scan() parameter func={0} is "
"not callable".format(repr(func)))
return self._create(self._generate_pre_scan_result(func, seed))
def _generate_pre_scan_result(self, func, seed):
accumulator = seed
for item in self:
yield accumulator
accumulator = func(accumulator, item)
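    # A hedged usage sketch (assumes the query() initiator from this package):
    #
    #   >>> query([1, 2, 3, 4]).pre_scan().to_list()        # exclusive totals
    #   [0, 1, 3, 6]
    #   >>> query([1, 2, 3]).pre_scan(operator.mul, seed=1).to_list()
    #   [1, 1, 2]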
# Methods for more Pythonic usage
# Note: __len__ cannot be efficiently implemented in an idempotent fashion
    # (without consuming the iterable or changing the state of the object).
    # Call count() instead; see
# http://stackoverflow.com/questions/3723337/listy-behavior-is-wrong-on-first-call
# for more details. This is problematic if a Queryable is consumed using the
# list() constructor, which calls __len__ prior to constructing the list as
# an efficiency optimisation.
def __contains__(self, item):
'''Support for membership testing using the 'in' operator.
Args:
item: The item for which to test membership.
Returns:
True if item is in the sequence, otherwise False.
'''
return self.contains(item)
def __getitem__(self, index):
'''Support for indexing into the sequence using square brackets.
Equivalent to element_at().
Args:
index: The index should be between zero and count() - 1 inclusive.
Negative indices are not interpreted in the same way they are
for built-in lists, and are considered out-of-range.
Returns:
The value of the element at offset index into the sequence.
Raises:
ValueError: If the Queryable is closed().
IndexError: If the index is out-of-range.
'''
try:
return self.element_at(index)
except OutOfRangeError as e:
raise IndexError(str(e))
def __reversed__(self):
'''Support for sequence reversal using the reversed() built-in.
Called by reversed() to implement reverse iteration.
Equivalent to the reverse() method.
Returns:
A Queryable over the reversed sequence.
Raises:
ValueError: If the Queryable is closed().
'''
return self.reverse()
def __repr__(self):
'''Returns a stringified representation of the Queryable.
The string will *not* necessarily contain the sequence data.
Returns:
A stringified representation of the Queryable.
'''
# Must be careful not to consume the iterable here
return 'Queryable({iterable})'.format(iterable=self._iterable)
def __str__(self):
'''Returns a stringified representation of the Queryable.
The string *will* necessarily contain the sequence data.
Returns:
A stringified representation of the Queryable.
'''
return self.to_str()
if has_unicode_type():
@extend(Queryable)
def __unicode__(self):
'''Returns a stringified unicode representation of the Queryable.
Note: This method is only available on Python implementations which
support the named unicode type (e.g. Python 2.x).
The string *will* necessarily contain the sequence data.
Returns:
A stringified unicode representation of the Queryable.
'''
return self.to_unicode()
@extend(Queryable)
def to_unicode(self, separator=''):
'''Build a unicode string from the source sequence.
Note: This method is only available on Python implementations which
support the named unicode type (e.g. Python 2.x).
        The elements of the query result will each be coerced to a unicode
string and then the resulting strings concatenated to return a
single string. This allows the natural processing of character
sequences as strings. An optional separator which will be inserted
between each item may be specified.
Note: this method uses immediate execution.
Args:
separator: An optional separator which will be coerced to a
unicode string and inserted between each source item in the
resulting string.
Returns:
A single unicode string which is the result of stringifying each
element and concatenating the results into a single string.
Raises:
TypeError: If any element cannot be coerced to a string.
TypeError: If the separator cannot be coerced to a string.
ValueError: If the Queryable is closed.
'''
if self.closed():
raise ValueError("Attempt to call to_unicode() on a closed "
"Queryable.")
return unicode(separator).join(self.select(unicode))
class OrderedQueryable(Queryable):
'''A Queryable representing an ordered iterable.
The sorting implemented by this class is an incremental partial sort so
you don't pay for sorting results which are never enumerated.'''
def __init__(self, iterable, order, func):
'''Create an OrderedIterable.
Args:
iterable: The iterable sequence to be ordered.
            order: -1 for ascending, +1 for descending.
func: The function to select the sorting key.
'''
assert abs(order) == 1, 'order argument must be +1 or -1'
super(OrderedQueryable, self).__init__(iterable)
self._funcs = [(order, func)]
def then_by(self, key_selector=identity):
'''Introduce subsequent ordering to the sequence with an optional key.
The returned sequence will be sorted in ascending order by the
selected key.
Note: This method uses deferred execution.
Args:
key_selector: A unary function the only positional argument to
which is the element value from which the key will be
selected. The return value should be the key from that
element.
Returns:
An OrderedQueryable over the sorted items.
Raises:
ValueError: If the OrderedQueryable is closed().
TypeError: If key_selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call then_by() on a "
"closed OrderedQueryable.")
if not is_callable(key_selector):
raise TypeError("then_by() parameter key_selector={key_selector} "
"is not callable".format(key_selector=repr(key_selector)))
self._funcs.append((-1, key_selector))
return self
def then_by_descending(self, key_selector=identity):
'''Introduce subsequent ordering to the sequence with an optional key.
The returned sequence will be sorted in descending order by the
selected key.
Note: This method uses deferred execution.
Args:
key_selector: A unary function the only positional argument to
which is the element value from which the key will be
selected. The return value should be the key from that
element.
Returns:
An OrderedQueryable over the sorted items.
Raises:
ValueError: If the OrderedQueryable is closed().
TypeError: If key_selector is not callable.
'''
if self.closed():
            raise ValueError("Attempt to call then_by_descending() on a closed OrderedQueryable.")
if not is_callable(key_selector):
raise TypeError("then_by_descending() parameter key_selector={key_selector} is not callable".format(key_selector=repr(key_selector)))
self._funcs.append((+1, key_selector))
return self
def __iter__(self):
'''Support for the iterator protocol.
Returns:
An iterator object over the sorted elements.
'''
# Determine which sorting algorithms to use
directions = [direction for direction, _ in self._funcs]
direction_total = sum(directions)
if direction_total == -len(self._funcs):
# Uniform ascending sort - do nothing
MultiKey = tuple
elif direction_total == len(self._funcs):
# Uniform descending sort - invert sense of operators
@totally_ordered
class MultiKey(object):
def __init__(self, t):
self.t = tuple(t)
def __lt__(lhs, rhs):
# Uniform descending sort - swap the comparison operators
return lhs.t > rhs.t
def __eq__(lhs, rhs):
return lhs.t == rhs.t
else:
# Mixed ascending/descending sort - override all operators
@totally_ordered
class MultiKey(object):
def __init__(self, t):
self.t = tuple(t)
# TODO: [asq 1.1] We could use some runtime code generation here to compile a custom comparison operator
def __lt__(lhs, rhs):
for direction, lhs_element, rhs_element in zip(directions, lhs.t, rhs.t):
cmp = (lhs_element > rhs_element) - (rhs_element > lhs_element)
if cmp == direction:
return True
if cmp == -direction:
return False
return False
def __eq__(lhs, rhs):
return lhs.t == rhs.t
        # Decorate each item with its composite sort key and a stable index, then heap-sort
def create_key(index, item):
return MultiKey(func(item) for _, func in self._funcs)
lst = [(create_key(index, item), index, item) for index, item in enumerate(self._iterable)]
heapq.heapify(lst)
while lst:
key, index, item = heapq.heappop(lst)
yield item
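    # A hedged usage sketch (assumes the order_by() operator defined earlier
    # in this module, which is what creates OrderedQueryable instances):
    #
    #   >>> query(['bob', 'eve', 'al', 'ann']).order_by(len).then_by().to_list()
    #   ['al', 'ann', 'bob', 'eve']
    #
    # The heap-based implementation above only pays the full sorting cost for
    # elements which are actually consumed.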
class Lookup(Queryable):
'''A multi-valued dictionary.
A Lookup represents a collection of keys, each one of which is mapped to
one or more values. The keys in the Lookup are maintained in the order in
which they were added. The values for each key are also maintained in
order.
Note: Lookup objects are immutable.
All standard query operators may be used on a Lookup. When iterated or
used as a Queryable the elements are returned as a sequence of Grouping
objects.
'''
def __init__(self, key_value_pairs):
'''Construct a Lookup with a sequence of (key, value) tuples.
Args:
key_value_pairs:
An iterable over 2-tuples each containing a key, value pair.
'''
# Maintain an ordered dictionary of groups represented as lists
self._dict = OrderedDict()
for key, value in key_value_pairs:
if key not in self._dict:
self._dict[key] = []
self._dict[key].append(value)
# Replace each list with a Grouping
for key, value in iteritems(self._dict):
grouping = Grouping(key, value)
self._dict[key] = grouping
super(Lookup, self).__init__(self._dict)
def _iter(self):
return itervalues(self._dict)
def __getitem__(self, key):
'''The sequence corresponding to a given key, or an empty sequence if
there are no values corresponding to that key.
Args:
key: The key of the group to be returned.
Returns:
The Grouping corresponding to the supplied key.
'''
if key in self._dict:
return self._dict[key]
return Grouping(key, [])
def __len__(self):
'''Support for the len() built-in function.
Returns:
The number of Groupings (keys) in the lookup.'''
return len(self._dict)
def __contains__(self, key):
'''Support for the 'in' membership operator.
Args:
key: The key for which membership will be tested.
Returns:
True if the Lookup contains a Grouping for the specified key,
otherwise False.'''
return key in self._dict
def __repr__(self):
'''Support for the repr() built-in function.
Returns:
The official string representation of the object.
'''
return 'Lookup({d})'.format(d=list(self._generate_repr_result()))
def _generate_repr_result(self):
for key in self._dict:
for value in self._dict[key]:
yield (key, value)
def apply_result_selector(self, selector=lambda key, sequence: sequence):
return self._create(self._generate_apply_result_selector(selector))
def _generate_apply_result_selector(self, selector):
for grouping in self:
yield selector(grouping.key, grouping)
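    # A hedged usage sketch (assumes the query() initiator from this package):
    #
    #   >>> lookup = query(['apple', 'avocado', 'banana']).to_lookup(lambda s: s[0])
    #   >>> lookup['a'].to_list()
    #   ['apple', 'avocado']
    #   >>> 'b' in lookup, len(lookup)
    #   (True, 2)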
class Grouping(Queryable):
'''A collection of objects which share a common key.
All standard query operators may be used on a Grouping.
Note: It is not intended that clients should directly create Grouping
objects. Instances of this class are retrieved from Lookup objects.
'''
def __init__(self, key, items):
'''Create a Grouping with a given key and a collection of members.
Args:
key: The key corresponding to this Grouping
items: An iterable collection of the members of the group.
'''
self._key = key
sequence = list(items)
super(Grouping, self).__init__(sequence)
key = property(lambda self: self._key,
doc="The key common to all elements.")
def __len__(self):
'''The number of items in the Grouping.'''
return self.count()
def __eq__(self, rhs):
'''Determine value equality with another grouping.
Args:
rhs: The object on the right-hand-side of the comparison must
support a property called 'key' and be iterable.
Returns:
True if the keys and sequences are equal, otherwise False.
'''
return self.key == rhs.key and self.sequence_equal(rhs)
def __ne__(self, rhs):
'''Determine value inequality with another grouping.
Args:
rhs: The object on the right-hand-side of the comparison must
support a property called 'key' and be iterable.
Returns:
True if the keys or sequences are not equal, otherwise False.
'''
return self.key != rhs.key or not self.sequence_equal(rhs)
def __repr__(self):
return 'Grouping(key={key}, items={items})'.format(key=repr(self._key),
items=repr(self.to_list()))
|
DickJC123/mxnet | refs/heads/master | python/mxnet/model.py | 9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
import os
import logging
from collections import namedtuple
import numpy as np
from . import ndarray as nd
from . import symbol as sym
from . import kvstore as kvs
from .context import cpu
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
assert kv.is_capable(kvs.KVStoreBase.OPTIMIZER), \
"KVStore with sparse weight requires optimizer support. " \
"However, type(kv) does not support optimizer. " \
"Please consider other kvstore backends (e.g. dist_device) instead."
return (kv, True)
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStoreBase):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
else:
update_on_kvstore &= kv.is_capable(kvs.KVStoreBase.OPTIMIZER)
return (kv, update_on_kvstore)
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
if not update_on_kvstore or arg_params[name].stype != 'default':
kvstore.init(name, arg_params[name])
else:
kvstore.broadcast(name, arg_params[name], out=param_on_devs)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
# pull back the weights
kvstore.pushpull(valid_param_names[start:end], valid_grad_arrays[start:end],
out=valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
# pull back the weights
if grad_list[0].stype == 'default' and arg_list[0].stype == 'default':
kvstore.pushpull(name, grad_list, out=arg_list, priority=-index)
else:
kvstore.push(name, grad_list, priority=-index)
kvstore.pull(name, out=arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
if grad_list[0].stype == 'default' and arg_list[0].stype == 'default':
kvstore.pushpull(name, grad_list, priority=-index)
else:
kvstore.push(name, grad_list, priority=-index)
kvstore.pull(name, out=grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
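# A hedged usage sketch, not part of the original module; the network, the
# parameter values and the 'my-model' prefix below are placeholders:
#
#   >>> import mxnet as mx
#   >>> net = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=1, name='fc')
#   >>> args = {'fc_weight': mx.nd.zeros((1, 4)), 'fc_bias': mx.nd.zeros((1,))}
#   >>> save_checkpoint('my-model', 1, net, args, aux_params={})
#   # writes my-model-symbol.json and my-model-0001.params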
def load_params(prefix, epoch):
"""Load params from a file
"""
save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
arg_params = {}
aux_params = {}
if not save_dict:
logging.warning("Params file '%s' is empty", '%s-%04d.params' % (prefix, epoch))
return (arg_params, aux_params)
for k, v in save_dict.items():
tp, name = k.split(":", 1)
if tp == "arg":
arg_params[name] = v
if tp == "aux":
aux_params[name] = v
return (arg_params, aux_params)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
arg_params, aux_params = load_params(prefix, epoch)
return (symbol, arg_params, aux_params)
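# A hedged usage sketch; the prefix and epoch are placeholders and must match
# files previously written by save_checkpoint() above:
#
#   >>> sym, arg_params, aux_params = load_checkpoint('my-model', 1)
#   # reads my-model-symbol.json and my-model-0001.params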
|
gbaty/pyside2 | refs/heads/master | tests/pysidetest/typedef_signal_test.py | 3 |
import unittest
from PySide2.QtCore import QObject
from testbinding import TestObject
class Receiver(QObject):
def __init__(self):
QObject.__init__(self)
self.received = None
def slot(self, value):
self.received = value
class TypedefSignal(unittest.TestCase):
def testTypedef(self):
obj = TestObject(0)
receiver = Receiver()
obj.signalWithTypedefValue.connect(receiver.slot)
obj.emitSignalWithTypedefValue(2)
self.assertEqual(receiver.received.value, 2)
if __name__ == '__main__':
unittest.main()
|
dbaxa/django | refs/heads/master | tests/gis_tests/gis_migrations/migrations/__init__.py | 12133432 | |
thisiscam/django-db-obfuscate-id | refs/heads/master | db_obfuscate/management/commands/__init__.py | 12133432 | |
vanabo/mattress | refs/heads/master | src/newsletter/migrations/__init__.py | 12133432 | |
patpatpatpatpat/digestus | refs/heads/master | updates/__init__.py | 12133432 | |
libracore/erpnext | refs/heads/v12 | erpnext/education/doctype/course_activity/__init__.py | 12133432 | |
praekelt/mtvc-api-client | refs/heads/master | mtvc_client/client.py | 1 | import logging
import hammock
from requests.auth import AuthBase
logger = logging.getLogger(__name__)
class APIClientException(Exception):
"""
Exception class that contains the error code and message from
the MTVC
"""
def __init__(self, error_code=None, error_message=None, **kwargs):
self.error_code = error_code
self.error_message = error_message
self.__dict__.update(kwargs)
def __str__(self):
return '[%(error_code)s] %(error_message)s' % (self.__dict__)
class APIClientAuthentication(AuthBase):
"""
Attaches Tastypie-style HTTP ApiKey Authentication to the given
Request object.
"""
def __init__(self, username, key):
self.username = username
self.key = key
def __call__(self, r):
r.headers['Authorization'] = 'ApiKey %s:%s' % (self.username, self.key)
return r
class APIClient(object):
def __init__(self, offering_id, host, username, key, port=80,
version='v1'):
self.api = hammock.Hammock(
'http://%s:%s/api/%s' % (host, port, version),
auth=APIClientAuthentication(username, key),
append_slash=True)
self.offering_id = offering_id
def from_json_response(self, response):
if response.status_code < 200 or response.status_code >= 300:
error_context = {
'status_code': response.status_code,
'status_reason': response.reason,
'error_code': response.status_code,
'error_message': response.reason,
'content': response.content,
}
try:
error_context.update(response.json())
except ValueError:
pass
logger.error('MTVC Server error %s: %s' % (
response.status_code, error_context))
raise APIClientException(**error_context)
try:
return response.json()
except ValueError:
# the server did not return JSON, so just return {}
return {}
def get_countries(self):
return self.from_json_response(self.api.country.GET())
def get_channels(self, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.channel.GET(params=params))
def get_shows(self, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.show.GET(params=params))
def get_showchannels(self, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.showchannel.GET(params=params))
def get_clips(self, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.clip.GET(params=params))
def get_clip(self, clip_id, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.clip(clip_id).GET(params=params))
def get_epg(self, channel_id, **kwargs):
params = {'days': 1}
params.update(kwargs)
return self.from_json_response(
self.api.channel(channel_id).GET(params=params))
def get_banners(self, **kwargs):
params = {'offering__slug': self.offering_id}
params.update(kwargs)
return self.from_json_response(
self.api.banner.GET(params=params))
def get_stream_url(
self, content_type, content_id, user_agent, msisdn, client_ip):
return self.from_json_response(
self.api(content_type)(content_id).play.GET(
params={'offering__slug': self.offering_id},
headers={
'User-Agent': user_agent,
'X-MSISDN': msisdn,
'X-FORWARDED-FOR': client_ip,
}))
def get_account_info(self, msisdn, client_ip):
return self.from_json_response(self.api.subscriber(msisdn).GET())
def get_profile_schema(self):
return self.from_json_response(self.api.subscriberprofile.schema.GET(
params={'offering__slug': self.offering_id}))
def post_profile(self, msisdn, client_ip, data):
return self.from_json_response(self.api.subscriberprofile.POST(
headers={
'X-MSISDN': msisdn,
'X-FORWARDED-FOR': client_ip,
'Content-Type': 'application/json'},
params={'offering__slug': self.offering_id},
data=data))
def get_transaction_schema(self):
return self.from_json_response(
self.api.subscribertransaction.schema.GET(
params={'offering__slug': self.offering_id}))
def post_transaction(self, user_agent, msisdn, client_ip, data):
return self.from_json_response(self.api.subscribertransaction.POST(
headers={
'User-Agent': user_agent,
'X-MSISDN': msisdn,
'X-FORWARDED-FOR': client_ip,
'Content-Type': 'application/json'},
params={'offering__slug': self.offering_id},
data=data))
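# A hedged usage sketch, not part of the original module; the host,
# credentials and offering id below are placeholders:
#
#   client = APIClient(offering_id='my-offering', host='mtvc.example.com',
#                      username='apiuser', key='secret-api-key')
#   channels = client.get_channels()
#   epg = client.get_epg(channel_id='123', days=2)
#
# Server-side errors surface as APIClientException, carrying the error_code
# and error_message returned by the MTVC.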
|
ucrawler/cp-uc | refs/heads/master | libs/CodernityDB/env.py | 83 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
It's CodernityDB environment.
Handles internal informations.'
"""
cdb_environment = {
'mode': 'normal'
}
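# A hedged usage sketch: other CodernityDB modules import this dict and
# mutate it to change runtime behaviour. The 'threads' value below mirrors
# what a thread-safe database wrapper would be expected to set and is shown
# for illustration only.
#
#   from CodernityDB.env import cdb_environment
#   cdb_environment['mode'] = 'threads'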
|
remitamine/youtube-dl | refs/heads/master | youtube_dl/extractor/vgtv.py | 21 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .xstream import XstreamIE
from ..utils import (
ExtractorError,
float_or_none,
try_get,
)
class VGTVIE(XstreamIE):
IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet'
_GEO_BYPASS = False
_HOST_TO_APPNAME = {
'vgtv.no': 'vgtv',
'bt.no/tv': 'bttv',
'aftenbladet.no/tv': 'satv',
'fvn.no/fvntv': 'fvntv',
'aftenposten.no/webtv': 'aptv',
'ap.vgtv.no/webtv': 'aptv',
'tv.aftonbladet.se/abtv': 'abtv',
'www.aftonbladet.se/tv': 'abtv',
}
_APP_NAME_TO_VENDOR = {
'vgtv': 'vgtv',
'bttv': 'bt',
'satv': 'sa',
'fvntv': 'fvn',
'aptv': 'ap',
'abtv': 'ab',
}
_VALID_URL = r'''(?x)
(?:https?://(?:www\.)?
(?P<host>
%s
)
/?
(?:
(?:\#!/)?(?:video|live)/|
embed?.*id=|
a(?:rticles)?/
)|
(?P<appname>
%s
):)
(?P<id>\d+)
''' % ('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys()))
_TESTS = [
{
# streamType: vod
'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu',
'md5': 'b8be7a234cebb840c0d512c78013e02f',
'info_dict': {
'id': '84196',
'ext': 'mp4',
'title': 'Hevnen er søt: Episode 10 - Abu',
'description': 'md5:e25e4badb5f544b04341e14abdc72234',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 648.000,
'timestamp': 1404626400,
'upload_date': '20140706',
'view_count': int,
},
},
{
# streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
'info_dict': {
'id': '100764',
'ext': 'flv',
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 9103.0,
'timestamp': 1410113864,
'upload_date': '20140907',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'Video is no longer available',
},
{
# streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla',
'info_dict': {
'id': '113063',
'ext': 'mp4',
'title': 'V75 fra Solvalla 30.05.15',
'description': 'md5:b3743425765355855f88e096acc93231',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 25966,
'timestamp': 1432975582,
'upload_date': '20150530',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
'md5': 'fd828cd29774a729bf4d4425fe192972',
'info_dict': {
'id': '21039',
'ext': 'mp4',
'title': 'TRAILER: «SWEATSHOP» - I can´t take any more',
'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
'duration': 66,
'timestamp': 1417002452,
'upload_date': '20141126',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien',
'only_matching': True,
},
{
'url': 'http://ap.vgtv.no/webtv#!/video/111084/de-nye-bysyklene-lettere-bedre-gir-stoerre-hjul-og-feste-til-mobil',
'only_matching': True,
},
{
# geoblocked
'url': 'http://www.vgtv.no/#!/video/127205/inside-the-mind-of-favela-funk',
'only_matching': True,
},
{
'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
'only_matching': True,
},
{
'url': 'https://www.aftonbladet.se/tv/a/36015',
'only_matching': True,
},
{
'url': 'abtv:140026',
'only_matching': True,
},
{
'url': 'http://www.vgtv.no/video/84196/hevnen-er-soet-episode-10-abu',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname')
vendor = self._APP_NAME_TO_VENDOR[appname]
data = self._download_json(
'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website'
% (vendor, video_id, appname),
video_id, 'Downloading media JSON')
if data.get('status') == 'inactive':
raise ExtractorError(
'Video %s is no longer available' % video_id, expected=True)
info = {
'formats': [],
}
if len(video_id) == 5:
if appname == 'bttv':
info = self._extract_video_info('btno', video_id)
streams = data['streamUrls']
stream_type = data.get('streamType')
is_live = stream_type == 'live'
formats = []
hls_url = streams.get('hls')
if hls_url:
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4',
entry_protocol='m3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False))
hds_url = streams.get('hds')
if hds_url:
hdcore_sign = 'hdcore=3.7.0'
f4m_formats = self._extract_f4m_formats(
hds_url + '?%s' % hdcore_sign, video_id, f4m_id='hds', fatal=False)
if f4m_formats:
for entry in f4m_formats:
                    # URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
mp4_urls = streams.get('pseudostreaming') or []
mp4_url = streams.get('mp4')
if mp4_url:
mp4_urls.append(mp4_url)
for mp4_url in mp4_urls:
format_info = {
'url': mp4_url,
}
mobj = re.search(r'(\d+)_(\d+)_(\d+)', mp4_url)
if mobj:
tbr = int(mobj.group(3))
format_info.update({
'width': int(mobj.group(1)),
'height': int(mobj.group(2)),
'tbr': tbr,
'format_id': 'mp4-%s' % tbr,
})
formats.append(format_info)
info['formats'].extend(formats)
if not info['formats']:
properties = try_get(
data, lambda x: x['streamConfiguration']['properties'], list)
if properties and 'geoblocked' in properties:
raise self.raise_geo_restricted(
countries=[host.rpartition('.')[-1].partition('/')[0].upper()])
self._sort_formats(info['formats'])
info.update({
'id': video_id,
'title': self._live_title(data['title']) if is_live else data['title'],
'description': data['description'],
'thumbnail': data['images']['main'] + '?t[]=900x506q80',
'timestamp': data['published'],
'duration': float_or_none(data['duration'], 1000),
'view_count': data['displays'],
'is_live': is_live,
})
return info
class BTArticleIE(InfoExtractor):
IE_NAME = 'bt:article'
IE_DESC = 'Bergens Tidende Articles'
_VALID_URL = r'https?://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html'
_TEST = {
'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html',
'md5': '2acbe8ad129b3469d5ae51b1158878df',
'info_dict': {
'id': '23199',
'ext': 'mp4',
'title': 'Alrekstad internat',
'description': 'md5:dc81a9056c874fedb62fc48a300dac58',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 191,
'timestamp': 1289991323,
'upload_date': '20101117',
'view_count': int,
},
}
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
video_id = self._search_regex(
r'<video[^>]+data-id="(\d+)"', webpage, 'video id')
return self.url_result('bttv:%s' % video_id, 'VGTV')
class BTVestlendingenIE(InfoExtractor):
IE_NAME = 'bt:vestlendingen'
IE_DESC = 'Bergens Tidende - Vestlendingen'
_VALID_URL = r'https?://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588',
'md5': 'd7d17e3337dc80de6d3a540aefbe441b',
'info_dict': {
'id': '86588',
'ext': 'mov',
'title': 'Otto Wollertsen',
'description': 'Vestlendingen Otto Fredrik Wollertsen',
'timestamp': 1430473209,
'upload_date': '20150501',
},
'skip': '404 Error',
}, {
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255',
'md5': 'a2893f8632e96389f4bdf36aa9463ceb',
'info_dict': {
'id': '86255',
'ext': 'mov',
'title': 'Du må tåle å fryse og være sulten',
'description': 'md5:b8046f4d022d5830ddab04865791d063',
'upload_date': '20150321',
'timestamp': 1426942023,
},
}]
def _real_extract(self, url):
return self.url_result('bttv:%s' % self._match_id(url), 'VGTV')
|
xros/apm_planner | refs/heads/master | libs/mavlink/share/pyshared/pymavlink/examples/apmsetrate.py | 30 | #!/usr/bin/env python
'''
set stream rate on an APM
'''
import sys, struct, time, os
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from optparse import OptionParser
parser = OptionParser("apmsetrate.py [options]")
parser.add_option("--baudrate", dest="baudrate", type='int',
help="master port baud rate", default=115200)
parser.add_option("--device", dest="device", default=None, help="serial device")
parser.add_option("--rate", dest="rate", default=4, type='int', help="requested stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--showmessages", dest="showmessages", action='store_true',
help="show incoming messages", default=False)
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavlink10 as mavlink
else:
import mavlink
import mavutil
if opts.device is None:
print("You must specify a serial device")
sys.exit(1)
def wait_heartbeat(m):
'''wait for a heartbeat so we know the target system IDs'''
print("Waiting for APM heartbeat")
m.wait_heartbeat()
    print("Heartbeat from APM (system %u component %u)" % (m.target_system, m.target_component))
def show_messages(m):
'''show incoming mavlink messages'''
while True:
msg = m.recv_match(blocking=True)
if not msg:
return
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
else:
print(msg)
# create a mavlink serial instance
master = mavutil.mavlink_connection(opts.device, baud=opts.baudrate)
# wait for the heartbeat msg to find the system ID
wait_heartbeat(master)
print("Sending all stream request for rate %u" % opts.rate)
for i in range(0, 3):
master.mav.request_data_stream_send(master.target_system, master.target_component,
mavlink.MAV_DATA_STREAM_ALL, opts.rate, 1)
if opts.showmessages:
show_messages(master)
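# Example invocation; the device path, baud rate and stream rate below are
# illustrative only:
#
#   python apmsetrate.py --device /dev/ttyUSB0 --baudrate 57600 --rate 10 --showmessages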
|
Dhivyap/ansible | refs/heads/devel | test/units/modules/storage/netapp/test_na_ontap_vscan_on_access_policy.py | 23 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_vscan_scanner_pool '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vscan_on_access_policy \
import NetAppOntapVscanOnAccessPolicy as policy_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
HAS_NETAPP_ZAPI_MSG = "pip install netapp_lib is required"
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'policy':
xml = self.build_access_policy_info(self.params)
self.xml_out = xml
return xml
@staticmethod
def build_access_policy_info(policy_details):
xml = netapp_utils.zapi.NaElement('xml')
attributes = {'num-records': 1,
'attributes-list': {'vscan-on-access-policy-info': {'policy-name': policy_details['policy_name']}}}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' Unit tests for na_ontap_job_schedule '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_access_policy = {
'state': 'present',
'vserver': 'test_vserver',
'policy_name': 'test_carchi'
}
def mock_args(self):
return {
'state': self.mock_access_policy['state'],
'vserver': self.mock_access_policy['vserver'],
'policy_name': self.mock_access_policy['policy_name'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_policy_mock_object(self, kind=None):
policy_obj = policy_module()
if kind is None:
policy_obj.server = MockONTAPConnection()
else:
policy_obj.server = MockONTAPConnection(kind='policy', data=self.mock_access_policy)
return policy_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
policy_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_policy(self):
set_module_args(self.mock_args())
result = self.get_policy_mock_object().exists_access_policy()
assert not result
def test_get_existing_scanner(self):
set_module_args(self.mock_args())
result = self.get_policy_mock_object('policy').exists_access_policy()
assert result
def test_successfully_create(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_policy_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_policy_mock_object('policy').apply()
assert exc.value.args[0]['changed']
def test_successfully_delete(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_policy_mock_object('policy').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_policy_mock_object().apply()
assert not exc.value.args[0]['changed']
|
Filechaser/nzbToMedia | refs/heads/master | libs/guessit/rules/properties/bonus.py | 34 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bonus property
"""
from rebulk.remodule import re
from rebulk import Rebulk, AppendMatch, Rule
from .title import TitleFromPosition
from ..common.formatters import cleanup
from ..common.validators import seps_surround
def bonus():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
rebulk.regex(r'x(\d+)', name='bonus', private_parent=True, children=True, formatter=int,
validator={'__parent__': lambda match: seps_surround},
conflict_solver=lambda match, conflicting: match
if conflicting.name in ['video_codec', 'episode'] and 'bonus-conflict' not in conflicting.tags
else '__default__')
rebulk.rules(BonusTitleRule)
return rebulk
class BonusTitleRule(Rule):
"""
Find bonus title after bonus.
"""
dependency = TitleFromPosition
consequence = AppendMatch
properties = {'bonus_title': [None]}
def when(self, matches, context):
bonus_number = matches.named('bonus', lambda match: not match.private, index=0)
if bonus_number:
filepath = matches.markers.at_match(bonus_number, lambda marker: marker.name == 'path', 0)
hole = matches.holes(bonus_number.end, filepath.end + 1, formatter=cleanup, index=0)
if hole and hole.value:
hole.name = 'bonus_title'
return hole
|
divya-csekar/flask-microblog-server | refs/heads/master | flask/Lib/site-packages/sqlalchemy/orm/mapper.py | 22 | # orm/mapper.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
import types
import weakref
from itertools import chain
from collections import deque
from .. import sql, util, log, exc as sa_exc, event, schema, inspection
from ..sql import expression, visitors, operators, util as sql_util
from . import instrumentation, attributes, exc as orm_exc, loading
from . import properties
from .interfaces import MapperProperty, _InspectionAttr, _MappedAttribute
from .base import _class_to_mapper, _state_mapper, class_mapper, \
state_str, _INSTRUMENTOR
from .path_registry import PathRegistry
import sys
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE')
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(_InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`.Mapper` which maintains it can be acquired
using the :func:`.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
def __init__(self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
"""Return a new :class:`~.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`.Table` produced as a result of the ``__tablename__``
and :class:`.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
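As a minimal sketch (assuming a classically mapped ``User`` class
and a ``user_table`` with ``id`` and ``name`` columns)::

    mapper(User, user_table, column_prefix='_')

The columns would then be available on instances as ``User._id``
and ``User._name``.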
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
inserted or updated, which can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`.Mapper`. **Deprecated.**
Please see :class:`.MapperEvents`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
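For instance, a hypothetical mapping that maps only two of a
table's columns might look like::

    mapper(User, user_table,
        include_properties=['user_id', 'user_name'])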
:param inherits: A mapped class or the corresponding :class:`.Mapper`
of one indicating a superclass to which this :class:`.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphanable object has been flushed yet.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
.. versionadded:: 0.8 - the consideration of a pending object as
an "orphan" has been modified to more closely match the
behavior as that of persistent objects, which is that the object
is expunged from the :class:`.Session` as soon as it is
de-associated from any of its orphan-enabled parents. Previously,
the pending object would be expunged only if de-associated
from all of its orphan-enabled parents. The new flag
``legacy_is_orphan`` is added to :func:`.orm.mapper` which
re-establishes the legacy behavior.
:param non_primary: Specify that this :class:`.Mapper` is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
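A sketch of usage, assuming a ``user_table`` with a ``name``
column::

    mapper(User, user_table, order_by=user_table.c.name)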
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`.Column` object that's
present in the mapped :class:`.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
.. versionchanged:: 0.7.4
``polymorphic_on`` may be specified as a SQL expression,
or refer to any attribute configured with
:func:`.column_property`, or to the string name of one.
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that :class:`.Column`
objects present in
the mapped :class:`.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`.Column` objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
check whether an UPDATE statement matches the last known
version id; if it does not, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
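For example, a scheme based on hex UUID strings (a sketch which
assumes the version column is a ``String(32)``)::

    import uuid

    mapper(MyClass, my_table,
        version_id_col=my_table.c.version_uuid,
        version_id_generator=lambda version: uuid.uuid4().hex)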
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
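As a brief illustration using Declarative (a sketch; ``Employee``
is assumed to be the base of a joined-table hierarchy), loading of
all subclasses at once can be requested with::

    class Employee(Base):
        __tablename__ = 'employee'

        id = Column(Integer, primary_key=True)
        type = Column(String(50))

        __mapper_args__ = {
            'polymorphic_on': type,
            'polymorphic_identity': 'employee',
            'with_polymorphic': '*'
        }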
"""
self.class_ = util.assert_arg_type(class_, type, 'class_')
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`.Selectable` which this :class:`.Mapper` manages.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`~.Mapper.mapped_table`.
"""
mapped_table = None
"""The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
Typically an instance of :class:`.Table`, :class:`.Join`, or
:class:`.Alias`.
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
mappers, the mapped table is the same as the "local" table.
For joined-table inheritance mappers, mapped_table references the
full :class:`.Join` representing full rows for this particular
subclass. For single-table inheritance mappers, mapped_table
references the base table.
.. seealso::
:attr:`~.Mapper.local_table`.
"""
inherits = None
"""References the :class:`.Mapper` which this :class:`.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`.Table` objects
which this :class:`.Mapper` is aware of.
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias`
representing a :class:`.Select`, the individual :class:`.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`.Column` objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`.Mapper`.
This list is against the selectable in :attr:`~.Mapper.mapped_table`. In
the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a :class:`.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`.Mapper`
features a ``primary_key`` argument that can override what the
:class:`.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`.Mapper` is a single table
inheritance mapper.
:attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`~.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`~.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`~.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`.Mapper`. In an inheritance scenario, it references
the :class:`.Mapper` which is parent to all other :class:`.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`.Column` or other scalar expression
objects maintained by this :class:`.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`.Table` object, except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`~.orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
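For example, a validator declared on a hypothetical mapped class
using the :func:`~.orm.validates` decorator::

    from sqlalchemy.orm import validates

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        email = Column(String(50))

        @validates('email')
        def validate_email(self, key, value):
            assert '@' in value
            return value

would result in this dictionary containing the key ``'email'``,
mapped to the validation method along with its options.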
"""
c = None
"""A synonym for :attr:`~.Mapper.columns`."""
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inherting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'" %
(self.class_.__name__, self.inherits.class_.__name__))
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" %
(np, self.class_.__name__, np))
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.mapped_table = self.inherits.mapped_table
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.mapped_table = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table,
self.local_table)
self.mapped_table = sql.join(
self.inherits.mapped_table,
self.local_table,
self.inherit_condition)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = \
sql_util.criterion_as_pairs(
self.mapped_table.onclause,
consider_as_foreign_keys=fks)
else:
self.mapped_table = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif self.inherits.version_id_col is not None and \
self.version_id_col is not self.inherits.version_id_col:
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning." %
(self.version_id_col.description,
self.inherits.version_id_col.description)
)
if self.order_by is False and \
not self.concrete and \
self.inherits.order_by is not False:
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
else:
self._all_tables = set()
self.base_mapper = self
self.mapped_table = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.mapped_table is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a mapped_table specified."
% self)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == '*':
self.with_polymorphic = ('*', None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
if self.configured:
self._expire_memoizations()
def _set_concrete_base(self, mapper):
"""Set the given :class:`.Mapper` as the 'inherits' for this
:class:`.Mapper`, assuming this :class:`.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class_ and entity name.
Subsequent calls to ``class_mapper()`` for the class_/entity
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes." %
self.class_)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self)
# The remaining members can be added by any mapper,
# whether or not an entity name ("e_name") is in use.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
event.listen(manager, 'resurrect', _event_on_resurrect, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
if hasattr(method, '__sa_reconstructor__'):
self._reconstructor = method
event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
if hasattr(self, '_configure_failed'):
del self._configure_failed
if not self.non_primary and \
self.class_manager is not None and \
self.class_manager.is_mapped and \
self.class_manager.mapper is self:
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.mapped_table)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(chain(*[
col.proxy_set for col in
self._columntoproperty]))
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.mapped_table])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = \
util.ordered_column_set(t.primary_key).\
intersection(pk_cols)
self._cols_by_table[t] = \
util.ordered_column_set(t.c).\
intersection(all_cols)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if not hasattr(col, 'table') or
col.table not in self._cols_by_table)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif self.mapped_table not in self._pks_by_table or \
len(self._pks_by_table[self.mapped_table]) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
elif self.local_table not in self._pks_by_table and \
isinstance(self.local_table, schema.Table):
util.warn("Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description)
if self.inherits and \
not self.concrete and \
not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or mapped_table pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[self.mapped_table.corresponding_column(c) for c in
self._primary_key_argument],
ignore_nonexistent_tables=True)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.mapped_table],
ignore_nonexistent_tables=True)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.mapped_table.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or '') + column.key
if self._should_exclude(
column.key, column_key,
local=self.local_table.c.contains_column(column),
column=column
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key,
column,
init=False,
setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError:
raise sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
polymorphic_key = prop.key
self.polymorphic_on = prop.columns[0]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(self.polymorphic_on,
properties.ColumnProperty):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on")
prop = self.polymorphic_on
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's mapped_table
col = self.mapped_table.corresponding_column(
self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either mapped_table or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None or
self.with_polymorphic[1].
corresponding_column(col) is None):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, 'key', None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" %
col.key)
else:
self.polymorphic_on = col = \
col.label("_sa_polymorphic_on")
key = col.key
self._configure_property(
key,
properties.ColumnProperty(col,
_instrument=instrument),
init=init, setparent=True)
polymorphic_key = key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.mapped_table is mapper.mapped_table:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = \
self.mapped_table.corresponding_column(
mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
# instance's mapper is used so is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = \
mapper._set_polymorphic_identity
self._validate_polymorphic_identity = \
mapper._validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state, dict_,
state.manager.mapper.polymorphic_identity,
None)
def _validate_polymorphic_identity(mapper, state, dict_):
if polymorphic_key in dict_ and \
dict_[polymorphic_key] not in \
mapper._acceptable_polymorphic_identities:
util.warn(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly" % (
state_str(state),
dict_[polymorphic_key]
)
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = \
_validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.mapped_table is self.mapped_table:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.mapped_table.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.mapped_table._reset_exported()
col = self.mapped_table.corresponding_column(
prop.columns[0])
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, '_readonly_props') and \
(not hasattr(col, 'table') or
col.table not in self._cols_by_table):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if hasattr(self, '_cols_by_table') and \
col.table in self._cols_by_table and \
col not in self._cols_by_table[col.table]:
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, '_is_polymorphic_discriminator'):
prop._is_polymorphic_discriminator = \
(col is self.polymorphic_on or
prop.columns[0] is self.polymorphic_on)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and \
getattr(self._props[key], '_mapped_by_synonym', False):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if key in self._props and \
not isinstance(prop, properties.ColumnProperty) and \
not isinstance(self._props[key], properties.ColumnProperty):
util.warn("Property %s on %s being replaced with new "
"property %s; the old property will be discarded" % (
self._props[key],
self,
prop,
))
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop))
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
not self._inherits_equated_pairs or
(prop.columns[0], column) not in self._inherits_equated_pairs
) and \
not prop.columns[0].shares_lineage(column) and \
prop.columns[0] is not self.version_id_col and \
column is not self.version_id_col:
warn_only = prop.parent is not self
msg = ("Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key))
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log("inserting column to existing list "
"in properties.ColumnProperty %s" % (key))
return prop
elif prop is None or isinstance(prop,
properties.ConcreteInheritedProperty):
mapped_column = []
for c in columns:
mc = self.mapped_table.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.mapped_table._reset_exported()
mc = self.mapped_table.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c))
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." %
(key, self, column.key, prop))
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
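A hypothetical usage, adding a relationship to an already-mapped
class (assumes ``User`` and ``Address`` are mapped classes), might
look like::

    from sqlalchemy.orm import relationship

    User.__mapper__.add_property(
        'addresses', relationship(Address))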
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return "(" + self.class_.__name__ + \
"|" + \
(self.local_table is not None and
self.local_table.description or
str(self.local_table)) +\
(self.non_primary and
"|non-primary" or "") + ")"
def _log(self, msg, *args):
self.logger.info(
"%s " + msg, *((self._log_desc,) + args)
)
def _log_debug(self, msg, *args):
self.logger.debug(
"%s " + msg, *((self._log_desc,) + args)
)
def __repr__(self):
return '<Mapper at 0x%x; %s>' % (
id(self), self.class_.__name__)
def __str__(self):
return "Mapper|%s|%s%s" % (
self.class_.__name__,
self.local_table is not None and
self.local_table.description or None,
self.non_primary and "|non-primary" or ""
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError:
raise sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key))
def get_property_by_column(self, column):
"""Given a :class:`.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == '*':
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" %
(m, self))
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(sql_util.find_tables(selectable,
include_aliases=True))
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mappers'
mapped tables.
"""
from_obj = self.mapped_table
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used.")
elif not m.single:
if innerjoin:
from_obj = from_obj.join(m.local_table,
m.inherit_condition)
else:
from_obj = from_obj.outerjoin(m.local_table,
m.inherit_condition)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and \
self.inherits and \
self.polymorphic_on is not None:
return self.polymorphic_on.in_(
m.polymorphic_identity
for m in self.self_and_descendants)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.mapped_table
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable),
False)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`.Mapper` objects included in the
default "polymorphic" query.
"""
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
by default.
Normally, this is equivalent to :attr:`.mapped_table`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(self, spec=None, selectable=False,
innerjoin=False):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers,
innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(self._iterate_polymorphic_properties(
self._with_polymorphic_mappers))
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(*[
list(mapper.iterate_properties) for mapper in
[self] + mappers
])
):
if getattr(c, '_is_polymorphic_discriminator', False) and \
(self.polymorphic_on is None or
c.columns[0] is not self.polymorphic_on):
continue
yield c
@util.memoized_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has a ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
:class:`.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
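For example, assuming a mapped ``User`` class with a ``name``
attribute, the namespace might be inspected as::

    from sqlalchemy import inspect

    mapper = inspect(User)
    name_prop = mapper.attrs.name
    for prop in mapper.attrs:
        print(prop.key)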
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
@util.memoized_property
def all_orm_descriptors(self):
"""A namespace of all :class:`._InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`._InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`._InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`.Mapper.attrs`.
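        As an illustrative sketch only (``User`` again stands in for an
        arbitrary mapped class), the namespace can be inspected roughly
        as follows::

            from sqlalchemy import inspect

            insp = inspect(User)
            for key in insp.all_orm_descriptors.keys():
                print(key, insp.all_orm_descriptors[key].extension_type)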
.. versionadded:: 0.8.0
.. seealso::
:attr:`.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes()))
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""Return a namespace of all :class:`.RelationshipProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(util.OrderedDict(
(k, v) for k, v in self._props.items()
if isinstance(v, type_)
))
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key]
return sql.and_(*[k == v for (k, v) in params]), \
util.column_dict(params)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all *equivalent* columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, i.e.
{
tablea.col1:
set([tableb.col1, tablec.col1]),
tablea.col2:
set([tabled.col2])
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {},
{'binary': visit_binary})
return result
def _is_userland_descriptor(self, obj):
if isinstance(obj, (_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement)):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(assigned_name, None) is not None \
and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]):
return True
else:
if getattr(self.class_, assigned_name, None) is not None \
and self._is_userland_descriptor(
getattr(self.class_, assigned_name)):
return True
if self.include_properties is not None and \
name not in self.include_properties and \
(column is None or column not in self.include_properties):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and \
(
name in self.exclude_properties or
(column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def identity_key_from_row(self, row, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are
mapped by this :class:`.Mapper` should be locatable in the row,
preferably via the :class:`.Column` object directly (as is the case
when a :func:`.select` construct is executed), or via string names of
the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return self._identity_class, \
tuple(row[column] for column in pk_cols)
def identity_key_from_primary_key(self, primary_key):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
"""
return self._identity_class, tuple(primary_key)
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
return self.identity_key_from_primary_key(
self.primary_key_from_instance(instance))
def _identity_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return self._identity_class, tuple([
manager[self._columntoproperty[col].key].
impl.get(state, dict_, attributes.PASSIVE_OFF)
for col in self.primary_key
])
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
return self._primary_key_from_state(state)
def _primary_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return [
manager[self._columntoproperty[col].key].
impl.get(state, dict_, attributes.PASSIVE_OFF)
for col in self.primary_key
]
def _get_state_attr_by_column(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(state, dict_, column)
def _get_committed_state_attr_by_column(
self,
state,
dict_,
column,
passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.\
get_committed_value(state, dict_, passive=passive)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(chain(
*[sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns]
))
if self.base_mapper.local_table in tables:
return None
class ColumnsNotAvailable(Exception):
pass
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state, state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if leftval is attributes.PASSIVE_NO_RESULT or leftval is None:
raise ColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval,
type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state, state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if rightval is attributes.PASSIVE_NO_RESULT or \
rightval is None:
raise ColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval,
type_=binary.right.type)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table,
expression.TableClause):
return None
if start and not mapper.single:
allconds.append(visitors.cloned_traverse(
mapper.inherit_condition,
{},
{'binary': visit_binary}
)
)
except ColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def cascade_iterator(self, type_, state, halt_on=None):
"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type_:
The name of the cascade rule (i.e. save-update, delete,
etc.)
:param state:
          The lead InstanceState.  Child items will be processed per
          the relationships defined for this object's mapper.

        The yielded values include the object instances themselves; this
        provides a strong reference so that they don't fall out of scope
        immediately.
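        A rough illustration, assuming ``some_object`` is any mapped
        instance already in hand::

            from sqlalchemy import inspect

            state = inspect(some_object)
            for obj, obj_mapper, obj_state, obj_dict in \
                    state.mapper.cascade_iterator("save-update", state):
                print(obj)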
"""
visited_states = set()
prp, mpp = object(), object()
visitables = deque([(deque(self._props.values()), prp,
state, state.dict)])
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(prop.cascade_iterator(
type_, parent_state, parent_dict,
visited_states, halt_on))
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
instance, instance_mapper, corresponding_state, \
corresponding_dict = iterator.popleft()
yield instance, instance_mapper, \
corresponding_state, corresponding_dict
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
corresponding_dict))
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend([
(super_table, table)
for super_table in super_.tables
])
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if parent is not None and \
dep is not None and \
dep is not parent and \
dep.inherit_condition is not None:
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util._find_columns(
parent.inherit_condition))
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and \
cols.intersection(
util.reduce(set.union,
[l.proxy_set for l, r in
m._inherits_equated_pairs])
):
result[table].append((m, m._inherits_equated_pairs))
return result
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is handled internally.
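    When explicit invocation is desired, a minimal call looks like::

        from sqlalchemy.orm import configure_mappers

        configure_mappers()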
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
Mapper.dispatch(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_configure_failed', False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Original exception was: %s"
% mapper._configure_failed)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_)
except:
exc = sys.exc_info()[1]
if not hasattr(exc, '_configure_failed'):
mapper._configure_failed = exc
raise
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch(Mapper).after_configured()
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
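    A short sketch of typical use; the table and class names here are
    assumed examples only::

        from sqlalchemy import Column, Integer
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import reconstructor

        Base = declarative_base()

        class MyMappedClass(Base):
            __tablename__ = 'my_mapped_table'
            id = Column(Integer, primary_key=True)

            @reconstructor
            def init_on_load(self):
                # called in place of __init__ when the instance is
                # loaded from the database
                self.cached_data = []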
"""
fn.__sa_reconstructor__ = True
return fn
def validates(*names, **kw):
"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
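    A minimal sketch of typical use; the ``EmailAddress`` class and its
    ``email`` column are assumed example names::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import validates

        Base = declarative_base()

        class EmailAddress(Base):
            __tablename__ = 'email_address'
            id = Column(Integer, primary_key=True)
            email = Column(String(128))

            @validates('email')
            def validate_email(self, key, value):
                # reject obviously malformed addresses before assignment
                if '@' not in value:
                    raise ValueError("invalid email address")
                return value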
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
.. versionadded:: 0.7.7
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop('include_removes', False)
include_backrefs = kw.pop('include_backrefs', True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs
}
return fn
return wrap
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
instrumentation calls this one when InstanceState
is first generated, and is needed for legacy mutable
attributes to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
def _event_on_resurrect(state):
# re-populate the primary key elements
# of the dict based on the mapping.
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
for col, val in zip(instrumenting_mapper.primary_key, state.key[1]):
instrumenting_mapper._set_state_attr_by_column(
state, state.dict, col, val)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r" % (
column.table.name, column.name, column.key, prop))
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..." %
(column, self.mapper))
|
fabianrost84/cython | refs/heads/master | Cython/Build/Tests/TestInline.py | 5 | import os, tempfile
from Cython.Shadow import inline
from Cython.Build.Inline import safe_type
from Cython.TestUtils import CythonTest
try:
import numpy
has_numpy = True
except:
has_numpy = False
test_kwds = dict(force=True, quiet=True)
global_value = 100
class TestInline(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.test_kwds = dict(test_kwds)
if os.path.isdir('TEST_TMP'):
lib_dir = os.path.join('TEST_TMP','inline')
else:
lib_dir = tempfile.mkdtemp(prefix='cython_inline_')
self.test_kwds['lib_dir'] = lib_dir
def test_simple(self):
self.assertEquals(inline("return 1+2", **self.test_kwds), 3)
def test_types(self):
self.assertEquals(inline("""
cimport cython
return cython.typeof(a), cython.typeof(b)
""", a=1.0, b=[], **self.test_kwds), ('double', 'list object'))
def test_locals(self):
a = 1
b = 2
self.assertEquals(inline("return a+b", **self.test_kwds), 3)
def test_globals(self):
self.assertEquals(inline("return global_value + 1", **self.test_kwds), global_value + 1)
def test_no_return(self):
self.assertEquals(inline("""
a = 1
cdef double b = 2
cdef c = []
""", **self.test_kwds), dict(a=1, b=2.0, c=[]))
def test_def_node(self):
foo = inline("def foo(x): return x * x", **self.test_kwds)['foo']
self.assertEquals(foo(7), 49)
def test_pure(self):
import cython as cy
b = inline("""
b = cy.declare(float, a)
c = cy.declare(cy.pointer(cy.float), &b)
return b
""", a=3, **self.test_kwds)
self.assertEquals(type(b), float)
if has_numpy:
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
a[0,0] = 10
self.assertEquals(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]')
self.assertEquals(inline("return a[0,0]", a=a, **self.test_kwds), 10.0)
|
nicholasserra/sentry | refs/heads/master | src/sentry/migrations/0083_migrate_dupe_groups.py | 26 | # -*- coding: utf-8 -*-
from __future__ import print_function
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
from django.db.models import F
from collections import defaultdict
from sentry.db.models import create_or_update
from sentry.utils.query import RangeQuerySetWrapper
# We don't fully merge results because it's simply not worth it
for group in RangeQuerySetWrapper(orm['sentry.Group'].objects.all()):
# could be already migrated
if not orm['sentry.Group'].objects.filter(id=group.id).exists():
continue
matches = list(orm['sentry.Group'].objects.exclude(id=group.id).filter(
checksum=group.checksum, project=group.project))
if not matches:
continue
print("Merging duplicate events for %r" % (group,))
updates = defaultdict(int)
updates.update({
'first_seen': group.first_seen,
'last_seen': group.last_seen,
'active_at': group.active_at,
})
tag_updates = defaultdict(lambda: defaultdict(int))
counts = defaultdict(lambda: defaultdict(int))
for other in matches:
# migrate events first
orm['sentry.Event'].objects.filter(group=other).update(group=group)
updates['times_seen'] += other.times_seen
updates['users_seen'] += other.users_seen
updates['time_spent_total'] += other.time_spent_total
updates['time_spent_count'] += other.time_spent_count
for datecol in ('active_at', 'last_seen', 'first_seen'):
val = getattr(other, datecol)
if val and updates[datecol]:
updates[datecol] = max(val, updates[datecol])
elif val:
updates[datecol] = val
# determine missing tags
for tag in RangeQuerySetWrapper(orm['sentry.MessageFilterValue'].objects.filter(group=other)):
key = tag_updates[(tag.key, tag.value)]
                    key['times_seen'] += tag.times_seen
                    for datecol in ('last_seen', 'first_seen'):
                        val = getattr(tag, datecol)
                        if val and key[datecol]:
                            key[datecol] = max(val, key[datecol])
                        elif val:
                            key[datecol] = val
# determine counts
for count in RangeQuerySetWrapper(orm['sentry.MessageCountByMinute'].objects.filter(group=other)):
key = counts[count.date]
key['times_seen'] += count.times_seen
key['time_spent_total'] += count.time_spent_total
key['time_spent_count'] += count.time_spent_count
# migrate tags
for (key, value), data in tag_updates.iteritems():
defaults = {
'times_seen': F('times_seen') + data['times_seen'],
}
if 'last_seen' in data:
defaults['last_seen'] = data['last_seen']
if 'first_seen' in data:
defaults['first_seen'] = data['first_seen']
create_or_update(orm['sentry.MessageFilterValue'],
project=group.project,
group=group,
key=key,
value=value,
                    values=defaults,
)
orm['sentry.MessageFilterValue'].objects.filter(group__in=matches).delete()
# migrate counts
for date, data in counts.iteritems():
create_or_update(orm['sentry.MessageCountByMinute'],
project=group.project,
group=group,
date=date,
values={
'times_seen': F('times_seen') + data['times_seen'],
'time_spent_total': F('time_spent_total') + data['time_spent_total'],
'time_spent_count': F('time_spent_count') + data['time_spent_count'],
}
)
orm['sentry.MessageCountByMinute'].objects.filter(group__in=matches).delete()
orm['sentry.Group'].objects.filter(id=group.id).update(
times_seen=F('times_seen') + updates['times_seen'],
                users_seen=F('users_seen') + updates['users_seen'],
time_spent_total=F('time_spent_total') + updates['time_spent_total'],
time_spent_count=F('time_spent_count') + updates['time_spent_count'],
last_seen=updates['last_seen'],
first_seen=updates['first_seen'],
active_at=updates['active_at'],
)
for other in matches:
other.delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
sgerhart/ansible | refs/heads/maintenance_policy_module | lib/ansible/modules/network/eos/eos_static_route.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_static_route
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage static IP routes on Arista EOS network devices
description:
- This module provides declarative management of static
IP routes on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
address:
description:
- Network address with prefix of the static route.
required: true
aliases: ['prefix']
next_hop:
description:
- Next hop IP of the static route.
required: true
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: configure static route
eos_static_route:
address: 10.0.2.0/24
next_hop: 10.8.38.1
admin_distance: 2
- name: delete static route
eos_static_route:
address: 10.0.2.0/24
next_hop: 10.8.38.1
state: absent
- name: configure static routes using aggregate
eos_static_route:
aggregate:
- { address: 10.0.1.0/24, next_hop: 10.8.38.1 }
- { address: 10.0.3.0/24, next_hop: 10.8.38.1 }
- name: Delete static route using aggregate
eos_static_route:
aggregate:
- { address: 10.0.1.0/24, next_hop: 10.8.38.1 }
- { address: 10.0.3.0/24, next_hop: 10.8.38.1 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 10.0.2.0/24 10.8.38.1 3
- no ip route 10.0.2.0/24 10.8.38.1
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import is_masklen, validate_ip_address
from ansible.module_utils.network.common.utils import remove_default_spec, validate_prefix
from ansible.module_utils.network.eos.eos import get_config, load_config
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def is_address(value):
if value:
address = value.split('/')
if is_masklen(address[1]) and validate_ip_address(address[0]):
return True
return False
def is_hop(value):
if value:
if validate_ip_address(value):
return True
return False
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
address = w['address']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('no ip route %s %s' % (address, next_hop))
elif state == 'present' and w not in have:
commands.append('ip route %s %s %d' % (address, next_hop, admin_distance))
return commands
def map_params_to_obj(module, required_together=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_together(required_together, item)
d = item.copy()
obj.append(d)
else:
obj.append({
'address': module.params['address'].strip(),
'next_hop': module.params['next_hop'].strip(),
'admin_distance': module.params['admin_distance'],
'state': module.params['state']
})
return obj
def map_config_to_obj(module):
objs = []
try:
out = get_config(module, flags=['| include ip.route'])
except IndexError:
out = ''
if out:
lines = out.splitlines()
for line in lines:
obj = {}
add_match = re.search(r'ip route (\S+)', line, re.M)
if add_match:
address = add_match.group(1)
if is_address(address):
obj['address'] = address
hop_match = re.search(r'ip route {0} (\S+)'.format(address), line, re.M)
if hop_match:
hop = hop_match.group(1)
if is_hop(hop):
obj['next_hop'] = hop
dist_match = re.search(r'ip route {0} {1} (\d+)'.format(address, hop), line, re.M)
if dist_match:
distance = dist_match.group(1)
obj['admin_distance'] = int(distance)
else:
obj['admin_distance'] = 1
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
address=dict(type='str', aliases=['prefix']),
next_hop=dict(type='str'),
admin_distance=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['address'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['aggregate', 'address']]
required_together = [['address', 'next_hop']]
mutually_exclusive = [['aggregate', 'address']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
address = module.params['address']
if address is not None:
prefix = address.split('/')[-1]
if address and prefix:
if '/' not in address or not validate_ip_address(address.split('/')[0]):
module.fail_json(msg='{} is not a valid IP address'.format(address))
if not validate_prefix(prefix):
module.fail_json(msg='Length of prefix should be between 0 and 32 bits')
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
wartalker/BlogSpider | refs/heads/master | spider/src/mydm/middlewares/__init__.py | 3 | # -*- coding: utf-8 -*-
from .etag import ETagMiddleware
from .ifmodifysince import IfModifySinceMiddleware
__all__ = [
'ETagMiddleware',
'IfModifySinceMiddleware',
]
|
tyagi-prashant/letsencrypt | refs/heads/master | letsencrypt/proof_of_possession.py | 37 | """Proof of Possession Identifier Validation Challenge."""
import logging
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import zope.component
from acme import challenges
from acme import jose
from acme import other
from letsencrypt import interfaces
from letsencrypt.display import util as display_util
logger = logging.getLogger(__name__)
class ProofOfPossession(object): # pylint: disable=too-few-public-methods
"""Proof of Possession Identifier Validation Challenge.
Based on draft-barnes-acme, section 6.5.
:ivar installer: Installer object
:type installer: :class:`~letsencrypt.interfaces.IInstaller`
"""
def __init__(self, installer):
self.installer = installer
def perform(self, achall):
"""Perform the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
:returns: Response or None/False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if (achall.alg in [jose.HS256, jose.HS384, jose.HS512] or
not isinstance(achall.hints.jwk, achall.alg.kty)):
return None
for cert, key, _ in self.installer.get_all_certs_keys():
with open(cert) as cert_file:
cert_data = cert_file.read()
try:
cert_obj = x509.load_pem_x509_certificate(
cert_data, default_backend())
except ValueError:
try:
cert_obj = x509.load_der_x509_certificate(
cert_data, default_backend())
except ValueError:
logger.warn("Certificate is neither PER nor DER: %s", cert)
cert_key = achall.alg.kty(key=cert_obj.public_key())
if cert_key == achall.hints.jwk:
return self._gen_response(achall, key)
        # Is there a different prompt we should give the user?
code, key = zope.component.getUtility(
interfaces.IDisplay).input(
"Path to private key for identifier: %s " % achall.domain)
if code != display_util.CANCEL:
return self._gen_response(achall, key)
# If we get here, the key wasn't found
return False
def _gen_response(self, achall, key_path): # pylint: disable=no-self-use
"""Create the response to the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
        :param str key_path: Path to the key corresponding to the hinted
            public key.
:returns: Response or False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if os.path.isfile(key_path):
with open(key_path, 'rb') as key:
try:
# Needs to be changed if JWKES doesn't have a key attribute
jwk = achall.alg.kty.load(key.read())
sig = other.Signature.from_msg(achall.nonce, jwk.key,
alg=achall.alg)
except (IndexError, ValueError, TypeError, jose.errors.Error):
return False
return challenges.ProofOfPossessionResponse(nonce=achall.nonce,
signature=sig)
return False
|
stef1927/python-driver | refs/heads/master | tests/integration/standard/test_custom_protocol_handler.py | 2 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.protocol import ProtocolHandler, ResultMessage, UUIDType, read_int, EventMessage
from cassandra.query import tuple_factory
from cassandra.cluster import Cluster
from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster
from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES
from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params
from six import binary_type
import uuid
def setup_module():
use_singledc()
update_datatypes()
class CustomProtocolHandlerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE custserdes WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}")
cls.session.set_keyspace("custserdes")
@classmethod
def tearDownClass(cls):
drop_keyspace_shutdown_cluster("custserdes", cls.session, cls.cluster)
def test_custom_raw_uuid_row_results(self):
"""
Test to validate that custom protocol handlers work with raw row results
Connect and validate that the normal protocol handler is used.
Re-Connect and validate that the custom protocol handler is used.
Re-Connect and validate that the normal protocol handler is used.
@since 2.7
@jira_ticket PYTHON-313
@expected_result custom protocol handler is invoked appropriately.
@test_category data_types:serialization
"""
# Ensure that we get normal uuid back first
session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes")
session.row_factory = tuple_factory
result = session.execute("SELECT schema_version FROM system.local")
uuid_type = result[0][0]
self.assertEqual(type(uuid_type), uuid.UUID)
        # use our custom protocol handler
session.client_protocol_handler = CustomTestRawRowType
session.row_factory = tuple_factory
result_set = session.execute("SELECT schema_version FROM system.local")
raw_value = result_set[0][0]
self.assertTrue(isinstance(raw_value, binary_type))
self.assertEqual(len(raw_value), 16)
# Ensure that we get normal uuid back when we re-connect
session.client_protocol_handler = ProtocolHandler
result_set = session.execute("SELECT schema_version FROM system.local")
uuid_type = result_set[0][0]
self.assertEqual(type(uuid_type), uuid.UUID)
session.shutdown()
def test_custom_raw_row_results_all_types(self):
"""
Test to validate that custom protocol handlers work with varying types of
results
Connect, create a table with all sorts of data. Query the data, make the sure the custom results handler is
used correctly.
@since 2.7
@jira_ticket PYTHON-313
@expected_result custom protocol handler is invoked with various result types
@test_category data_types:serialization
"""
# Connect using a custom protocol handler that tracks the various types the result message is used with.
session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes")
session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked
session.row_factory = tuple_factory
colnames = create_table_with_all_types("alltypes", session, 1)
columns_string = ", ".join(colnames)
# verify data
params = get_all_primitive_params(0)
results = session.execute("SELECT {0} FROM alltypes WHERE primkey=0".format(columns_string))[0]
for expected, actual in zip(params, results):
self.assertEqual(actual, expected)
# Ensure we have covered the various primitive types
self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1)
session.shutdown()
class CustomResultMessageRaw(ResultMessage):
"""
    This is a custom Result Message that is used to return raw results, rather than
results which contain objects.
"""
my_type_codes = ResultMessage.type_codes.copy()
my_type_codes[0xc] = UUIDType
type_codes = my_type_codes
@classmethod
def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata):
paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map)
rowcount = read_int(f)
rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
colnames = [c[2] for c in column_metadata]
coltypes = [c[3] for c in column_metadata]
return paging_state, coltypes, (colnames, rows)
class CustomTestRawRowType(ProtocolHandler):
"""
    This is a custom protocol handler that substitutes our CustomResultMessageRaw
    implementation for the default result message
"""
my_opcodes = ProtocolHandler.message_types_by_opcode.copy()
my_opcodes[CustomResultMessageRaw.opcode] = CustomResultMessageRaw
message_types_by_opcode = my_opcodes
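# A minimal usage sketch (not executed by the test runner): this mirrors how the
# tests above attach the handler to a live session; it assumes the cluster from
# setup_module() is running and the "custserdes" keyspace exists.
def _example_attach_custom_handler():  # pragma: no cover - illustrative only
    session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes")
    session.client_protocol_handler = CustomTestRawRowType
    session.row_factory = tuple_factory
    # Rows now come back as raw 16-byte strings instead of uuid.UUID objects.
    return session.execute("SELECT schema_version FROM system.local")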
class CustomResultMessageTracked(ResultMessage):
"""
    This is a custom Result Message that is used to track which primitive types
have been processed when it receives results
"""
my_type_codes = ResultMessage.type_codes.copy()
my_type_codes[0xc] = UUIDType
type_codes = my_type_codes
checked_rev_row_set = set()
@classmethod
def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata):
paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map)
rowcount = read_int(f)
rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
colnames = [c[2] for c in column_metadata]
coltypes = [c[3] for c in column_metadata]
cls.checked_rev_row_set.update(coltypes)
parsed_rows = [
tuple(ctype.from_binary(val, protocol_version)
for ctype, val in zip(coltypes, row))
for row in rows]
return paging_state, coltypes, (colnames, parsed_rows)
class CustomProtocolHandlerResultMessageTracked(ProtocolHandler):
"""
    This is a custom protocol handler that substitutes our CustomResultMessageTracked
    implementation for the default result message
"""
my_opcodes = ProtocolHandler.message_types_by_opcode.copy()
my_opcodes[CustomResultMessageTracked.opcode] = CustomResultMessageTracked
message_types_by_opcode = my_opcodes
|
arborh/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/scan_ops_test.py | 20 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [
slice(None, None, -1) if i == axis else slice(None) for i in range(length)
]
return x[ix]
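# Example of the helper above: numpy_reverse(np.arange(6).reshape(2, 3), 1)
# flips along axis 1 and returns [[2, 1, 0], [5, 4, 3]].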
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]
ix_init = [
slice(0, -1) if i == axis else slice(None) for i in range(length)
]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
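# Worked example of the option handling above, for np.cumsum on [1., 2., 3.]:
#   handle_options(np.cumsum, np.array([1., 2., 3.]), 0, True, False) -> [0., 1., 3.]
#   handle_options(np.cumsum, np.array([1., 2., 3.]), 0, False, True) -> [6., 5., 3.]
# i.e. exclusive prepends the identity element and drops the last partial result,
# while reverse flips the input before and the output after the scan.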
class CumsumTest(test.TestCase):
valid_dtypes = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.cached_session(use_gpu=True):
tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumsum(x, axis).eval()
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123860949") # The computation is constant folded
def testLarge(self):
for dtype in self.valid_dtypes:
x = np.ones([1000000], dtype=dtype) / 1024
self._compareAll(x, 0)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
result = math_ops.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([5, 10], axis, exclusive, reverse)
class CumprodTest(test.TestCase):
valid_dtypes = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.cached_session(use_gpu=True):
tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumprod(x, axis).eval()
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
result = math_ops.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-2, -1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([2, 4], axis, exclusive, reverse)
if __name__ == "__main__":
test.main()
|
shubhdev/edx-platform | refs/heads/master | common/djangoapps/track/tests/test_shim.py | 111 | """Ensure emitted events contain the fields legacy processors expect to find."""
from mock import sentinel
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_events_equal
from track.tests import EventTrackingTestCase, FROZEN_TIME
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields legacy processors expect to find."""
def test_event_field_mapping(self):
data = {sentinel.key: sentinel.value}
context = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'username': sentinel.username,
'session': sentinel.session,
'ip': sentinel.ip,
'host': sentinel.host,
'agent': sentinel.agent,
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'event_type': sentinel.name,
'name': sentinel.name,
'context': {
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'path': sentinel.path,
},
'event': data,
'username': sentinel.username,
'event_source': 'server',
'time': FROZEN_TIME,
'agent': sentinel.agent,
'host': sentinel.host,
'ip': sentinel.ip,
'page': None,
'session': sentinel.session,
}
assert_events_equal(expected_event, emitted_event)
def test_missing_fields(self):
self.tracker.emit(sentinel.name)
emitted_event = self.get_event()
expected_event = {
'accept_language': '',
'referer': '',
'event_type': sentinel.name,
'name': sentinel.name,
'context': {},
'event': {},
'username': '',
'event_source': 'server',
'time': FROZEN_TIME,
'agent': '',
'host': '',
'ip': '',
'page': None,
'session': '',
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields necessary for Google Analytics."""
def test_event_fields(self):
""" Test that course_id is added as the label if present, and nonInteraction is set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
def test_no_course_id(self):
""" Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
|
bperreault-va/eloworld | refs/heads/master | src/lib/jinja2/debug.py | 132 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, PY2
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# what does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch errors
# down and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
        # re-link the frames (which are proxies) into a proper chain
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
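# Hedged usage sketch: callers (for example jinja2's own Environment error
# handling) are expected to rebuild and re-raise the traceback roughly like
# this; the ``template`` object below is an assumption, not defined here.
#
#     try:
#         template.render()
#     except Exception:
#         reraise(*make_traceback(sys.exc_info()).standard_exc_info)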
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def get_jinja_locals(real_locals):
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
local_overrides = {}
for name, value in iteritems(real_locals):
if not name.startswith('l_') or value is missing:
continue
try:
_, depth, name = name.split('_', 2)
depth = int(depth)
except ValueError:
continue
cur_depth = local_overrides.get(name, (-1,))[0]
if cur_depth < depth:
local_overrides[name] = (depth, value)
for name, (_, value) in iteritems(local_overrides):
if value is missing:
locals.pop(name, None)
else:
locals[name] = value
return locals
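# Example: with no template ``context`` in the frame locals, the deepest
# ``l_<depth>_<name>`` binding wins, so
# get_jinja_locals({'l_0_item': 1, 'l_1_item': 2}) returns {'item': 2}.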
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
locals = get_jinja_locals(tb.tb_frame.f_locals)
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
    # assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
if PY2:
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else:
code = CodeType(0, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception as e:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non cpython
interpreters
"""
import ctypes
from types import TracebackType
if PY2:
# figure out size of _Py_ssize_t for Python 2:
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
else:
# platform ssize_t on Python 3
_Py_ssize_t = ctypes.c_ssize_t
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
|
louietsai/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/threaded_import_hangers.py | 57 | # This is a helper module for test_threaded_import. The test imports this
# module, and this module tries to run various Python library functions in
# their own thread, as a side effect of being imported. If the spawned
# thread doesn't complete in TIMEOUT seconds, an "appeared to hang" message
# is appended to the module-global `errors` list. That list remains empty
# if (and only if) all functions tested complete.
TIMEOUT = 10
import threading
import tempfile
import os.path
errors = []
# This class merely runs a function in its own thread T. The thread importing
# this module holds the import lock, so if the function called by T tries
# to do its own imports it will block waiting for this module's import
# to complete.
class Worker(threading.Thread):
def __init__(self, function, args):
threading.Thread.__init__(self)
self.function = function
self.args = args
def run(self):
self.function(*self.args)
for name, func, args in [
# Bug 147376: TemporaryFile hung on Windows, starting in Python 2.4.
("tempfile.TemporaryFile", lambda: tempfile.TemporaryFile().close(), ()),
# The real cause for bug 147376: ntpath.abspath() caused the hang.
("os.path.abspath", os.path.abspath, ('.',)),
]:
t = Worker(func, args)
t.start()
t.join(TIMEOUT)
if t.is_alive():
errors.append("%s appeared to hang" % name)
|
nevil/edash-packager | refs/heads/master | packager/third_party/libxml/src/check-xml-test-suite.py | 347 | #!/usr/bin/python
import sys
import time
import os
import string
sys.path.insert(0, "python")
import libxml2
test_nr = 0
test_succeed = 0
test_failed = 0
test_error = 0
#
# the testsuite description
#
CONF="xml-test-suite/xmlconf/xmlconf.xml"
LOG="check-xml-test-suite.log"
log = open(LOG, "w")
#
# Error and warning handlers
#
error_nr = 0
error_msg = ''
def errorHandler(ctx, str):
global error_nr
global error_msg
error_nr = error_nr + 1
if len(error_msg) < 300:
if len(error_msg) == 0 or error_msg[-1] == '\n':
error_msg = error_msg + " >>" + str
else:
error_msg = error_msg + str
libxml2.registerErrorHandler(errorHandler, None)
#warning_nr = 0
#warning = ''
#def warningHandler(ctx, str):
# global warning_nr
# global warning
#
# warning_nr = warning_nr + 1
# warning = warning + str
#
#libxml2.registerWarningHandler(warningHandler, None)
#
# Used to load the XML testsuite description
#
def loadNoentDoc(filename):
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return None
ctxt.replaceEntities(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if ctxt.wellFormed() != 1:
doc.freeDoc()
return None
return doc
#
# The conformance testing routines
#
def testNotWf(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEnt(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc == None or ret != 0 or ctxt.wellFormed() == 0:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
if doc != None:
doc.freeDoc()
return 0
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
log.write("%s: error: WF document generated an error msg\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testError(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ctxt.wellFormed() == 0:
print "%s: warning: failed to parse the document but accepted" % (id)
log.write("%s: warning: failed to parse the document but accepte\n" % (id))
return 2
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
log.write("%s: error: WF document generated an error msg\n" % (id))
return 2
return 1
def testInvalid(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid == 1:
print "%s: error: Validity error not detected" % (id)
log.write("%s: error: Validity error not detected\n" % (id))
doc.freeDoc()
return 0
if error_nr == 0:
print "%s: warning: Validity error not reported" % (id)
log.write("%s: warning: Validity error not reported\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testValid(filename, id):
global error_nr
global error_msg
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid != 1:
print "%s: error: Validity check failed" % (id)
log.write("%s: error: Validity check failed\n" % (id))
doc.freeDoc()
return 0
if error_nr != 0 or valid != 1:
print "%s: warning: valid document reported an error" % (id)
log.write("%s: warning: valid document reported an error\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def runTest(test):
global test_nr
global test_succeed
global test_failed
global error_msg
global log
uri = test.prop('URI')
id = test.prop('ID')
if uri == None:
print "Test without ID:", uri
return -1
if id == None:
print "Test without URI:", id
return -1
base = test.getBase(None)
URI = libxml2.buildURI(uri, base)
if os.access(URI, os.R_OK) == 0:
print "Test %s missing: base %s uri %s" % (URI, base, uri)
return -1
type = test.prop('TYPE')
if type == None:
print "Test %s missing TYPE" % (id)
return -1
extra = None
if type == "invalid":
res = testInvalid(URI, id)
elif type == "valid":
res = testValid(URI, id)
elif type == "not-wf":
extra = test.prop('ENTITIES')
# print URI
#if extra == None:
# res = testNotWfEntDtd(URI, id)
#elif extra == 'none':
# res = testNotWf(URI, id)
#elif extra == 'general':
# res = testNotWfEnt(URI, id)
#elif extra == 'both' or extra == 'parameter':
res = testNotWfEntDtd(URI, id)
#else:
# print "Unknow value %s for an ENTITIES test value" % (extra)
# return -1
elif type == "error":
res = testError(URI, id)
else:
# TODO skipped for now
return -1
test_nr = test_nr + 1
if res > 0:
test_succeed = test_succeed + 1
elif res == 0:
test_failed = test_failed + 1
elif res < 0:
test_error = test_error + 1
    # Log the context
if res != 1:
log.write(" File: %s\n" % (URI))
content = string.strip(test.content)
while content[-1] == '\n':
content = content[0:-1]
if extra != None:
log.write(" %s:%s:%s\n" % (type, extra, content))
else:
log.write(" %s:%s\n\n" % (type, content))
if error_msg != '':
log.write(" ----\n%s ----\n" % (error_msg))
error_msg = ''
log.write("\n")
return 0
def runTestCases(case):
profile = case.prop('PROFILE')
if profile != None and \
string.find(profile, "IBM XML Conformance Test Suite - Production") < 0:
print "=>", profile
test = case.children
while test != None:
if test.name == 'TEST':
runTest(test)
if test.name == 'TESTCASES':
runTestCases(test)
test = test.next
conf = loadNoentDoc(CONF)
if conf == None:
print "Unable to load %s" % CONF
sys.exit(1)
testsuite = conf.getRootElement()
if testsuite.name != 'TESTSUITE':
print "Expecting TESTSUITE root element: aborting"
sys.exit(1)
profile = testsuite.prop('PROFILE')
if profile != None:
print profile
start = time.time()
case = testsuite.children
while case != None:
if case.name == 'TESTCASES':
old_test_nr = test_nr
old_test_succeed = test_succeed
old_test_failed = test_failed
old_test_error = test_error
runTestCases(case)
print " Ran %d tests: %d suceeded, %d failed and %d generated an error" % (
test_nr - old_test_nr, test_succeed - old_test_succeed,
test_failed - old_test_failed, test_error - old_test_error)
case = case.next
conf.freeDoc()
log.close()
print "Ran %d tests: %d suceeded, %d failed and %d generated an error in %.2f s." % (
test_nr, test_succeed, test_failed, test_error, time.time() - start)
|
gaddman/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py | 17 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup_facts
version_added: "2.1"
short_description: Get security group facts.
description:
- Get facts for a specific security group or all security groups within a resource group.
options:
name:
description:
- Only show results for a specific security group.
resource_group:
description:
- Name of the resource group to use.
required: true
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one security group
azure_rm_securitygroup_facts:
resource_group: Testing
name: secgroup001
- name: Get facts for all security groups
azure_rm_securitygroup_facts:
resource_group: Testing
'''
RETURN = '''
azure_securitygroups:
description: List containing security group dicts.
returned: always
type: list
example: [{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001",
"location": "eastus2",
"name": "secgroup001",
"properties": {
"defaultSecurityRules": [
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"properties": {
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destinationAddressPrefix": "VirtualNetwork",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65000,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "VirtualNetwork",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"properties": {
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65001,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "AzureLoadBalancer",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"properties": {
"access": "Deny",
"description": "Deny all inbound traffic",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Inbound",
"priority": 65500,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"properties": {
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destinationAddressPrefix": "VirtualNetwork",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65000,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "VirtualNetwork",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"properties": {
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destinationAddressPrefix": "Internet",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65001,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
},
{
"etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"properties": {
"access": "Deny",
"description": "Deny all outbound traffic",
"destinationAddressPrefix": "*",
"destinationPortRange": "*",
"direction": "Outbound",
"priority": 65500,
"protocol": "*",
"provisioningState": "Succeeded",
"sourceAddressPrefix": "*",
"sourcePortRange": "*"
}
}
],
"networkInterfaces": [
{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic004"
}
],
"provisioningState": "Succeeded",
"resourceGuid": "ebd00afa-5dc8-446f-810a-50dd6f671588",
"securityRules": []
},
"tags": {},
"type": "Microsoft.Network/networkSecurityGroups"
}]
''' # NOQA
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'NetworkSecurityGroup'
class AzureRMSecurityGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(required=True, type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_securitygroups=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMSecurityGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
self.results['ansible_facts']['azure_securitygroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_securitygroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.network_client.network_security_groups.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
grp['name'] = item.name
result = [grp]
return result
def list_items(self):
self.log('List all items')
try:
response = self.network_client.network_security_groups.list(self.resource_group)
except Exception as exc:
self.fail("Error listing all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
grp['name'] = item.name
results.append(grp)
return results
def main():
AzureRMSecurityGroupFacts()
if __name__ == '__main__':
main()
|
guilleramos/django-json-forms | refs/heads/master | django_json_forms/widgets.py | 3 | import datetime
from django.utils.dates import MONTHS
from django.utils.datastructures import SortedDict
class RemoteWidget(object):
def __init__(self, widget, field_name=None):
self.field_name = field_name
self.widget = widget
def as_dict(self):
widget_dict = SortedDict()
widget_dict['title'] = self.widget.__class__.__name__
widget_dict['is_hidden'] = self.widget.is_hidden
widget_dict['needs_multipart_form'] = self.widget.needs_multipart_form
widget_dict['is_localized'] = self.widget.is_localized
widget_dict['is_required'] = self.widget.is_required
widget_dict['attrs'] = self.widget.attrs
return widget_dict
class RemoteInput(RemoteWidget):
def as_dict(self):
widget_dict = super(RemoteInput, self).as_dict()
widget_dict['input_type'] = self.widget.input_type
return widget_dict
class RemoteTextInput(RemoteInput):
def as_dict(self):
return super(RemoteTextInput, self).as_dict()
class RemotePasswordInput(RemoteInput):
def as_dict(self):
return super(RemotePasswordInput, self).as_dict()
class RemoteHiddenInput(RemoteInput):
def as_dict(self):
return super(RemoteHiddenInput, self).as_dict()
class RemoteMultipleHiddenInput(RemoteHiddenInput):
def as_dict(self):
widget_dict = super(RemoteMultipleHiddenInput, self).as_dict()
widget_dict['choices'] = self.widget.choices
return widget_dict
class RemoteFileInput(RemoteInput):
def as_dict(self):
return super(RemoteFileInput, self).as_dict()
class RemoteClearableFileInput(RemoteFileInput):
def as_dict(self):
widget_dict = super(RemoteClearableFileInput, self).as_dict()
widget_dict['initial_text'] = self.widget.initial_text
widget_dict['input_text'] = self.widget.input_text
widget_dict['clear_checkbox_label'] = self.widget.clear_checkbox_label
return widget_dict
class RemoteTextarea(RemoteWidget):
def as_dict(self):
widget_dict = super(RemoteTextarea, self).as_dict()
widget_dict['input_type'] = 'textarea'
return widget_dict
class RemoteTimeInput(RemoteInput):
def as_dict(self):
widget_dict = super(RemoteTimeInput, self).as_dict()
widget_dict['format'] = self.widget.format
widget_dict['manual_format'] = self.widget.manual_format
widget_dict['date'] = self.widget.manual_format
widget_dict['input_type'] = 'time'
return widget_dict
class RemoteDateInput(RemoteTimeInput):
def as_dict(self):
widget_dict = super(RemoteDateInput, self).as_dict()
widget_dict['input_type'] = 'date'
current_year = datetime.datetime.now().year
widget_dict['choices'] = [{
'title': 'day',
'data': [{'key': x, 'value': x} for x in range(1, 32)]
}, {
'title': 'month',
'data': [{'key': x, 'value': y} for (x, y) in MONTHS.items()]
}, {
'title': 'year',
'data': [{'key': x, 'value': x} for x in range(current_year - 100, current_year + 1)]
}]
return widget_dict
class RemoteDateTimeInput(RemoteTimeInput):
def as_dict(self):
widget_dict = super(RemoteDateTimeInput, self).as_dict()
widget_dict['input_type'] = 'datetime'
return widget_dict
class RemoteCheckboxInput(RemoteWidget):
def as_dict(self):
widget_dict = super(RemoteCheckboxInput, self).as_dict()
# If check test is None then the input should accept null values
check_test = None
if self.widget.check_test is not None:
check_test = True
widget_dict['check_test'] = check_test
widget_dict['input_type'] = 'checkbox'
return widget_dict
class RemoteSelect(RemoteWidget):
def as_dict(self):
widget_dict = super(RemoteSelect, self).as_dict()
widget_dict['choices'] = []
for key, value in self.widget.choices:
widget_dict['choices'].append({
'value': key,
'display': value
})
widget_dict['input_type'] = 'select'
return widget_dict
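# Illustrative sketch (the widget and its choices are hypothetical): wrapping
# django.forms.Select(choices=[('a', 'Alpha')]) would serialize to a dict that
# includes {'input_type': 'select',
#           'choices': [{'value': 'a', 'display': 'Alpha'}], ...}.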
class RemoteNullBooleanSelect(RemoteSelect):
def as_dict(self):
return super(RemoteNullBooleanSelect, self).as_dict()
class RemoteSelectMultiple(RemoteSelect):
def as_dict(self):
widget_dict = super(RemoteSelectMultiple, self).as_dict()
widget_dict['input_type'] = 'selectmultiple'
widget_dict['size'] = len(widget_dict['choices'])
return widget_dict
class RemoteRadioInput(RemoteWidget):
def as_dict(self):
widget_dict = SortedDict()
widget_dict['title'] = self.widget.__class__.__name__
widget_dict['name'] = self.widget.name
widget_dict['value'] = self.widget.value
widget_dict['attrs'] = self.widget.attrs
widget_dict['choice_value'] = self.widget.choice_value
widget_dict['choice_label'] = self.widget.choice_label
widget_dict['index'] = self.widget.index
widget_dict['input_type'] = 'radio'
return widget_dict
class RemoteRadioFieldRenderer(RemoteWidget):
def as_dict(self):
widget_dict = SortedDict()
widget_dict['title'] = self.widget.__class__.__name__
widget_dict['name'] = self.widget.name
widget_dict['value'] = self.widget.value
widget_dict['attrs'] = self.widget.attrs
widget_dict['choices'] = self.widget.choices
widget_dict['input_type'] = 'radio'
return widget_dict
class RemoteRadioSelect(RemoteSelect):
def as_dict(self):
widget_dict = super(RemoteRadioSelect, self).as_dict()
widget_dict['choices'] = []
for key, value in self.widget.choices:
widget_dict['choices'].append({
'name': self.field_name or '',
'value': key,
'display': value
})
widget_dict['input_type'] = 'radio'
return widget_dict
class RemoteCheckboxSelectMultiple(RemoteSelectMultiple):
def as_dict(self):
return super(RemoteCheckboxSelectMultiple, self).as_dict()
class RemoteMultiWidget(RemoteWidget):
def as_dict(self):
widget_dict = super(RemoteMultiWidget, self).as_dict()
widget_list = []
for widget in self.widget.widgets:
            # Fetch a remote wrapper for each sub-widget and convert it to a dict;
            # the generic RemoteWidget base is used here as a minimal, safe default.
            widget_list.append(RemoteWidget(widget).as_dict())
widget_dict['widgets'] = widget_list
return widget_dict
class RemoteSplitDateTimeWidget(RemoteMultiWidget):
def as_dict(self):
widget_dict = super(RemoteSplitDateTimeWidget, self).as_dict()
widget_dict['date_format'] = self.widget.date_format
widget_dict['time_format'] = self.widget.time_format
return widget_dict
class RemoteSplitHiddenDateTimeWidget(RemoteSplitDateTimeWidget):
def as_dict(self):
return super(RemoteSplitHiddenDateTimeWidget, self).as_dict()
|
moondrop-entertainment/django-nonrel-drawp | refs/heads/master | django/conf/locale/pt/formats.py | 232 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \de F \de Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \de F \de Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
SHA2017-badge/micropython-esp32 | refs/heads/master | tests/basics/tuple1.py | 28 | # basic tuple functionality
x = (1, 2, 3 * 4)
print(x)
try:
x[0] = 4
except TypeError:
print("TypeError")
print(x)
try:
x.append(5)
except AttributeError:
print("AttributeError")
print(x[1:])
print(x[:-1])
print(x[2:3])
print(x + (10, 100, 10000))
# inplace add operator
x += (10, 11, 12)
print(x)
# construction of tuple from large iterator (tests implementation detail of uPy)
print(tuple(range(20)))
# unsupported unary operation
try:
+()
except TypeError:
print('TypeError')
# unsupported type on RHS of add
try:
() + None
except TypeError:
print('TypeError')
|
sadleader/odoo | refs/heads/master | addons/auth_oauth/res_config.py | 292 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv, fields
import logging
_logger = logging.getLogger(__name__)
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'auth_oauth_google_enabled' : fields.boolean('Allow users to sign in with Google'),
'auth_oauth_google_client_id' : fields.char('Client ID'),
'auth_oauth_facebook_enabled' : fields.boolean('Allow users to sign in with Facebook'),
'auth_oauth_facebook_client_id' : fields.char('Client ID'),
}
def default_get(self, cr, uid, fields, context=None):
res = super(base_config_settings, self).default_get(cr, uid, fields, context=context)
res.update(self.get_oauth_providers(cr, uid, fields, context=context))
return res
def get_oauth_providers(self, cr, uid, fields, context=None):
google_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_google')[1]
facebook_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_facebook')[1]
rg = self.pool.get('auth.oauth.provider').read(cr, uid, [google_id], ['enabled','client_id'], context=context)
rf = self.pool.get('auth.oauth.provider').read(cr, uid, [facebook_id], ['enabled','client_id'], context=context)
return {
'auth_oauth_google_enabled': rg[0]['enabled'],
'auth_oauth_google_client_id': rg[0]['client_id'],
'auth_oauth_facebook_enabled': rf[0]['enabled'],
'auth_oauth_facebook_client_id': rf[0]['client_id'],
}
def set_oauth_providers(self, cr, uid, ids, context=None):
google_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_google')[1]
facebook_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_facebook')[1]
config = self.browse(cr, uid, ids[0], context=context)
rg = {
'enabled':config.auth_oauth_google_enabled,
'client_id':config.auth_oauth_google_client_id,
}
rf = {
'enabled':config.auth_oauth_facebook_enabled,
'client_id':config.auth_oauth_facebook_client_id,
}
self.pool.get('auth.oauth.provider').write(cr, uid, [google_id], rg)
self.pool.get('auth.oauth.provider').write(cr, uid, [facebook_id], rf)
|
Suninus/NewsBlur | refs/heads/master | vendor/yaml/dumper.py | 543 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
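# Illustrative sketch, not part of the upstream module: these Dumper classes are normally
# selected through the Dumper= keyword of yaml.dump()/yaml.dump_all(); the keyword
# arguments accepted there mirror the __init__ signatures above.
def _example_dump(data):
    import yaml
    return yaml.dump(data, Dumper=yaml.SafeDumper,
                     default_flow_style=False, allow_unicode=True)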
|
kevinlondon/sentry | refs/heads/master | src/sentry/db/models/fields/gzippeddict.py | 29 | """
sentry.db.models.fields.gzippeddict
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import six
from django.db import models
from south.modelsinspector import add_introspection_rules
from sentry.utils.compat import pickle
from sentry.utils.strings import decompress, compress
__all__ = ('GzippedDictField',)
logger = logging.getLogger('sentry')
class GzippedDictField(models.TextField):
"""
Slightly different from a JSONField in the sense that the default
value is a dictionary.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if isinstance(value, six.string_types) and value:
try:
value = pickle.loads(decompress(value))
except Exception as e:
logger.exception(e)
return {}
elif not value:
return {}
return value
def get_prep_value(self, value):
if not value and self.null:
# save ourselves some storage
return None
# enforce unicode strings to guarantee consistency
if isinstance(value, str):
value = six.text_type(value)
return compress(pickle.dumps(value))
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
add_introspection_rules([], ["^sentry\.db\.models\.fields\.gzippeddict\.GzippedDictField"])
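# Illustrative sketch, not part of the upstream module: a dict assigned to a
# GzippedDictField is pickled and compressed by get_prep_value() on save and restored
# by to_python() on load. The standalone field instance below is hypothetical and not
# attached to any model.
def _example_roundtrip():
    field = GzippedDictField(blank=True, null=True)
    stored = field.get_prep_value({'tags': {'level': 'error'}})
    return field.to_python(stored)  # -> {'tags': {'level': 'error'}}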
|
matthaywardwebdesign/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/compiler-override/gyptest-compiler-global-settings.py | 137 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that make_global_settings can be used to override the
compiler settings.
"""
import TestGyp
import os
import copy
import sys
from string import Template
if sys.platform == 'win32':
  # cross compiling is not supported by ninja on windows
  # and make is not supported on windows at all.
sys.exit(0)
test = TestGyp.TestGyp(formats=['ninja', 'make'])
gypfile = 'compiler-global-settings.gyp'
replacements = { 'PYTHON': '/usr/bin/python', 'PWD': os.getcwd()}
# Process the .in gyp file to produce the final gyp file
# since we need to include absolute paths in the make_global_settings
# section.
replacements['TOOLSET'] = 'target'
s = Template(open(gypfile + '.in').read())
output = open(gypfile, 'w')
output.write(s.substitute(replacements))
output.close()
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp(gypfile)
os.environ.clear()
os.environ.update(old_env)
test.build(gypfile)
test.must_contain_all_lines(test.stdout(), ['my_cc.py', 'my_cxx.py', 'FOO'])
# Same again but with the host toolset.
replacements['TOOLSET'] = 'host'
s = Template(open(gypfile + '.in').read())
output = open(gypfile, 'w')
output.write(s.substitute(replacements))
output.close()
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp(gypfile)
os.environ.clear()
os.environ.update(old_env)
test.build(gypfile)
test.must_contain_all_lines(test.stdout(), ['my_cc.py', 'my_cxx.py', 'BAR'])
# Check that CC_host overrides make_global_settings
old_env = dict(os.environ)
os.environ['CC_host'] = '%s %s/my_cc.py SECRET' % (replacements['PYTHON'],
replacements['PWD'])
test.run_gyp(gypfile)
os.environ.clear()
os.environ.update(old_env)
test.build(gypfile)
test.must_contain_all_lines(test.stdout(), ['SECRET', 'my_cxx.py', 'BAR'])
test.pass_test()
|
raccoongang/edx-platform | refs/heads/ginkgo-rg | common/lib/xmodule/xmodule/tests/test_split_test_module.py | 10 | """
Tests for the Split Testing Module
"""
import ddt
import lxml
from mock import Mock, patch
from fs.memoryfs import MemoryFS
from xmodule.partitions.tests.test_partitions import MockPartitionService, PartitionTestCase, MockUserPartitionScheme
from xmodule.tests.xml import factories as xml
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests import get_test_system
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW
from xmodule.validation import StudioValidationMessage
from xmodule.split_test_module import SplitTestDescriptor, SplitTestFields, get_split_user_partitions
from xmodule.partitions.partitions import Group, UserPartition, MINIMUM_STATIC_PARTITION_ID
class SplitTestModuleFactory(xml.XmlImportFactory):
"""
Factory for generating SplitTestModules for testing purposes
"""
tag = 'split_test'
class SplitTestUtilitiesTest(PartitionTestCase):
"""
Tests for utility methods related to split_test module.
"""
def test_split_user_partitions(self):
"""
Tests the get_split_user_partitions helper method.
"""
first_random_partition = UserPartition(
0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
self.random_scheme
)
second_random_partition = UserPartition(
0, 'second_partition', 'Second Partition', [Group("4", 'zeta'), Group("5", 'omega')],
self.random_scheme
)
all_partitions = [
first_random_partition,
# Only UserPartitions with scheme "random" will be returned as available options.
UserPartition(
1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
self.non_random_scheme
),
second_random_partition
]
self.assertEqual(
[first_random_partition, second_random_partition],
get_split_user_partitions(all_partitions)
)
class SplitTestModuleTest(XModuleXmlImportTest, PartitionTestCase):
"""
Base class for all split_module tests.
"""
def setUp(self):
super(SplitTestModuleTest, self).setUp()
self.course_id = 'test_org/test_course_number/test_run'
# construct module
course = xml.CourseFactory.build()
sequence = xml.SequenceFactory.build(parent=course)
split_test = SplitTestModuleFactory(
parent=sequence,
attribs={
'user_partition_id': '0',
'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}' # pylint: disable=line-too-long
}
)
xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')
self.course = self.process_xml(course)
self.course_sequence = self.course.get_children()[0]
self.module_system = get_test_system()
self.module_system.descriptor_runtime = self.course._runtime # pylint: disable=protected-access
self.course.runtime.export_fs = MemoryFS()
# Create mock partition service, as these tests are running with XML in-memory system.
self.course.user_partitions = [
self.user_partition,
UserPartition(
MINIMUM_STATIC_PARTITION_ID, 'second_partition', 'Second Partition',
[
Group(unicode(MINIMUM_STATIC_PARTITION_ID + 1), 'abel'),
Group(unicode(MINIMUM_STATIC_PARTITION_ID + 2), 'baker'), Group("103", 'charlie')
],
MockUserPartitionScheme()
)
]
partitions_service = MockPartitionService(
self.course,
course_id=self.course.id,
)
self.module_system._services['partitions'] = partitions_service # pylint: disable=protected-access
# Mock user_service user
user_service = Mock()
user = Mock(username='ma', email='[email protected]', is_staff=False, is_active=True)
user_service._django_user = user
self.module_system._services['user'] = user_service # pylint: disable=protected-access
self.split_test_module = self.course_sequence.get_children()[0]
self.split_test_module.bind_for_student(
self.module_system,
user.id
)
# Create mock modulestore for getting the course. Needed for rendering the HTML
# view, since mock services exist and the rendering code will not short-circuit.
mocked_modulestore = Mock()
mocked_modulestore.get_course.return_value = self.course
self.split_test_module.system.modulestore = mocked_modulestore
@ddt.ddt
class SplitTestModuleLMSTest(SplitTestModuleTest):
"""
Test the split test module
"""
@ddt.data((0, 'split_test_cond0'), (1, 'split_test_cond1'))
@ddt.unpack
def test_child(self, user_tag, child_url_name):
self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
self.assertEquals(self.split_test_module.child_descriptor.url_name, child_url_name)
@ddt.data((0, 'HTML FOR GROUP 0'), (1, 'HTML FOR GROUP 1'))
@ddt.unpack
def test_get_html(self, user_tag, child_content):
self.user_partition.scheme.current_group = self.user_partition.groups[user_tag]
self.assertIn(
child_content,
self.module_system.render(self.split_test_module, STUDENT_VIEW).content
)
@ddt.data(0, 1)
def test_child_missing_tag_value(self, _user_tag):
# If user_tag has a missing value, we should still get back a valid child url
self.assertIn(self.split_test_module.child_descriptor.url_name, ['split_test_cond0', 'split_test_cond1'])
@ddt.data(100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
def test_child_persist_new_tag_value_when_tag_missing(self, _user_tag):
# If a user_tag has a missing value, a group should be saved/persisted for that user.
# So, we check that we get the same url_name when we call on the url_name twice.
# We run the test ten times so that, if our storage is failing, we'll be most likely to notice it.
self.assertEquals(
self.split_test_module.child_descriptor.url_name,
self.split_test_module.child_descriptor.url_name
)
# Patch the definition_to_xml for the html children.
@patch('xmodule.html_module.HtmlDescriptor.definition_to_xml')
def test_export_import_round_trip(self, def_to_xml):
# The HtmlDescriptor definition_to_xml tries to write to the filesystem
# before returning an xml object. Patch this to just return the xml.
def_to_xml.return_value = lxml.etree.Element('html')
# Mock out the process_xml
# Expect it to return a child descriptor for the SplitTestDescriptor when called.
self.module_system.process_xml = Mock()
# Write out the xml.
xml_obj = self.split_test_module.definition_to_xml(MemoryFS())
self.assertEquals(xml_obj.get('user_partition_id'), '0')
self.assertIsNotNone(xml_obj.get('group_id_to_child'))
# Read the xml back in.
fields, children = SplitTestDescriptor.definition_from_xml(xml_obj, self.module_system)
self.assertEquals(fields.get('user_partition_id'), '0')
self.assertIsNotNone(fields.get('group_id_to_child'))
self.assertEquals(len(children), 2)
class SplitTestModuleStudioTest(SplitTestModuleTest):
"""
Unit tests for how split test interacts with Studio.
"""
@patch('xmodule.split_test_module.SplitTestDescriptor.group_configuration_url', return_value='http://example.com')
def test_render_author_view(self, group_configuration_url):
"""
Test the rendering of the Studio author view.
"""
def create_studio_context(root_xblock):
"""
Context for rendering the studio "author_view".
"""
return {
'reorderable_items': set(),
'root_xblock': root_xblock,
}
# The split_test module should render both its groups when it is the root
context = create_studio_context(self.split_test_module)
html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
self.assertIn('HTML FOR GROUP 0', html)
self.assertIn('HTML FOR GROUP 1', html)
# When rendering as a child, it shouldn't render either of its groups
context = create_studio_context(self.course_sequence)
html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
self.assertNotIn('HTML FOR GROUP 0', html)
self.assertNotIn('HTML FOR GROUP 1', html)
# The "Create Missing Groups" button should be rendered when groups are missing
context = create_studio_context(self.split_test_module)
self.split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
]
html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
self.assertIn('HTML FOR GROUP 0', html)
self.assertIn('HTML FOR GROUP 1', html)
def test_group_configuration_url(self):
"""
Test creation of correct Group Configuration URL.
"""
mocked_course = Mock(advanced_modules=['split_test'])
mocked_modulestore = Mock()
mocked_modulestore.get_course.return_value = mocked_course
self.split_test_module.system.modulestore = mocked_modulestore
self.split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
]
expected_url = '/group_configurations/edX/xml_test_course/101#0'
self.assertEqual(expected_url, self.split_test_module.group_configuration_url)
def test_editable_settings(self):
"""
Test the setting information passed back from editable_metadata_fields.
"""
editable_metadata_fields = self.split_test_module.editable_metadata_fields
self.assertIn(SplitTestDescriptor.display_name.name, editable_metadata_fields)
self.assertNotIn(SplitTestDescriptor.due.name, editable_metadata_fields)
self.assertNotIn(SplitTestDescriptor.user_partitions.name, editable_metadata_fields)
# user_partition_id will always appear in editable_metadata_settings, regardless
# of the selected value.
self.assertIn(SplitTestDescriptor.user_partition_id.name, editable_metadata_fields)
def test_non_editable_settings(self):
"""
Test the settings that are marked as "non-editable".
"""
non_editable_metadata_fields = self.split_test_module.non_editable_metadata_fields
self.assertIn(SplitTestDescriptor.due, non_editable_metadata_fields)
self.assertIn(SplitTestDescriptor.user_partitions, non_editable_metadata_fields)
self.assertNotIn(SplitTestDescriptor.display_name, non_editable_metadata_fields)
def test_available_partitions(self):
"""
Tests that the available partitions are populated correctly when editable_metadata_fields are called
"""
self.assertEqual([], SplitTestDescriptor.user_partition_id.values)
# user_partitions is empty, only the "Not Selected" item will appear.
self.split_test_module.user_partition_id = SplitTestFields.no_partition_selected['value']
self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
partitions = SplitTestDescriptor.user_partition_id.values
self.assertEqual(1, len(partitions))
self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
# Populate user_partitions and call editable_metadata_fields again
self.split_test_module.user_partitions = [
UserPartition(
0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
self.random_scheme
),
# Only UserPartitions with scheme "random" will be returned as available options.
UserPartition(
1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
self.non_random_scheme
)
]
self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
partitions = SplitTestDescriptor.user_partition_id.values
self.assertEqual(2, len(partitions))
self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
self.assertEqual(0, partitions[1]['value'])
self.assertEqual("first_partition", partitions[1]['display_name'])
# Try again with a selected partition and verify that there is no option for "No Selection"
self.split_test_module.user_partition_id = 0
self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
partitions = SplitTestDescriptor.user_partition_id.values
self.assertEqual(1, len(partitions))
self.assertEqual(0, partitions[0]['value'])
self.assertEqual("first_partition", partitions[0]['display_name'])
# Finally try again with an invalid selected partition and verify that "No Selection" is an option
self.split_test_module.user_partition_id = 999
self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
partitions = SplitTestDescriptor.user_partition_id.values
self.assertEqual(2, len(partitions))
self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
self.assertEqual(0, partitions[1]['value'])
self.assertEqual("first_partition", partitions[1]['display_name'])
def test_active_and_inactive_children(self):
"""
Tests the active and inactive children returned for different split test configurations.
"""
split_test_module = self.split_test_module
children = split_test_module.get_children()
# Verify that a split test has no active children if it has no specified user partition.
split_test_module.user_partition_id = -1
[active_children, inactive_children] = split_test_module.active_and_inactive_children()
self.assertEqual(active_children, [])
self.assertEqual(inactive_children, children)
# Verify that all the children are returned as active for a correctly configured split_test
split_test_module.user_partition_id = 0
split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
]
[active_children, inactive_children] = split_test_module.active_and_inactive_children()
self.assertEqual(active_children, children)
self.assertEqual(inactive_children, [])
# Verify that a split_test does not return inactive children in the active children
self.split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')])
]
[active_children, inactive_children] = split_test_module.active_and_inactive_children()
self.assertEqual(active_children, [children[0]])
self.assertEqual(inactive_children, [children[1]])
# Verify that a split_test ignores misconfigured children
self.split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')])
]
[active_children, inactive_children] = split_test_module.active_and_inactive_children()
self.assertEqual(active_children, [children[0]])
self.assertEqual(inactive_children, [children[1]])
# Verify that a split_test referring to a non-existent user partition has no active children
self.split_test_module.user_partition_id = 2
[active_children, inactive_children] = split_test_module.active_and_inactive_children()
self.assertEqual(active_children, [])
self.assertEqual(inactive_children, children)
def test_validation_messages(self):
"""
Test the validation messages produced for different split test configurations.
"""
split_test_module = self.split_test_module
def verify_validation_message(message, expected_message, expected_message_type,
expected_action_class=None, expected_action_label=None,
expected_action_runtime_event=None):
"""
Verify that the validation message has the expected validation message and type.
"""
self.assertEqual(message.text, expected_message)
self.assertEqual(message.type, expected_message_type)
if expected_action_class:
self.assertEqual(message.action_class, expected_action_class)
else:
self.assertFalse(hasattr(message, "action_class"))
if expected_action_label:
self.assertEqual(message.action_label, expected_action_label)
else:
self.assertFalse(hasattr(message, "action_label"))
if expected_action_runtime_event:
self.assertEqual(message.action_runtime_event, expected_action_runtime_event)
else:
self.assertFalse(hasattr(message, "action_runtime_event"))
def verify_summary_message(general_validation, expected_message, expected_message_type):
"""
Verify that the general validation message has the expected validation message and type.
"""
self.assertEqual(general_validation.text, expected_message)
self.assertEqual(general_validation.type, expected_message_type)
# Verify the messages for an unconfigured user partition
split_test_module.user_partition_id = -1
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 0)
verify_validation_message(
validation.summary,
u"The experiment is not associated with a group configuration.",
StudioValidationMessage.NOT_CONFIGURED,
'edit-button',
u"Select a Group Configuration",
)
# Verify the messages for a correctly configured split_test
split_test_module.user_partition_id = 0
split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
]
validation = split_test_module.validate_split_test()
self.assertTrue(validation)
self.assertIsNone(split_test_module.general_validation_message(), None)
# Verify the messages for a split test with too few groups
split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
]
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 1)
verify_validation_message(
validation.messages[0],
u"The experiment does not contain all of the groups in the configuration.",
StudioValidationMessage.ERROR,
expected_action_runtime_event='add-missing-groups',
expected_action_label=u"Add Missing Groups"
)
verify_summary_message(
validation.summary,
u"This content experiment has issues that affect content visibility.",
StudioValidationMessage.ERROR
)
# Verify the messages for a split test with children that are not associated with any group
split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition',
[Group("0", 'alpha')])
]
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 1)
verify_validation_message(
validation.messages[0],
u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
StudioValidationMessage.WARNING
)
verify_summary_message(
validation.summary,
u"This content experiment has issues that affect content visibility.",
StudioValidationMessage.WARNING
)
# Verify the messages for a split test with both missing and inactive children
split_test_module.user_partitions = [
UserPartition(0, 'first_partition', 'First Partition',
[Group("0", 'alpha'), Group("2", 'gamma')])
]
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 2)
verify_validation_message(
validation.messages[0],
u"The experiment does not contain all of the groups in the configuration.",
StudioValidationMessage.ERROR,
expected_action_runtime_event='add-missing-groups',
expected_action_label=u"Add Missing Groups"
)
verify_validation_message(
validation.messages[1],
u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
StudioValidationMessage.WARNING
)
# With two messages of type error and warning priority given to error.
verify_summary_message(
validation.summary,
u"This content experiment has issues that affect content visibility.",
StudioValidationMessage.ERROR
)
# Verify the messages for a split test referring to a non-existent user partition
split_test_module.user_partition_id = 2
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 1)
verify_validation_message(
validation.messages[0],
u"The experiment uses a deleted group configuration. "
u"Select a valid group configuration or delete this experiment.",
StudioValidationMessage.ERROR
)
verify_summary_message(
validation.summary,
u"This content experiment has issues that affect content visibility.",
StudioValidationMessage.ERROR
)
# Verify the message for a split test referring to a non-random user partition
split_test_module.user_partitions = [
UserPartition(
10, 'incorrect_partition', 'Non Random Partition', [Group("0", 'alpha'), Group("2", 'gamma')],
scheme=self.non_random_scheme
)
]
split_test_module.user_partition_id = 10
validation = split_test_module.validate()
self.assertEqual(len(validation.messages), 1)
verify_validation_message(
validation.messages[0],
u"The experiment uses a group configuration that is not supported for experiments. "
u"Select a valid group configuration or delete this experiment.",
StudioValidationMessage.ERROR
)
verify_summary_message(
validation.summary,
u"This content experiment has issues that affect content visibility.",
StudioValidationMessage.ERROR
)
|
keithlee/shakeAppPyDev | refs/heads/master | django/conf/locale/bn/formats.py | 433 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F, Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M, Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
Nikea/VisTrails | refs/heads/master | vistrails/db/versions/v0_9_5/persistence/sql/__init__.py | 58 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
pass |
dcjohnson1989/selenium | refs/heads/master | py/selenium/webdriver/remote/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
almlab/SmileTrain | refs/heads/master | tools/fix_index_fastq.py | 1 | #!/usr/bin/env python
'''
Some index FASTQ files have a weird number of quality-line characters: some records have
an extra character, while others seem to have only a single character.
This script truncates quality lines that are longer than the sequence line and pads
quality lines that are shorter than the sequence line.
author: Scott W. Olesen <[email protected]>
'''
import argparse, sys, os, itertools
sys.path.append(os.path.normpath(os.path.abspath(__file__) + '/../..'))
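# Illustrative sketch, not part of the original script: the same pad/truncate rule that
# the main loop below applies to each record's quality line, e.g.
# _example_fix('ACGTACGT', 'IIIIIII') returns 'IIIIIIIF' and
# _example_fix('ACGT', 'IIIIII') returns 'IIII'.
def _example_fix(seq_line, quality_line, fill_char='F'):
    if len(quality_line) < len(seq_line):
        return quality_line.ljust(len(seq_line), fill_char)
    return quality_line[:len(seq_line)]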
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser(description='correct quality line length')
parser.add_argument('fastq', help='input barcode fastq')
parser.add_argument('-z', '--fill_char', default='F', help='fill character (default: F)')
parser.add_argument('-o', '--output', default=sys.stdout, type=argparse.FileType('w'), help='output fastq (default: stdout)')
args = parser.parse_args()
with open(args.fastq) as f:
for four_lines in itertools.izip(*[iter(f)]*4):
at_line, seq_line, plus_line, quality_line = [l.rstrip() for l in four_lines]
ls = len(seq_line)
lq = len(quality_line)
if lq < ls:
quality_line = quality_line.ljust(len(seq_line), args.fill_char)
elif lq > ls:
quality_line = quality_line[0: ls]
args.output.write("\n".join([at_line, seq_line, plus_line, quality_line]) + "\n") |
mattesno1/CouchPotatoServer | refs/heads/master | libs/pyutil/test/current/json_tests/test_float.py | 106 | import math
from unittest import TestCase
from pyutil import jsonutil as json
class TestFloat(TestCase):
def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]:
self.assertEquals(float(json.dumps(num)), num)
|
MusculoskeletalAtlasProject/mapclient-src | refs/heads/develop | mapclient/tools/pmr/core.py | 3 | import logging
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
from requests_oauthlib.oauth1_session import OAuth1Session
logger = logging.getLogger(__name__)
DEFAULT_SITE_URL = 'https://models.physiomeproject.org'
TEACHING_SITE_URL = 'https://teaching.physiomeproject.org'
DEFAULT_SCOPE = (
'{0}/pmr/scope/collection,'
'{0}/pmr/scope/search,'
'{0}/pmr/scope/workspace_tempauth,'
'{0}/pmr/scope/workspace_full'
).format(DEFAULT_SITE_URL,)
class TokenHelper(object):
request_token_endpoint = 'OAuthRequestToken'
authorize_token_endpoint = 'OAuthAuthorizeToken'
access_token_endpoint = 'OAuthGetAccessToken'
oauth_session_cls = OAuth1Session
def __init__(self, client_key, client_secret,
request_token=None,
request_secret=None,
callback_url='oob',
scope=DEFAULT_SCOPE,
site_url=DEFAULT_SITE_URL,
):
self.client_key = client_key
self.client_secret = client_secret
self.scope = scope
self.site_url = site_url
self.callback_url = callback_url
self.request_token = request_token
self.request_secret = request_secret
self.verifier = None
def get_temporary_credentials(self):
target = '%(site_url)s/%(endpoint)s?scope=%(scope)s' % {
'site_url': self.site_url,
'endpoint': self.request_token_endpoint,
'scope': quote_plus(self.scope),
}
oauth = self.oauth_session_cls(
client_key=self.client_key,
client_secret=self.client_secret,
callback_uri=self.callback_url,
)
logger.debug('Requesting temporary credentials from %s', target)
result = oauth.fetch_request_token(target)
self.request_token = result.get('oauth_token')
self.request_secret = result.get('oauth_token_secret')
return self.request_token, self.request_secret
def get_authorize_url(self):
if not self.request_token:
raise ValueError('no temporary credentials available')
target = '%(site_url)s/%(endpoint)s?oauth_token=%(token)s' % {
'site_url': self.site_url,
'endpoint': self.authorize_token_endpoint,
'token': self.request_token,
}
return target
def set_verifier(self, verifier):
self.verifier = verifier
def get_token_credentials(self):
if (not self.request_token or not self.request_secret or
not self.verifier):
raise ValueError('no temporary credentials available')
target = '%(site_url)s/%(endpoint)s' % {
'site_url': self.site_url,
'endpoint': self.access_token_endpoint,
}
logger.debug('Requesting token credentials from %s', target)
oauth = self.oauth_session_cls(
client_key=self.client_key,
client_secret=self.client_secret,
resource_owner_key=self.request_token,
resource_owner_secret=self.request_secret,
verifier=self.verifier,
)
token = oauth.fetch_access_token(target)
return token
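# Illustrative sketch, not part of the upstream module: the expected three-legged OAuth 1
# flow using only the methods defined above; the client key/secret and the verifier value
# are hypothetical placeholders.
def _example_flow():
    helper = TokenHelper('example-client-key', 'example-client-secret')
    helper.get_temporary_credentials()
    print(helper.get_authorize_url())         # the user authorizes access at this URL
    helper.set_verifier('verifier-from-pmr')  # PMR shows the verifier after authorization
    return helper.get_token_credentials()     # token credentials from requests_oauthlib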
|
abomyi/django | refs/heads/master | tests/validation/__init__.py | 500 | from django.core.exceptions import ValidationError
from django.test import TestCase
class ValidationTestCase(TestCase):
def assertFailsValidation(self, clean, failed_fields, **kwargs):
with self.assertRaises(ValidationError) as cm:
clean(**kwargs)
self.assertEqual(sorted(failed_fields), sorted(cm.exception.message_dict))
def assertFieldFailsValidationWithMessage(self, clean, field_name, message):
with self.assertRaises(ValidationError) as cm:
clean()
self.assertIn(field_name, cm.exception.message_dict)
self.assertEqual(message, cm.exception.message_dict[field_name])
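# Illustrative sketch, not part of the original helper: a concrete test built on
# ValidationTestCase, assuming a hypothetical ModelToValidate model whose "name" field is
# required. The class is created inside a function only so this sketch has no side effects.
def _example_usage():
    from .models import ModelToValidate  # hypothetical model module
    class PerformValidationTest(ValidationTestCase):
        def test_missing_required_field_fails(self):
            mtv = ModelToValidate(number=10)
            self.assertFailsValidation(mtv.full_clean, ['name'])
    return PerformValidationTest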
|
OneOneFour/ICSP_Monte_Carlo | refs/heads/master | sgc/sgc/locals.py | 1 | # Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
"""
Imports useful objects into the local namespace.
Constants:
GUI: Event type for any event emitted by this toolkit.
"""
import types
from .widgets._locals import GUI
import collections
class EventSlot(object):
"""
Event slots object. Allows dynamic allocation of events.
"""
__slots__ = ("_funcs",)
def __init__(self, widget, event, funcs=()):
"""
Args:
widget: The widget you want to bind this event slot to.
event: ``str`` The attribute you want to bind to (e.g. 'on_click').
funcs: A sequence of functions you want to include by default.
"""
assert event.startswith("on_") and hasattr(widget, event), \
"%r is not a valid event for %s" % (event, widget.__class__)
self._funcs = list(funcs)
setattr(widget, event, types.MethodType(self, widget, widget.__class__))
def __call__(self, widget):
"""Callback all registered functions for this event."""
for f in self._funcs:
f(widget)
def add(self, func):
"""
Add additional functions to be called.
Args:
func: A function or sequence of functions to be added.
"""
if isinstance(func, collections.Callable):
self._funcs.append(func)
else:
self._funcs.extend(func)
def remove(self, func):
"""
Remove functions from the existing set of functions.
Args:
func: A function or sequence of functions to be removed.
"""
try:
self._funcs.remove(func)
except ValueError:
for f in func:
self._funcs.remove(f)
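# Illustrative sketch, not part of the upstream module: wiring two callbacks to a
# hypothetical button widget's on_click event. EventSlot replaces the attribute with
# itself, so every registered callback runs when the widget fires the event.
def _example_usage(button):
    def play_click(widget):
        print "click"  # this module targets Python 2
    def log_click(widget):
        print widget
    slot = EventSlot(button, "on_click", funcs=(play_click,))
    slot.add(log_click)      # register a further callback later
    slot.remove(play_click)  # or unregister one
    return slot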
|
mrnamingo/enigma2-test | refs/heads/master | lib/python/Plugins/Extensions/Infopanel/SoftcamPanel.py | 2 | from Components.config import config, ConfigSubsection, ConfigText, configfile, getConfigListEntry, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaBlend
from Components.MenuList import MenuList
from Components.Label import Label
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import resolveFilename, SCOPE_CURRENT_PLUGIN, SCOPE_CURRENT_SKIN, fileExists
from Screens.MessageBox import MessageBox
from Screens.Console import Console
from enigma import *
import os
from Screens.CCcamInfo import CCcamInfoMain
from Screens.OScamInfo import OscamInfoMenu
def Check_Softcam():
found = False
if fileExists("/etc/enigma2/noemu"):
found = False
else:
for x in os.listdir('/etc'):
if x.find('.emu') > -1:
found = True
break;
return found
def command(comandline, strip=1):
comandline = comandline + " >/tmp/command.txt"
os.system(comandline)
text = ""
if os.path.exists("/tmp/command.txt") is True:
file = open("/tmp/command.txt", "r")
if strip == 1:
for line in file:
text = text + line.strip() + '\n'
else:
for line in file:
text = text + line
if text[-1:] != '\n': text = text + "\n"
file.close()
# if one or last line then remove linefeed
if text[-1:] == '\n': text = text[:-1]
comandline = text
os.system("rm /tmp/command.txt")
return comandline
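# Illustrative sketch, not part of the upstream plugin: command() runs a shell command,
# captures its stdout via /tmp/command.txt and returns it as a string; the same pattern
# is used by SoftcamPanel.isCamrunning() further down.
def _example_is_cam_running(camname):
    p = command('pidof %s |wc -w' % camname)
    return p.isdigit() and int(p) > 0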
#class EMUlist(MenuList):
# def __init__(self, list=[], enableWrapAround = False):
# MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
# Schriftart = 22
# self.l.setFont(0, gFont("Regular", Schriftart))
# self.l.setItemHeight(24)
# def moveSelection(self,idx=0):
# if self.instance is not None:
# self.instance.moveSelectionTo(idx)
SOFTCAM_SKIN = """<screen name="SoftcamPanel" position="center,center" size="500,450" title="Softcam Panel">
<eLabel font="Regular;22" position="10,10" size="185,25" text="Softcam Selection:" />
<widget font="Regular;18" name="camcount" position="420,10" size="60,25" />
<widget name="config" position="10,100" size="400,100" />
<eLabel backgroundColor="red" position="10,60" size="120,3" zPosition="0" />
<eLabel backgroundColor="green" position="130,60" size="120,3" zPosition="0" />
<eLabel backgroundColor="yellow" position="250,60" size="120,3" zPosition="0" />
<eLabel backgroundColor="blue" position="370,60" size="120,3" zPosition="0" />
<widget font="Regular;16" halign="center" name="key_red" position="10,62" size="120,35" transparent="1" valign="center" zPosition="2" />
<widget font="Regular;16" halign="center" name="key_green" position="130,62" size="120,35" transparent="1" valign="center" zPosition="2" />
<widget font="Regular;16" halign="center" name="key_yellow" position="250,62" size="120,35" transparent="1" valign="center" zPosition="2" />
<widget font="Regular;16" halign="center" name="key_blue" position="370,62" size="120,35" transparent="1" valign="center" zPosition="2" />
<eLabel backgroundColor="#56C856" position="0,199" size="500,1" zPosition="0" />
<widget font="Regular;16" name="actifcam" position="10,205" size="220,32" />
<widget font="Regular;16" name="actifcam2" position="250,205" size="220,32" />
<eLabel backgroundColor="#56C856" position="0,225" size="500,1" zPosition="0" />
<widget font="Regular;16" name="ecminfo" position="10,235" size="480,300" />
</screen>"""
REFRESH = 0
CCCAMINFO = 1
OSCAMINFO = 2
class SoftcamPanel(ConfigListScreen, Screen):
def __init__(self, session):
global emuDir
emuDir = "/etc/"
self.service = None
Screen.__init__(self, session)
self.skin = SOFTCAM_SKIN
self.onShown.append(self.setWindowTitle)
self.partyfeed = None
self.YellowAction = REFRESH
self.mlist = []
self["key_green"] = Label(_("Restart"))
self["key_red"] = Label(_("Stop"))
self["key_yellow"] = Label(_("Refresh"))
self.partyfeed = os.path.exists("/etc/opkg/3rdparty-feed.conf") or os.path.exists("/etc/opkg/3rd-party-feed.conf")
if self.partyfeed:
self["key_blue"]= Label(_("Install"))
else:
self["key_blue"]= Label(_("Exit"))
self["ecminfo"] = Label(_("No ECM info"))
self["actifcam"] = Label(_("no CAM 1 active"))
self["actifcam2"] = Label(_("no CAM 2 active"))
#// create listings
self.emuDirlist = []
self.emuList = []
self.emuBin = []
self.emuStart = []
self.emuStop = []
self.emuRgui = []
self.emuDirlist = os.listdir(emuDir)
self.ecmtel = 0
self.first = 0
global count
count = 0
#// check emu dir for config files
print "************ go in the emuloop ************"
for x in self.emuDirlist:
#// if file contains the string "emu" (then this is an emu config file)
if x.find("emu") > -1:
self.emuList.append(emuDir + x)
em = open(emuDir + x)
self.emuRgui.append(0)
#// read the emu config file
for line in em.readlines():
line1 = line
#// startcam
line = line1
if line.find("startcam") > -1:
line = line.split("=")
self.emuStart.append(line[1].strip())
#// stopcam
line = line1
if line.find("stopcam") > -1:
line = line.split("=")
self.emuStop.append(line[1].strip())
#// Restart GUI
line = line1
if line.find("restartgui") > -1:
self.emuRgui[count] = 1
#// binname
line = line1
if line.find("binname") > -1:
line = line.split("=")
self.emuBin.append(line[1].strip())
em.close()
count += 1
self.maxcount = count
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.ReadMenu()
self.createSetup()
self["ecminfo"].show()
self.read_shareinfo()
self.Timer = eTimer()
self.Timer.callback.append(self.layoutFinished)
self.Timer.start(2000, True)
#// get the remote buttons
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
{
"cancel": self.Exit,
"ok": self.ok,
"blue": self.Blue,
"red": self.Red,
"green": self.Green,
"yellow": self.Yellow,
}, -1)
#// update screen
self.onLayoutFinish.append(self.layoutFinished)
def setWindowTitle(self):
self.setTitle(_("Softcam Panel V2.0"))
def ReadMenu(self):
self.whichCam()
for x in self.emuDirlist:
#// if file contains the string "emu" (then this is an emu config file)
if x.find("emu") > -1:
self.emuList.append(emuDir + x)
em = open(emuDir + x)
self.emuRgui.append(0)
#// read the emu config file
for line in em.readlines():
line1 = line
#// emuname
line = line1
if line.find("emuname") > -1:
line = line.split("=")
self.mlist.append(line[1].strip())
name = line[1].strip()
em.close()
emusel = [_('no cam')]
for x in self.mlist:
emusel.append(x)
self.cam1sel = ConfigSelection(emusel)
self.cam2sel = ConfigSelection(emusel)
self.setYellowKey(self.curcam)
def whichCam(self):
#// check for active cam 1
cam = config.softcam.actCam.value
self.curcam = None
self.curcamIndex = None
if cam in self.mlist:
index = self.mlist.index(cam)
x = self.emuBin[index]
if self.isCamrunning(x):
self.curcam = x
self.curcamIndex = index
#// check for active cam 2
cam = config.softcam.actCam2.value
self.curcam2 = None
self.curcam2Index = None
if cam in self.mlist:
index = self.mlist.index(cam)
x = self.emuBin[index]
if self.isCamrunning(x):
self.curcam2 = x
self.curcam2Index = index
if not self.curcam and not self.curcam2 and self.mlist:
print "[SOFTCAMPANEL] try to find a running cam"
for cam in self.emuBin:
index = self.emuBin.index(cam)
if self.isCamrunning(cam):
self.curcam = cam
self.curcamIndex = index
camname = self.mlist[index]
print"[SOFTCAMPANEL] found %s running" % camname
self.Save_Settings(camname)
break
def createSetup(self):
self.editListEntry = None
self.list = []
self.list.append(getConfigListEntry(_("Select Cam 1"), self.cam1sel))
if len(self.emuStart) > 1:
self["actifcam2"].show()
if self.cam1sel.value != _('no cam') or config.softcam.actCam.value != _("no CAM 1 active"):
self.list.append(getConfigListEntry(_("Select Cam 2"), self.cam2sel))
if self.cam2sel.value != _('no cam'):
self.list.append(getConfigListEntry(_("Wait time before start Cam 2"), config.softcam.waittime))
else:
self["actifcam2"].hide()
self.cam2sel.setValue(_('no cam'))
self["config"].list = self.list
self["config"].setList(self.list)
def setYellowKey(self, cam):
if cam == None or cam == _('no cam'):
self.YellowAction = REFRESH
self["key_yellow"].setText(_("Refresh"))
return
if cam.upper().startswith('CCCAM'):
self.YellowAction = CCCAMINFO
self["key_yellow"].setText(_("CCcamInfo"))
elif cam.upper().startswith('OSCAM'):
self.YellowAction = OSCAMINFO
self["key_yellow"].setText(_("OscamInfo"))
else:
self.YellowAction = REFRESH
self["key_yellow"].setText(_("Refresh"))
def selectionChanged(self):
#self["status"].setText(self["config"].getCurrent()[0])
pass
def changedEntry(self):
for x in self.onChangedEntry:
x()
self.selectionChanged()
self.createSetup()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def getCurrentDescription(self):
return self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2] or ""
def layoutFinished(self):
self.Timer.stop()
if not Check_Softcam():
self.Exit()
#// check for active cam
try:
self.whichCam()
global oldcamIndex, oldcam2Index
oldcamIndex = -1
oldcam2Index = -1
tel = 0
if self.curcam:
oldcamIndex = self.curcamIndex
actcam = self.mlist[oldcamIndex]
if self.first == 0:
self.cam1sel.setValue(actcam)
self["key_green"].setText(_("Restart"))
self["actifcam"].setText(_("active CAM 1: ") + actcam )
print '[SOFTCAM] set active cam 1 to: ' + actcam
else:
actcam = _("no CAM 1 active")
self["actifcam"].setText(actcam)
if self.curcam2:
oldcam2Index = self.curcam2Index
actcam = self.mlist[oldcam2Index]
if self.first == 0:
self.cam2sel.setValue(actcam)
self["actifcam2"].setText(_("active CAM 2: ") + actcam )
print '[SOFTCAM] set active cam 2 to: ' + actcam
else:
actcam2 = _("no CAM 2 active")
self["actifcam2"].setText(actcam2)
if self.first == 0: # Only update first time or when refresh button was pressed
self.createSetup()
self.first = 1
#// CAM IS NOT RUNNING
if not self.curcam and not self.curcam2:
self["key_green"].setText(_("Start"))
self.YellowAction = REFRESH
self["key_yellow"].setText(_("Refresh"))
if os.path.exists('/tmp/ecm.info') is True:
os.system("rm /tmp/ecm.info")
if os.path.exists('/tmp/ecm0.info') is True:
os.system("rm /tmp/ecm0.info")
except:
pass
if self["config"].getCurrent()[0] == _("Select Cam 1"):
self.setYellowKey(self.curcam)
else:
self.setYellowKey(self.curcam2)
#// read ecm.info
ecmi = ""
if os.path.exists('/tmp/ecm.info') is True:
ecmi = self.read_ecm('/tmp/ecm.info')
elif os.path.exists('/tmp/ecm1.info') is True:
ecmi = self.read_ecm('/tmp/ecm1.info')
else:
ecmi = _("No ECM info")
ecmold = self["ecminfo"].getText()
if ecmold == ecmi:
self.ecmtel += 1
if self.ecmtel > 5:
ecmi = _("No new ECM info")
else:
self.ecmtel = 0
self["ecminfo"].setText(ecmi)
self.Timer.start(2000, True) #reset timer
def read_shareinfo(self):
#// read share.info and put in list
self.shareinfo =[]
if os.path.exists('/tmp/share.info') is True:
s = open('/tmp/share.info')
for x in s.readlines():
self.shareinfo.append(x)
s.close()
def read_ecm(self, ecmpath):
#// read ecm.info and check for share.info
ecmi2 = ''
Caid = ''
Prov = ''
f = open(ecmpath)
for line in f.readlines():
line= line.replace('=', '')
line= line.replace(' ', '', 1)
#// search CaID
if line.find('ECM on CaID') > -1:
k = line.find('ECM on CaID') + 14
Caid = line[k:k+4]
#// search Boxid
if line.find('prov:') > -1:
tmpprov = line.split(':')
Prov = tmpprov[1].strip()
#// search peer in share.info only if share.info exists
if Caid <> '' and Prov <> '' and len(self.shareinfo) > 0 :
for x in self.shareinfo:
cel = x.split(' ')
#// search Boxid and Caid
if cel[5][0:4] == Caid and cel[9][3:7] == Prov:
line = 'Peer: ' + Prov + ' - ' + cel[3] + ' - ' + cel[8] + '\n'
break
ecmi2 = ecmi2 + line
f.close()
return ecmi2
def Red(self):
#// Stopping the CAM when pressing the RED button
self.Timer.stop()
self.Stopcam()
self.Timer.start(2000, True) #reset timer
def Yellow(self):
if self.YellowAction == CCCAMINFO:
self.Timer.stop()
self.session.openWithCallback(self.ShowSoftcamCallback, CCcamInfoMain)
elif self.YellowAction == OSCAMINFO:
self.Timer.stop()
self.session.openWithCallback(self.ShowSoftcamCallback, OscamInfoMenu)
else:
self.first = 0
self.layoutFinished()
def Green(self):
#// Start the CAM when pressing the GREEN button
self.Timer.stop()
self.Startcam()
self.Timer.start(2000, True) #reset timer
def Exit(self):
self.Timer.stop()
self.close()
def Blue(self):
if not self.partyfeed:
self.Exit()
else:
self.Timer.stop()
self.session.openWithCallback(self.ShowSoftcamCallback, ShowSoftcamPackages)
def ShowSoftcamCallback(self):
self.Timer.start(2000, True)
def ok(self):
#// Exit Softcam when pressing the OK button
self.Timer.stop()
self.Startcam()
if self.YellowAction == REFRESH:
self.Yellow()
self.Timer.start(2000, True) #reset timer
def Stopcam(self):
#// Stopping the CAM
global oldcamIndex, oldcam2Index
if oldcamIndex >= 0:
oldcam = self.emuBin[oldcamIndex]
else:
oldcam = None
if oldcam2Index >= 0:
oldcam2 = self.emuBin[oldcam2Index]
else:
oldcam2 = None
import time
self.container = eConsoleAppContainer()
if config.softcam.camstartMode.value == "0" or not fileExists('/etc/init.d/softcam'):
if oldcam:
print '[SOFTCAM] Python stop cam 1: ' + oldcam
self.container.execute(self.emuStop[oldcamIndex])
time.sleep(1) # was 5sec
t = 0
while t < 5:
p = command('pidof %s |wc -w' % oldcam )
if not p.isdigit(): p=0
if int(p) > 0:
self.container = eConsoleAppContainer()
self.container.execute('killall -9 ' + oldcam)
t += 1
time.sleep(1)
else:
t = 5
if oldcam2:
print '[SOFTCAM] Python stop cam 2: ' + oldcam2
self.container.execute(self.emuStop[oldcam2Index])
time.sleep(1) # was 5sec
t = 0
while t < 5:
p = command('pidof %s |wc -w' % oldcam2 )
if not p.isdigit(): p=0
if int(p) > 0:
self.container = eConsoleAppContainer()
self.container.execute('killall -9 ' + oldcam2)
t += 1
time.sleep(1)
else:
t = 5
else:
self.container.execute('/etc/init.d/softcam.cam1 stop')
self.container.execute('/etc/init.d/softcam.cam2 stop')
if os.path.exists('/tmp/ecm.info') is True:
os.system("rm /tmp/ecm.info")
actcam = _("no CAM 1 active")
actcam2 = _("no CAM 2 active")
self["actifcam"].setText(actcam)
self["actifcam2"].setText(actcam2)
self["key_green"].setText(_("Start"))
self["ecminfo"].setText(_("No ECM info"))
self.Save_Settings(actcam)
self.Save_Settings2(actcam2)
def Startcam(self):
#// Starting the CAM
try:
if count > 0:
if self.cam1sel.value == self.cam2sel.value:
self.session.openWithCallback(self.doNothing, MessageBox, _("No Cam started !!\n\nCam 1 must be different from Cam 2"), MessageBox.TYPE_ERROR, simple=True)
return
if config.softcam.camstartMode.value == "0":
self.Stopcam()
self.camIndex = self.cam1sel.getIndex() -1
self.cam2Index = self.cam2sel.getIndex() - 1
if self.camIndex >= 0:
actcam = self.cam1sel.value
self["actifcam"].setText(_("active CAM 1: ") + actcam)
self.Save_Settings(actcam)
start = self.emuStart[self.camIndex]
if self.checkBinName(self.emuBin[self.camIndex], start):
self.session.openWithCallback(self.startcam2, MessageBox, actcam + _(" Not Started !!\n\nCam binname must be in the start command line\nCheck your emu config file"), MessageBox.TYPE_ERROR, simple=True)
return
if config.softcam.camstartMode.value == "0":
print '[SOFTCAM] Python start cam 1: ' + actcam
self.session.openWithCallback(self.waitTime, MessageBox, _("Starting Cam 1: ") + actcam, MessageBox.TYPE_WARNING, timeout=5, simple=True)
self.container = eConsoleAppContainer()
self.container.execute(start)
else:
# Create INIT.D start
self.session.openWithCallback(self.doNothing, MessageBox, _("Creating start scripts and starting the cam"), MessageBox.TYPE_WARNING, timeout=10, simple=True)
self.Save_Settings2(self.cam2sel.value)
camname1 = self.emuBin[self.camIndex]
camname2 = self.emuBin[self.cam2Index]
self.deleteInit()
camname = "/usr/bin/" + camname1
startcmd = self.emuStart[self.camIndex]
stopcmd = self.emuStop[self.camIndex]
self.createInitdscript("cam1", camname, startcmd, stopcmd)
if self.cam2Index >= 0:
camname = "/usr/bin/" + camname2
startcmd = self.emuStart[self.cam2Index]
stopcmd = self.emuStop[self.cam2Index]
self.createInitdscript("cam2", camname, startcmd, stopcmd, config.softcam.waittime.value)
self["key_green"].setText(_("Restart"))
except:
pass
def waitTime(self, ret):
if self.cam2Index >= 0:
if config.softcam.waittime.value == '0':
self.startcam2(None)
else:
self.session.openWithCallback(self.startcam2, MessageBox, _("Waiting..."), MessageBox.TYPE_WARNING, timeout=int(config.softcam.waittime.value), simple=True)
def doNothing(self, ret):
pass
def startcam2(self, ret):
camIndex = self.cam2Index
if camIndex >= 0:
actcam = self.cam2sel.value
self["actifcam2"].setText(_("active CAM 2: ") + actcam)
self.Save_Settings2(actcam)
start = self.emuStart[camIndex]
if self.checkBinName(self.emuBin[self.cam2Index], start):
self.session.open(MessageBox, actcam + _(" Not Started !!\n\nCam binname must be in the start command line\nCheck your emu config file"), MessageBox.TYPE_ERROR, simple=True)
return
print '[SOFTCAM] Python start cam 2: ' + actcam
self.session.open(MessageBox, _("Starting Cam 2: ") + actcam, MessageBox.TYPE_WARNING, timeout=5, simple=True)
self.container = eConsoleAppContainer()
self.container.execute(start)
def Save_Settings(self, cam_name):
#// Save Came Name to Settings file
config.softcam.actCam.setValue(cam_name)
config.softcam.save()
configfile.save()
def Save_Settings2(self, cam_name):
#// Save Came Name to Settings file
config.softcam.actCam2.setValue(cam_name)
config.softcam.save()
configfile.save()
def isCamrunning(self, cam):
p = command('pidof ' + cam + ' |wc -w')
if not p.isdigit(): p=0
if int(p) > 0:
return True
else:
return False
def checkBinName(self, binname, start):
print "[CHECKBINNAME] bin=%s ,start=%s" %(binname,start)
if start.find(binname + ' ') > -1:
print "[CHECKBINNAME] OK"
return False
else:
if start[start.rfind('/')+1:] == binname:
print "[CHECKBINNAME] OK"
return False
else:
print "[CHECKBINNAME] ERROR"
return True
def createInitdscript(self, camname, emubin, start, stop, wait=None):
Adir = "/etc/init.d/softcam." + camname
softcamfile = []
softcamfile.append('#!/bin/sh')
softcamfile.append('DAEMON=%s' % emubin)
softcamfile.append('STARTCAM="%s"' % start)
softcamfile.append('STOPCAM="%s"' % stop)
softcamfile.append('DESC="Softcam"')
softcamfile.append('')
softcamfile.append('test -f $DAEMON || exit 0')
softcamfile.append('set -e')
softcamfile.append('')
softcamfile.append('case "$1" in')
softcamfile.append(' start)')
softcamfile.append(' echo -n "starting $DESC: $DAEMON... "')
if wait:
softcamfile.append(' sleep ' + wait)
softcamfile.append(' $STARTCAM')
softcamfile.append(' echo "done."')
softcamfile.append(' ;;')
softcamfile.append(' stop)')
softcamfile.append(' echo -n "stopping $DESC: $DAEMON... "')
softcamfile.append(' $STOPCAM')
softcamfile.append(' echo "done."')
softcamfile.append(' ;;')
softcamfile.append(' restart)')
softcamfile.append(' echo "restarting $DESC: $DAEMON... "')
softcamfile.append(' $0 stop')
softcamfile.append(' echo "wait..."')
softcamfile.append(' sleep 5')
softcamfile.append(' $0 start')
softcamfile.append(' echo "done."')
softcamfile.append(' ;;')
softcamfile.append(' *)')
softcamfile.append(' echo "Usage: $0 {start|stop|restart}"')
softcamfile.append(' exit 1')
softcamfile.append(' ;;')
softcamfile.append('esac')
softcamfile.append('')
softcamfile.append('exit 0')
f = open( Adir, "w" )
for x in softcamfile:
f.writelines(x + '\n')
f.close()
self.container = eConsoleAppContainer()
# Set execute rights
os.chmod(Adir,0755)
# Create symbolic link for startup
if not os.path.exists("/etc/rc2.d/S20softcam." + camname):
self.container.execute('update-rc.d -f softcam.' + camname + ' defaults')
# Wait a few seconds
import time
time.sleep (3)
# Start cam
if self.isCamrunning(emubin):
self.container.execute('/etc/init.d/softcam.' + camname + ' restart')
else:
self.container.execute('/etc/init.d/softcam.' + camname + ' start')
def deleteInit(self):
if os.path.exists("/etc/rc2.d/S20softcam.cam1"):
print "Delete Symbolink link"
self.container = eConsoleAppContainer()
self.container.execute('update-rc.d -f softcam.cam1 defaults')
if os.path.exists("/etc/init.d/softcam.cam1"):
print "Delete softcam init script cam1"
os.system("rm /etc/init.d/softcam.cam1")
if os.path.exists("/etc/rc2.d/S20softcam.cam2"):
print "Delete Symbolink link"
self.container = eConsoleAppContainer()
self.container.execute('update-rc.d -f softcam.cam2 defaults')
if os.path.exists("/etc/init.d/softcam.cam2"):
print "Delete softcam init script cam2"
os.system("rm /etc/init.d/softcam.cam2")
class ShowSoftcamPackages(Screen):
skin = """
<screen name="ShowSoftcamPackages" position="center,center" size="630,500" title="Install Softcams" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_ok" render="Label" position="240,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="620,420" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 1), size = (540, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (5, 26), size = (540, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaBlend(pos = (545, 2), size = (48, 48), png = 4), # index 4 is the status pixmap
				MultiContentEntryPixmapAlphaBlend(pos = (5, 50), size = (510, 2), png = 5), # index 5 is the div pixmap
],
"fonts": [gFont("Regular", 22),gFont("Regular", 14)],
"itemHeight": 52
}
</convert>
</widget>
</screen>"""
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.session = session
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
{
"red": self.exit,
"ok": self.go,
"cancel": self.exit,
"green": self.startupdateList,
}, -1)
self.list = []
self.statuslist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Reload"))
self["key_ok"] = StaticText(_("Install"))
self.oktext = _("\nPress OK on your remote control to continue.")
self.onShown.append(self.setWindowTitle)
self.setStatus('list')
self.Timer1 = eTimer()
self.Timer1.callback.append(self.rebuildList)
self.Timer1.start(1000, True)
self.Timer2 = eTimer()
self.Timer2.callback.append(self.updateList)
def go(self, returnValue = None):
cur = self["list"].getCurrent()
if cur:
status = cur[3]
self.package = cur[2]
if status == "installable":
self.session.openWithCallback(self.runInstall, MessageBox, _("Do you want to install the package:\n") + self.package + "\n" + self.oktext)
def runInstall(self, result):
if result:
self.session.openWithCallback(self.runInstallCont, Console, cmdlist = ['opkg install ' + self.package], closeOnSuccess = True)
def runInstallCont(self):
ret = command('opkg list-installed | grep ' + self.package + ' | cut -d " " -f1')
if ret != self.package:
self.session.open(MessageBox, _("Install Failed !!"), MessageBox.TYPE_ERROR, timeout = 10)
else:
self.session.open(MessageBox, _("Install Finished."), MessageBox.TYPE_INFO, timeout = 10)
self.setStatus('list')
self.Timer1.start(1000, True)
def UpgradeReboot(self, result):
if result is None:
return
def exit(self):
self.close()
def setWindowTitle(self):
self.setTitle(_("Install Softcams"))
def setStatus(self,status = None):
if status:
self.statuslist = []
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/Infopanel/icons/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Trying to download a new updatelist. Please wait..." ),'', statuspng, divpng ))
self['list'].setList(self.statuslist)
if status == 'list':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/Infopanel/icons/upgrade.png"))
self.statuslist.append(( _("Package list"), '', _("Getting Softcam list. Please wait..." ),'', statuspng, divpng ))
self['list'].setList(self.statuslist)
elif status == 'error':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/Infopanel/icons/remove.png"))
self.statuslist.append(( _("Error"), '', _("There was an error downloading the updatelist. Please try again." ),'', statuspng, divpng ))
self['list'].setList(self.statuslist)
def startupdateList(self):
self.setStatus('update')
self.Timer2.start(1000, True)
def updateList(self):
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.doneupdateList)
self.setStatus('list')
self.container.execute('opkg update')
def doneupdateList(self, answer):
self.container.appClosed.remove(self.doneupdateList)
self.Timer1.start(1000, True)
def rebuildList(self):
self.list = []
self.Flist = []
self.Elist = []
t = command('opkg list | grep "enigma2-plugin-softcams-"')
self.Flist = t.split('\n')
tt = command('opkg list-installed | grep "enigma2-plugin-softcams-"')
self.Elist = tt.split('\n')
if len(self.Flist) > 0:
self.buildPacketList()
else:
self.setStatus('error')
def buildEntryComponent(self, name, version, description, state):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if not description:
description = ""
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/Infopanel/icons/installed.png"))
return((name, version, _(description), state, installedpng, divpng))
def buildPacketList(self):
fetchedList = self.Flist
excludeList = self.Elist
if len(fetchedList) > 0:
for x in fetchedList:
x_installed = False
Fx = x.split(' - ')
try:
if Fx[0].find('-softcams-') > -1:
for exc in excludeList:
Ex = exc.split(' - ')
if Fx[0] == Ex[0]:
x_installed = True
break
if x_installed == False:
self.list.append(self.buildEntryComponent(Fx[2], Fx[1], Fx[0], "installable"))
except:
pass
self['list'].setList(self.list)
else:
self.setStatus('error') |
garwedgess/android_kernel_lge_g4 | refs/heads/M | tools/perf/tests/attr.py | 3174 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
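        # e.g. '0|1' matches '1', and '*' on either side matches any value.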
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
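#
# An illustrative (made-up) test description following this layout:
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   enable_on_exec = 1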
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing the 'parent event' to be
        # loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
MoritzS/django | refs/heads/master | django/db/migrations/loader.py | 8 | import os
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader:
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
"""
Return the path to the migrations module for the specified app_label
and a boolean indicating if the module is specified in
settings.MIGRATION_MODULE.
"""
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label], True
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.migrations_module(app_config.label)
if module_name is None:
self.unmigrated_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if ((explicit and self.ignore_no_migrations) or (
not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
self.unmigrated_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
reload(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
for migration_name in migration_names:
migration_module = import_module("%s.%s" % (module_name, migration_name))
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
migration_name,
app_config.label,
)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
if migration_app_label == app_label and migration_name.startswith(name_prefix):
results.append((migration_app_label, migration_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return list(self.graph.root_nodes(key[0]))[0]
else: # "__latest__"
return list(self.graph.leaf_nodes(key[0]))[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def add_internal_dependencies(self, key, migration):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325).
continue
self.graph.add_dependency(migration, key, parent, skip_validation=True)
def add_external_dependencies(self, key, migration):
for parent in migration.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(migration, key, parent, skip_validation=True)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(migration, child, key, skip_validation=True)
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = MigrationGraph()
self.replacements = {}
for key, migration in self.disk_migrations.items():
self.graph.add_node(key, migration)
# Internal (aka same-app) dependencies.
self.add_internal_dependencies(key, migration)
# Replacing migrations.
if migration.replaces:
self.replacements[key] = migration
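            # A hypothetical squashed migration providing such a 'replaces' list:
            #     class Migration(migrations.Migration):
            #         replaces = [('shop', '0001_initial'), ('shop', '0002_add_field')]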
# Add external dependencies now that the internal ones have been resolved.
for key, migration in self.disk_migrations.items():
self.add_external_dependencies(key, migration)
# Carry out replacements where possible.
for key, migration in self.replacements.items():
# Get applied status of each of this migration's replacement targets.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
# Ensure the replacing migration is only marked as applied if all of
# its replacement targets are.
if all(applied_statuses):
self.applied_migrations.add(key)
else:
self.applied_migrations.discard(key)
# A replacing migration can be used if either all or none of its
# replacement targets have been applied.
if all(applied_statuses) or (not any(applied_statuses)):
self.graph.remove_replaced_nodes(key, migration.replaces)
else:
# This replacing migration cannot be used because it is partially applied.
# Remove it from the graph and remap dependencies to it (#25945).
self.graph.remove_replacement_node(key, migration.replaces)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
# Check if the missing node could have been replaced by any squash
# migration but wasn't because the squash migration was partially
# applied before. In that case raise a more understandable exception
# (#23556).
# Get reverse replacements.
reverse_replacements = {}
for key, migration in self.replacements.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Try to reraise exception with more detail.
if exc.node in reverse_replacements:
candidates = reverse_replacements.get(exc.node, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
raise NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
exc.origin, exc.node[0], exc.node[1], tries
),
exc.node
) from exc
raise exc
def check_consistent_history(self, connection):
"""
Raise InconsistentMigrationHistory if any applied migrations have
unapplied dependencies.
"""
recorder = MigrationRecorder(connection)
applied = recorder.applied_migrations()
for migration in applied:
# If the migration is unknown, skip it.
if migration not in self.graph.nodes:
continue
for parent in self.graph.node_map[migration].parents:
if parent not in applied:
# Skip unapplied squashed migrations that have all of their
# `replaces` applied.
if parent in self.replacements:
if all(m in applied for m in self.replacements[parent].replaces):
continue
raise InconsistentMigrationHistory(
"Migration {}.{} is applied before its dependency "
"{}.{} on database '{}'.".format(
migration[0], migration[1], parent[0], parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
See graph.make_state for the meaning of "nodes" and "at_end"
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
|
thSoft/lilypond-hu | refs/heads/translation | python/lilylib.py | 5 | # This file is part of LilyPond, the GNU music typesetter.
#
# Copyright (C) 1998--2015 Han-Wen Nienhuys <[email protected]>
# Jan Nieuwenhuizen <[email protected]>
#
# LilyPond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LilyPond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LilyPond. If not, see <http://www.gnu.org/licenses/>.
import __main__
import glob
import os
import re
import shutil
import sys
import optparse
import time
################################################################
# Users of python modules should include this snippet
# and customize variables below.
# Python 2.5 only accepts strings with proper Python internal encoding
# (i.e. ASCII or Unicode) when writing to stdout/stderr, so we must
# use ugettext iso gettext, and encode the string when writing to
# stdout/stderr
localedir = '@localedir@'
try:
import gettext
t = gettext.translation ('lilypond', localedir)
_ = t.ugettext
ungettext = t.ungettext
except:
def _ (s):
return s
def ungettext (s, p, n):
if n == 1:
return s
return p
underscore = _
# Urg, Python 2.4 does not define stderr/stdout encoding
# Maybe guess encoding from LANG/LC_ALL/LC_CTYPE?
reload (sys)
sys.setdefaultencoding ('utf-8')
import codecs
sys.stdout = codecs.getwriter ('utf8') (sys.stdout)
sys.stderr = codecs.getwriter ('utf8') (sys.stderr)
def encoded_write(f, s):
f.write (s.encode (f.encoding or 'utf-8', 'replace'))
# ugh, Python 2.5 optparse requires Unicode strings in some argument
# functions, and refuse them in some other places
def display_encode (s):
return s.encode (sys.stderr.encoding or 'utf-8', 'replace')
# Lilylib globals.
program_version = '@TOPLEVEL_VERSION@'
program_name = os.path.basename (sys.argv[0])
# Check if program_version contains @ characters. This will be the case if
# the .py file is called directly while building the lilypond documentation.
# If so, try to check for the env var LILYPOND_VERSION, which is set by our
# makefiles and use its value.
at_re = re.compile (r'@')
if at_re.match (program_version):
if os.environ.has_key('LILYPOND_VERSION'):
program_version = os.environ['LILYPOND_VERSION']
else:
program_version = "unknown"
# Logging framework: We have the following output functions:
# error
# warning
# progress
# debug
loglevels = {"NONE":0, "ERROR":1, "WARN":2, "BASIC":3, "PROGRESS":4, "INFO":5, "DEBUG":6}
loglevel = loglevels["PROGRESS"]
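# e.g. set_loglevel ("DEBUG") (defined below) enables all of the output
# functions listed above; at the default "PROGRESS" level debug_output is hidden.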
def set_loglevel (l):
global loglevel
newlevel = loglevels.get (l, -1)
if newlevel > 0:
debug_output (_ ("Setting loglevel to %s") % l)
loglevel = newlevel
else:
error (_ ("Unknown or invalid loglevel '%s'") % l)
def handle_loglevel_option (option, opt_str, value, parser, *args):
if value:
set_loglevel (value);
elif args:
set_loglevel (args[0]);
def is_loglevel (l):
global loglevel
return loglevel >= loglevels[l];
def is_verbose ():
return is_loglevel ("DEBUG")
def stderr_write (s):
encoded_write (sys.stderr, s)
def print_logmessage (level, s, fullmessage = True, newline = True):
if (is_loglevel (level)):
if fullmessage:
stderr_write (program_name + ": " + s + '\n')
elif newline:
stderr_write (s + '\n')
else:
stderr_write (s)
def error (s):
print_logmessage ("ERROR", _ ("error: %s") % s);
def warning (s):
print_logmessage ("WARN", _ ("warning: %s") % s);
def basic_progress (s):
print_logmessage ("BASIC", s);
def progress (s, fullmessage = False, newline = True):
print_logmessage ("PROGRESS", s, fullmessage, newline);
def debug_output (s, fullmessage = False, newline = True):
print_logmessage ("DEBUG", s, fullmessage, newline);
def require_python_version ():
if sys.hexversion < 0x02040000:
error ("Python 2.4 or newer is required to run this program.\n\
Please upgrade Python from http://python.org/download/, and if you use MacOS X,\n\
please read 'Setup for MacOS X' in Application Usage.")
os.system ("open http://python.org/download/")
sys.exit (2)
# A modified version of the commands.mkarg(x) that always uses
# double quotes (since Windows can't handle the single quotes)
# and escapes the characters \, $, ", and ` for unix shells.
def mkarg(x):
if os.name == 'nt':
return ' "%s"' % x
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
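# For illustration, on a unix shell:
#   mkarg('say "hi" $USER') -> ' "say \"hi\" \$USER"'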
def command_name (cmd):
    # Strip all stuff after the command,
# deal with "((latex ) >& 1 ) .." too
cmd = re.match ('([\(\)]*)([^\\\ ]*)', cmd).group (2)
return os.path.basename (cmd)
def subprocess_system (cmd,
ignore_error=False,
progress_p=True,
be_verbose=False,
redirect_output=False,
log_file=None):
import subprocess
show_progress= progress_p
name = command_name (cmd)
error_log_file = ''
if redirect_output:
progress (_ ("Processing %s.ly") % log_file)
else:
if be_verbose:
show_progress = 1
progress (_ ("Invoking `%s\'") % cmd)
else:
progress ( _("Running %s...") % name)
stdout_setting = None
stderr_setting = None
if not show_progress:
stdout_setting = subprocess.PIPE
if redirect_output:
stderr_filename = log_file + '.log'
stderr_setting = open(stderr_filename, 'w')
proc = subprocess.Popen (cmd,
shell=True,
universal_newlines=True,
stdout=stdout_setting,
stderr=stderr_setting)
log = ''
if redirect_output:
while proc.poll()==None:
time.sleep(0.01)
retval = proc.returncode
stderr_setting.close()
else:
if show_progress:
retval = proc.wait()
else:
log = proc.communicate ()
retval = proc.returncode
if retval:
print >>sys.stderr, 'command failed:', cmd
if retval < 0:
print >>sys.stderr, "Child was terminated by signal", -retval
elif retval > 0:
print >>sys.stderr, "Child returned", retval
if ignore_error:
print >>sys.stderr, "Error ignored by lilylib"
else:
if not show_progress:
print log[0]
print log[1]
sys.exit (1)
return abs (retval)
def ossystem_system (cmd,
ignore_error=False,
progress_p=True,
be_verbose=False,
redirect_output=False,
log_file=None):
name = command_name (cmd)
if be_verbose:
show_progress = 1
progress (_ ("Invoking `%s\'") % cmd)
else:
progress ( _("Running %s...") % name)
retval = os.system (cmd)
if retval:
print >>sys.stderr, 'command failed:', cmd
if retval < 0:
print >>sys.stderr, "Child was terminated by signal", -retval
elif retval > 0:
print >>sys.stderr, "Child returned", retval
if ignore_error:
print >>sys.stderr, "Error ignored"
else:
sys.exit (1)
return abs (retval)
system = subprocess_system
if sys.platform == 'mingw32':
## subprocess x-compile doesn't work.
system = ossystem_system
def strip_extension (f, ext):
(p, e) = os.path.splitext (f)
if e == ext:
e = ''
return p + e
def search_exe_path (name):
p = os.environ['PATH']
exe_paths = p.split (':')
for e in exe_paths:
full = os.path.join (e, name)
if os.path.exists (full):
return full
return None
def print_environment ():
for (k,v) in os.environ.items ():
sys.stderr.write ("%s=\"%s\"\n" % (k, v))
class NonDentedHeadingFormatter (optparse.IndentedHelpFormatter):
def format_heading(self, heading):
if heading:
return heading[0].upper() + heading[1:] + ':\n'
return ''
def format_option_strings(self, option):
sep = ' '
if option._short_opts and option._long_opts:
sep = ','
metavar = ''
if option.takes_value():
            metavar = '=%s' % (option.metavar or option.dest.upper())
return "%3s%s %s%s" % (" ".join (option._short_opts),
sep,
" ".join (option._long_opts),
metavar)
# Only use one level of indentation (even for groups and nested groups),
    # since we don't indent the headings, either
def indent(self):
self.current_indent = self.indent_increment
self.level += 1
def dedent(self):
self.level -= 1
if self.level <= 0:
self.current_indent = ''
self.level = 0;
def format_usage(self, usage):
return _("Usage: %s") % usage + '\n'
def format_description(self, description):
return description
class NonEmptyOptionParser (optparse.OptionParser):
"A subclass of OptionParser that gobbles empty string arguments."
def parse_args (self, args=None, values=None):
options, args = optparse.OptionParser.parse_args (self, args, values)
return options, filter (None, args)
def get_option_parser (*args, **kwargs):
p = NonEmptyOptionParser (*args, **kwargs)
p.formatter = NonDentedHeadingFormatter ()
p.formatter.set_parser (p)
return p
|
jabesq/home-assistant | refs/heads/dev | homeassistant/components/daikin/const.py | 8 | """Constants for Daikin."""
from homeassistant.const import CONF_ICON, CONF_NAME, CONF_TYPE
ATTR_TARGET_TEMPERATURE = 'target_temperature'
ATTR_INSIDE_TEMPERATURE = 'inside_temperature'
ATTR_OUTSIDE_TEMPERATURE = 'outside_temperature'
SENSOR_TYPE_TEMPERATURE = 'temperature'
SENSOR_TYPES = {
ATTR_INSIDE_TEMPERATURE: {
CONF_NAME: 'Inside Temperature',
CONF_ICON: 'mdi:thermometer',
CONF_TYPE: SENSOR_TYPE_TEMPERATURE
},
ATTR_OUTSIDE_TEMPERATURE: {
CONF_NAME: 'Outside Temperature',
CONF_ICON: 'mdi:thermometer',
CONF_TYPE: SENSOR_TYPE_TEMPERATURE
}
}
KEY_MAC = 'mac'
KEY_IP = 'ip'
|
clee704/NaverWebtoonFeeds | refs/heads/master | naverwebtoonfeeds/cache.py | 1 | """
naverwebtoonfeeds.cache
~~~~~~~~~~~~~~~~~~~~~~~
Implements a customized version of :class:`flask_cache.Cache` and
customized versions of :class:`werkzeug.contrib.cache.RedisCache`.
"""
import zlib
from datetime import timedelta
from flask_cache import Cache as CacheBase
from werkzeug.contrib.cache import RedisCache as RedisCacheBase
from ._compat import string_types
from .redis_ import from_url, Redis
LONG_LONG_TIME = int(timedelta(days=365).total_seconds())
class Cache(CacheBase):
def set_permanently(self, *args, **kwargs):
"""Sets the value almost permanently (a year by default)."""
kwargs['timeout'] = LONG_LONG_TIME
return self.set(*args, **kwargs)
class RedisCache(RedisCacheBase):
redis_class = Redis
def __init__(self, host='localhost', port=6379, password=None,
db=0, default_timeout=300, key_prefix=None):
if isinstance(host, string_types):
client = self.redis_class(host=host, port=port, password=password,
db=db)
else:
client = host
super(RedisCache, self).__init__(client, port, password, db,
default_timeout, key_prefix)
class CompressedRedisCache(RedisCache):
def dump_object(self, value):
serialized_str = super(CompressedRedisCache, self).dump_object(value)
try:
return zlib.compress(serialized_str)
except zlib.error:
return serialized_str
def load_object(self, value):
try:
serialized_str = zlib.decompress(value)
except (zlib.error, TypeError):
serialized_str = value
return super(CompressedRedisCache, self).load_object(serialized_str)
def redis(app, config, args, kwargs):
"""Returns a :class:`RedisCache`. Compatible with Flask-Cache.
"""
return _redis_backend(app, config, args, kwargs, RedisCache)
def compressedredis(app, config, args, kwargs):
"""Returns a :class:`CompressedRedisCache`. Compatible with Flask-Cache.
"""
return _redis_backend(app, config, args, kwargs, CompressedRedisCache)
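# Illustrative Flask-Cache configuration for the factories above; the dotted
# path and URL are assumptions for this example only:
#   CACHE_TYPE = 'naverwebtoonfeeds.cache.compressedredis'
#   CACHE_REDIS_URL = 'redis://localhost:6379/0'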
def _redis_backend(app, config, args, kwargs, cache_class):
kwargs.update(dict(
host=config.get('CACHE_REDIS_HOST', 'localhost'),
port=config.get('CACHE_REDIS_PORT', 6379),
))
password = config.get('CACHE_REDIS_PASSWORD')
if password:
kwargs['password'] = password
key_prefix = config.get('CACHE_KEY_PREFIX')
if key_prefix:
kwargs['key_prefix'] = key_prefix
db_number = config.get('CACHE_REDIS_DB')
if db_number:
kwargs['db'] = db_number
redis_url = config.get('CACHE_REDIS_URL')
if redis_url:
kwargs['host'] = from_url(redis_url, db=kwargs.pop('db', None))
return cache_class(*args, **kwargs)
|
JetBrains/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/dict_after.py | 83 | class A:
def foo(self):
self.a = {"1": 1, "2":2}<caret> |
bac/horizon | refs/heads/master | horizon/test/firefox_binary.py | 5 | # Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import platform
import shutil
import socket
import subprocess
import tempfile
import time
from selenium.common import exceptions as selenium_exceptions
from selenium.webdriver.common import desired_capabilities as dc
from selenium.webdriver import firefox
class FirefoxBinary(firefox.firefox_binary.FirefoxBinary):
    """Works around selenium firefox issues.
    There is a race condition in the way firefox is spawned. The exact
cause hasn't been properly diagnosed yet but it's around:
- getting a free port from the OS with
selenium.webdriver.common.utils free_port(),
- release the port immediately but record it in ff prefs so that ff
can listen on that port for the internal http server.
It has been observed that this leads to hanging processes for
'firefox -silent'.
"""
def _start_from_profile_path(self, path):
self._firefox_env["XRE_PROFILE_PATH"] = path
if platform.system().lower() == 'linux':
self._modify_link_library_path()
command = [self._start_cmd, "-silent"]
if self.command_line is not None:
for cli in self.command_line:
command.append(cli)
# The following exists upstream and is known to create hanging
# firefoxes, leading to zombies.
# subprocess.Popen(command, stdout=self._log_file,
# stderr=subprocess.STDOUT,
# env=self._firefox_env).communicate()
command[1] = '-foreground'
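        # Swap the '-silent' flag set above for '-foreground' so the browser
        # stays attached and does not leave a hanging process behind.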
self.process = subprocess.Popen(
command, stdout=self._log_file, stderr=subprocess.STDOUT,
env=self._firefox_env)
class WebDriver(firefox.webdriver.WebDriver):
    """Works around selenium firefox issues."""
TEMPDIR = tempfile.mkdtemp(dir="/tmp")
CONNREFUSED_RETRY_COUNT = 3
CONNREFUSED_RETRY_INTERVAL = 5
def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
desired_capabilities=dc.DesiredCapabilities.FIREFOX,
proxy=None):
try:
if firefox_profile is None:
firefox_profile = firefox.webdriver.FirefoxProfile()
self.setup_profile(firefox_profile)
# NOTE(amotoki): workaround for bug 1626643
# Connection refused error happens randomly in integration tests.
# When a connection refused exception is raised from start_session
# called from WebDriver.__init__, retry __init__.
for i in range(self.CONNREFUSED_RETRY_COUNT + 1):
try:
super(WebDriver, self).__init__(
firefox_profile, FirefoxBinary(), timeout,
desired_capabilities, proxy)
if i > 0:
# i==0 is normal behavior without connection refused.
print('NOTE: Retried %s time(s) due to '
'connection refused.' % i)
break
except socket.error as socket_error:
if (socket_error.errno == errno.ECONNREFUSED
and i < self.CONNREFUSED_RETRY_COUNT):
time.sleep(self.CONNREFUSED_RETRY_INTERVAL)
continue
raise
except selenium_exceptions.WebDriverException:
# If we can't start, cleanup profile
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
raise
def setup_profile(self, fp):
fp.set_preference("browser.download.folderList", 2)
fp.set_preference("browser.download.manager.showWhenStarting",
False)
fp.set_preference("browser.download.dir", self.TEMPDIR)
fp.set_preference("browser.helperApps.neverAsk.saveToDisk",
"application/binary,text/plain")
|
dovf/kitty | refs/heads/master | kitty/core/kassert.py | 1 | # Copyright (C) 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
#
# This file is part of Kitty.
#
# Kitty is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Kitty is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kitty. If not, see <http://www.gnu.org/licenses/>.
'''
This module provides various assertion functions used by kitty,
not that important, but they make life easier.
Useful for making assertions that throw :class:`~kitty.core.KittyException`
'''
from kitty.core import KittyException
import types
def is_of_types(obj, the_types):
'''
:param obj: object to assert
    :param the_types: iterable of types, or a single type
:raise: an exception if obj is not an instance of types
'''
if not isinstance(obj, the_types):
raise KittyException('object type (%s) is not one of (%s)' % (type(obj), the_types))
def is_int(obj):
'''
:param obj: object to assert
:raise: an exception if obj is not an int type
'''
is_of_types(obj, types.IntType)
def is_in(obj, it):
'''
:param obj: object to assert
:param it: iterable of elements we assert obj is in
    :raise: an exception if obj is not in the iterable
'''
if obj not in it:
raise KittyException('(%s) is not in %s' % (obj, it))
def not_none(obj):
'''
:param obj: object to assert
    :raise: an exception if obj is None
'''
if obj is None:
raise KittyException('object is None')
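# Illustrative usage (each helper raises KittyException on failure):
#   is_int(5) # passes
#   is_in('x', ['x', 'y']) # passes
#   not_none(None) # raises KittyException('object is None')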
|
nirb/whatsapp | refs/heads/master | yowsup/layers/axolotl/protocolentities/test_iq_keys_set.py | 68 | from yowsup.layers.protocol_iq.protocolentities.test_iq import IqProtocolEntityTest
from yowsup.layers.axolotl.protocolentities import SetKeysIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
class SetKeysIqProtocolEntityTest(IqProtocolEntityTest):
def setUp(self):
super(SetKeysIqProtocolEntityTest, self).setUp()
# self.ProtocolEntity = SetKeysIqProtocolEntity
#
# regNode = ProtocolTreeNode("registration", data = "abcd")
# idNode = ProtocolTreeNode("identity", data = "efgh")
# typeNode = ProtocolTreeNode("type", data = "ijkl")
# listNode = ProtocolTreeNode("list")
# for i in range(0, 2):
# keyNode = ProtocolTreeNode("key", children=[
# ProtocolTreeNode("id", data = "id_%s" % i),
# ProtocolTreeNode("value", data = "val_%s" % i)
# ])
# listNode.addChild(keyNode)
#
# self.node.addChildren([regNode, idNode, typeNode, listNode])
|
keyboard-k/youtube-dl-pet | refs/heads/master | youtube_dl/version.py | 1 | from __future__ import unicode_literals
__version__ = '2015.12.21'
|
eweill/ConwayGameOfLife | refs/heads/master | testing/GoLquadtree.py | 1 | # Instance of quadtree from Malcom Kesson for use in Game of Life
# This code is a copy of his code from http://www.fundza.com/algorithmic/quadtree/index.html
# Instead of using inheritance, I just modified what I needed.
# I had to override every function to make it match my needs.
import matplotlib.pyplot as plt
class GoLNode():
ROOT = 0
BRANCH = 1
LEAF = 2
EMPTY = 3
def __init__(self, parent, rect):
"""
Initializes an instance of a Quadtree Node
Parameters
----------
Parent is the parent Node
rect is the bounding rectangle
"""
self.parent = parent
self.children = [None, None, None, None]
self.rect = rect
x0, y0, x1, y1 = rect
# Determine node type
if parent == None:
self.depth = 0
self.type = GoLNode.ROOT
else:
self.depth = parent.depth + 1
# Leaf iff 1 space wide
if(x1 - x0) == 0:
self.type = GoLNode.LEAF
else:
self.type = GoLNode.BRANCH
def getinstance(self, rect):
"""
Returns an instance of a GoL Node
Parameters
----------
rect is the bounding rectangle for the Node to be created
"""
return GoLNode(self, rect)
def spans_feature(self, rect, point = None):
"""
This very simply checks if the point in question is within a rectangle
Parameters
----------
rect is the bounding rectangle
point is the point within the rectangle
Returns
-------
True if point is in rect, false otherwise
"""
if point == None:
return False
x,y = point
x0, y0, x1, y1 = rect
if x >= x0 and x <= x1 and y >= y0 and y <= y1:
return True
return False
def subdivide(self, point = None):
"""
This is a modification of the subdivide function in the base class
It requires a point to subdivide to assist with insertion
        Parameters
---------
Point to subdivide on and check against
Returns
-------
None
"""
if self.type == GoLNode.LEAF:
return
x,y = point
x0, y0, x1, y1 = self.rect
h = (x1 - x0) / 2
rects = []
rects.append((x0, y0, x0 + h, y0 + h))
rects.append((x0, y0 + h + 1, x0 + h, y1))
rects.append((x0 + h + 1, y0 + h + 1, x1, y1))
rects.append((x0 + h + 1, y0, x1, y0 + h))
#print rects
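        # e.g. for rect (0, 0, 16, 16): h = 8 and the candidate quadrants are
        # (0,0,8,8), (0,9,8,16), (9,9,16,16) and (9,0,16,8).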
for n in range(len(rects)):
if self.spans_feature(rects[n], point):
#if x == x0 and x == x1 and y == y0 and y == y1:
#print "Creating child for ", point, "at depth ", self.depth, " and child ", n, rects[n]
self.children[n] = self.getinstance(rects[n])
self.children[n].subdivide(point)
def contains(self, x, y):
"""
Determines if the given coordinates are contained within the Node's bounding rectangle
Parameters
----------
x and y are the coordinates of the input point
Returns
-------
True if contained, false otherwise
"""
x0, y0, x1, y1 = self.rect
if x >= x0 and x <= x1 and y >= y0 and y <= y1:
return True
return False
class GoLQuadTree():
maxdepth = 1
leaves = []
allnodes = []
def __init__(self, rootnode, minrect):
"""
Initializes the Quad tree
Parameters
----------
Rootnode is the root of the tree, needs to be (2^n)-1 and square
Minrect is leftover from Malcom's Implementation
Returns
-------
None
"""
GoLNode.minsize = minrect
def traverse(self, node):
"""
This traverses the tree and puts ALL nodes into one list
and puts the leaves into a seperate list as well. The max
depth is recorded during the recursion.
Parameters
----------
node is the current node being examined
Returns
-------
None
"""
# If beginning of recursion (root node), then clear out all data
# structures and reset depth.
if node.depth == 0:
GoLQuadTree.allnodes = []
GoLQuadTree.leaves = []
GoLQuadTree.maxdepth = 1
# Add the current node to all nodes
GoLQuadTree.allnodes.append(node)
# And save leaves into leaves
if node.type == GoLNode.LEAF:
GoLQuadTree.leaves.append(node)
if node.depth > GoLQuadTree.maxdepth:
GoLQuadTree.maxdepth = node.depth
# Recurse on non-empty children
for child in node.children:
if child != None:
self.traverse(child)
def prune(self, node):
"""
Prune determines if a node has children with leaves and cuts
off any branches that have no leaves.
Parameters
----------
node is the node to check for missing leaves
Returns
-------
None
"""
if node.type == GoLNode.LEAF:
return 1
leafcount = 0
removals = []
for child in node.children:
if child != None:
leafcount += self.prune(child)
if leafcount == 0:
removals.append(child)
for item in removals:
n = node.children.index(item)
node.children[n] = None
return leafcount
def insert(self, root, point):
"""
Use this to add a point to the Quad Tree
The function finds the first None child then calls subdivide from that node
This is also a recursive function (root becomes the child)
Parameters
----------
root is the root node
point is the point we want to add, or the cell that will become Alive
Returns
-------
None
"""
# Recursively traverse the tree until the correct non-empty node is
# found that contains the bounding rectangle of our point to insert
# We could call subdivide on the root, but this method is a little
# more efficient.
found = False
for child in root.children:
if child != None and child.contains(point[0], point[1]):
found = True
self.insert(child, point)
if not found:
#print "Subdividing to add point ", point
root.subdivide(point)
def delete(self, root, point):
"""
Use this to delete a point from the QuadTree.
This function clears a child and then prunes if the point was
found and deleted.
Parameters
----------
root is the root node
point to delete from the tree
Returns
-------
True if item found and deleted, else false
"""
found = False
# Need to check each child
for child in range(len(root.children)):
# Only search if not found.
if not found and root.children[child] != None and root.children[child].contains(point[0], point[1]):
if root.children[child].type == GoLNode.LEAF:
found = True
print "Deleting ", point
root.children[child] = None
else:
found = self.delete(root.children[child], point)
# Prune each parent to remove excess nodes.
# We need to do this for each parent to save the most space.
# This can be modified to save less space but run quicker by pruning
# only the root node as shown below in comments.
if root != None and root.parent != None:
self.prune(root.parent)
#if root.type == GoLNode.ROOT:
# self.proon(root)
return found
def show_tree(self, root):
"""
This function attempts to show the status of the quadtree graphically
Parameters
----------
Root is the center of the grid and all connections will be drawn from here
Returns
-------
None
"""
# Verify working on the root node.
if root.type != GoLNode.ROOT:
print "Given node is not the root node."
return
x0, y0, x1, y1 = root.rect
# Set initial figure and set axes to dimensions of root
plt.figure()
plt.xlim(x0, x1)
plt.ylim(y0, y1)
# Recursive function that prints all connections
self.print_tree(root)
plt.show()
def print_tree(self, parent):
"""
This is a helper function to draw the lines on a figure
Parameters
----------
Parent is the parent node and we will draw lines to its children
Returns
-------
None
"""
x0, y0, x1, y1 = parent.rect
x_cent = (x1-x0)/2+x0
y_cent = (y1-y0)/2+y0
# This recursively calls the function for every child
# and then draws a line from the center of the child's rect to the
# center of its parent's rect.
for child in parent.children:
if child != None:
self.print_tree(child)
cx0, cy0, cx1, cy1 = child.rect
cx_cent = ((cx1-cx0)/2)+cx0
cy_cent = ((cy1-cy0)/2)+cy0
#print "Drawing line ", (x_cent, y_cent), (cx_cent, cy_cent)
plt.plot((x_cent, cx_cent), (y_cent, cy_cent), 'bo-', hold=True)
# Need to write spans_feature to check if point we are adding exists in the rectangle. So we need to store the points somehow. Save them in a set like the circle example? Or should each node save it's own point if it is a leaf? Then we need to override subdivide (basically rewrite the class at that point).
# Tree where each branch has 4 children. Root node is direct center of grid. 256x256 -> (128,128)
# Whenever adding a node, need to refine, by quadrant, down to single point
# No need to use instances of ConwayGOLCell, if a cell exists in quadtree, it's alive
# Need to write add and del functions to add items and remove them from the tree efficiently
# Also need a good way to find neighbors, or store them on creation.
# Maybe use a set within the QuadTree to maintain alive items and check against this for neighbors
#
# To add a node, need to subdivide (if necessary) and create the node to be added. This may require modifying
# the subdivide routine.
# To delete a node, just need to set it's position on it's parent to None then prune.
#
#
if __name__ == '__main__':
baserect = [0, 0, 16, 16]
rootnode = GoLNode(None, baserect)
tree = GoLQuadTree(rootnode, 0)
tree.insert(rootnode, (0, 0))
tree.show_tree(rootnode)
tree.insert(rootnode, (5, 8))
tree.show_tree(rootnode)
tree.insert(rootnode, (1,1))
tree.show_tree(rootnode)
tree.insert(rootnode, (14, 11))
tree.show_tree(rootnode)
tree.delete(rootnode, (14,11))
tree.show_tree(rootnode)
baserect = [0, 0, 256, 256]
rootnode = GoLNode(None, baserect)
tree = GoLQuadTree(rootnode, 1)
tree.show_tree(rootnode)
tree.insert(rootnode, (34, 34))
tree.show_tree(rootnode)
tree.insert(rootnode, (56, 3))
tree.show_tree(rootnode)
tree.insert(rootnode, (128, 5))
tree.show_tree(rootnode)
tree.insert(rootnode, (253, 120))
tree.show_tree(rootnode)
tree.insert(rootnode, (253, 247))
tree.insert(rootnode, (253, 248))
tree.insert(rootnode, (238, 139))
tree.insert(rootnode, (160, 230))
tree.insert(rootnode, (178, 35))
tree.insert(rootnode, (190, 78))
tree.insert(rootnode, (32, 156))
tree.insert(rootnode, (79, 230))
tree.insert(rootnode, (120, 129))
tree.show_tree(rootnode)
tree.delete(rootnode, (253, 247))
tree.delete(rootnode, (34, 34))
tree.delete(rootnode, (56, 3))
tree.delete(rootnode, (32, 156))
tree.delete(rootnode, (79, 230))
tree.delete(rootnode, (128, 5))
tree.delete(rootnode, (160, 230))
tree.delete(rootnode, (178, 35))
tree.delete(rootnode, (120, 129))
tree.delete(rootnode, (190, 78))
tree.show_tree(rootnode)
|
aosagie/spark | refs/heads/master | python/pyspark/mllib/classification.py | 8 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import sys
import warnings
import numpy
from pyspark import RDD, since
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint, LinearModel, _regression_train_wrapper,
StreamingLinearAlgorithm)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'LogisticRegressionWithLBFGS',
'SVMModel', 'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes',
'StreamingLogisticRegressionWithSGD']
class LinearClassificationModel(LinearModel):
"""
A private abstract class representing a multiclass classification
model. The categories are represented by int values: 0, 1, 2, etc.
"""
def __init__(self, weights, intercept):
super(LinearClassificationModel, self).__init__(weights, intercept)
self._threshold = None
@since('1.4.0')
def setThreshold(self, value):
"""
Sets the threshold that separates positive predictions from
negative predictions. An example with prediction score greater
than or equal to this threshold is identified as a positive,
and negative otherwise. It is used for binary classification
only.
"""
self._threshold = value
@property
@since('1.4.0')
def threshold(self):
"""
Returns the threshold (if any) used for converting raw
prediction scores into 0/1 predictions. It is used for
binary classification only.
"""
return self._threshold
@since('1.4.0')
def clearThreshold(self):
"""
Clears the threshold so that `predict` will output raw
prediction scores. It is used for binary classification only.
"""
self._threshold = None
@since('1.4.0')
def predict(self, test):
"""
Predict values for a single data point or an RDD of points
using the model trained.
"""
raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
:param weights:
Weights computed for every feature.
:param intercept:
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
      not be a single value, so the intercepts will be part of the
weights.)
:param numFeatures:
The dimension of the features.
:param numClasses:
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(numpy.array([0.0, 1.0]))
1
>>> lrm.predict(numpy.array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(numpy.array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
.. versionadded:: 0.9.0
"""
def __init__(self, weights, intercept, numFeatures, numClasses):
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // (self._numClasses - 1)
self._weightsMatrix = self._coeff.toArray().reshape(self._numClasses - 1,
self._dataWithBiasSize)
@property
@since('1.4.0')
def numFeatures(self):
"""
Dimension of the features.
"""
return self._numFeatures
@property
@since('1.4.0')
def numClasses(self):
"""
        Number of possible outcomes for a k-class classification problem
        in Multinomial Logistic Regression.
"""
return self._numClasses
@since('0.9.0')
def predict(self, x):
"""
Predict values for a single data point or an RDD of points
using the model trained.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
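        # For the binary case handled first below, the raw score is the
        # logistic sigmoid of the margin; the two branches compute it in a
        # numerically stable way so exp() never sees a large positive value.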
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
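            # Multinomial case: class 0 is the pivot with an implicit margin
            # of 0; the remaining (numClasses - 1) classes have their weights
            # stored row-wise, with a trailing bias term when the model was
            # trained with an intercept (then x.size + 1 == dataWithBiasSize).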
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
self._weightsMatrix[i][x.size]
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i])
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class
@since('1.4.0')
def save(self, sc, path):
"""
Save this model to the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
_py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since('1.4.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
sc._jsc.sc(), path)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
numFeatures = java_model.numFeatures()
numClasses = java_model.numClasses()
threshold = java_model.getThreshold().get()
model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
model.setThreshold(threshold)
return model
def __repr__(self):
return self._call_java("toString")
class LogisticRegressionWithSGD(object):
"""
.. versionadded:: 0.9.0
.. note:: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or
LogisticRegressionWithLBFGS.
"""
@classmethod
@since('0.9.0')
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
initialWeights=None, regParam=0.01, regType="l2", intercept=False,
validateData=True, convergenceTol=0.001):
"""
Train a logistic regression model on the given data.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.01)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
:param intercept:
          Boolean parameter which indicates whether to use an augmented
          representation of the training data (i.e., whether bias
          features are activated).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
"LogisticRegressionWithLBFGS.", DeprecationWarning)
def train(rdd, i):
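            # Delegate to the JVM implementation via callMLlibFunc; `i` is
            # the initial weight vector passed in by _regression_train_wrapper.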
return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
float(step), float(miniBatchFraction), i, float(regParam), regType,
bool(intercept), bool(validateData), float(convergenceTol))
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS(object):
"""
.. versionadded:: 1.2.0
"""
@classmethod
@since('1.2.0')
def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
"""
Train a logistic regression model on the given data.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
:param intercept:
          Boolean parameter which indicates whether to use an augmented
          representation of the training data (i.e., whether bias
          features are activated).
(default: False)
:param corrections:
The number of corrections used in the LBFGS update.
If a known updater is used for binary classification,
it calls the ml implementation and this parameter will
have no effect. (default: 10)
:param tolerance:
The convergence tolerance of iterations for L-BFGS.
(default: 1e-6)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param numClasses:
The number of classes (i.e., outcomes) a label can take in
Multinomial Logistic Regression.
(default: 2)
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
"""
def train(rdd, i):
return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i,
float(regParam), regType, bool(intercept), int(corrections),
float(tolerance), bool(validateData), int(numClasses))
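        # Default initial weights: one weight per feature in the binary case;
        # for multinomial models the vector is flattened over the
        # (numClasses - 1) non-pivot classes, with an extra bias slot per
        # class when an intercept is fitted.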
if initialWeights is None:
if numClasses == 2:
initialWeights = [0.0] * len(data.first().features)
else:
if intercept:
initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
else:
initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
"""
Model for Support Vector Machines (SVMs).
:param weights:
Weights computed for every feature.
:param intercept:
Intercept computed for this model.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
>>> svm.predict([1.0])
1
>>> svm.predict(sc.parallelize([[1.0]])).collect()
[1]
>>> svm.clearThreshold()
>>> svm.predict(numpy.array([1.0]))
1.44...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> svm.predict(SparseVector(2, {1: 1.0}))
1
>>> svm.predict(SparseVector(2, {0: -1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> svm.save(sc, path)
>>> sameModel = SVMModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {1: 1.0}))
1
>>> sameModel.predict(SparseVector(2, {0: -1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except:
... pass
.. versionadded:: 0.9.0
"""
def __init__(self, weights, intercept):
super(SVMModel, self).__init__(weights, intercept)
self._threshold = 0.0
@since('0.9.0')
def predict(self, x):
"""
Predict values for a single data point or an RDD of points
using the model trained.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
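        # The raw SVM decision value is w . x + b; with the default threshold
        # of 0.0 it is mapped to a 0/1 label, while clearThreshold() exposes
        # the margin itself.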
margin = self.weights.dot(x) + self.intercept
if self._threshold is None:
return margin
else:
return 1 if margin > self._threshold else 0
@since('1.4.0')
def save(self, sc, path):
"""
Save this model to the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
_py2java(sc, self._coeff), self.intercept)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since('1.4.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(
sc._jsc.sc(), path)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
threshold = java_model.getThreshold().get()
model = SVMModel(weights, intercept)
model.setThreshold(threshold)
return model
class SVMWithSGD(object):
"""
.. versionadded:: 0.9.0
"""
@classmethod
@since('0.9.0')
def train(cls, data, iterations=100, step=1.0, regParam=0.01,
miniBatchFraction=1.0, initialWeights=None, regType="l2",
intercept=False, validateData=True, convergenceTol=0.001):
"""
Train a support vector machine on the given data.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param regParam:
The regularizer parameter.
(default: 0.01)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regType:
The type of regularizer used for training our model.
Allowed values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
:param intercept:
          Boolean parameter which indicates whether to use an augmented
          representation of the training data (i.e., whether bias
          features are activated).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001)
"""
def train(rdd, i):
return callMLlibFunc("trainSVMModelWithSGD", rdd, int(iterations), float(step),
float(regParam), float(miniBatchFraction), i, regType,
bool(intercept), bool(validateData), float(convergenceTol))
return _regression_train_wrapper(train, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader):
"""
Model for Naive Bayes classifiers.
:param labels:
List of labels.
:param pi:
      Log of class priors, whose dimension is C, the number of labels.
    :param theta:
      Log of class conditional probabilities, whose dimension is C-by-D,
      where D is the number of features.
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(numpy.array([0.0, 1.0]))
0.0
>>> model.predict(numpy.array([1.0, 0.0]))
1.0
>>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
[1.0]
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = NaiveBayesModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 0.9.0
"""
def __init__(self, labels, pi, theta):
self.labels = labels
self.pi = pi
self.theta = theta
@since('0.9.0')
def predict(self, x):
"""
Return the most likely class for a data vector
or an RDD of vectors
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
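        # pi + x . theta^T is the unnormalized log-posterior for each class;
        # argmax over it picks the most likely label.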
return self.labels[numpy.argmax(self.pi + x.dot(self.theta.transpose()))]
def save(self, sc, path):
"""
Save this model to the given path.
"""
java_labels = _py2java(sc, self.labels.tolist())
java_pi = _py2java(sc, self.pi.tolist())
java_theta = _py2java(sc, self.theta.tolist())
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
java_labels, java_pi, java_theta)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since('1.4.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
sc._jsc.sc(), path)
# Can not unpickle array.array from Pyrolite in Python3 with "bytes"
py_labels = _java2py(sc, java_model.labels(), "latin1")
py_pi = _java2py(sc, java_model.pi(), "latin1")
py_theta = _java2py(sc, java_model.theta(), "latin1")
return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes(object):
"""
.. versionadded:: 0.9.0
"""
@classmethod
@since('0.9.0')
def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, it can be used for
document classification. By making every vector a 0-1 vector,
it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
The input feature values must be nonnegative.
:param data:
RDD of LabeledPoint.
:param lambda_:
The smoothing parameter.
(default: 1.0)
"""
first = data.first()
if not isinstance(first, LabeledPoint):
raise ValueError("`data` should be an RDD of LabeledPoint")
labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
"""
Train or predict a logistic regression model on streaming data.
Training uses Stochastic Gradient Descent to update the model based on
each new batch of incoming data from a DStream.
Each batch of data is assumed to be an RDD of LabeledPoints.
The number of data points per batch can vary, but the number
of features must be constant. An initial weight
vector must be provided.
:param stepSize:
Step size for each iteration of gradient descent.
(default: 0.1)
:param numIterations:
Number of iterations run for each batch of data.
(default: 50)
:param miniBatchFraction:
Fraction of each batch of data to use for updates.
(default: 1.0)
:param regParam:
L2 Regularization parameter.
(default: 0.0)
:param convergenceTol:
Value used to determine when to terminate iterations.
(default: 0.001)
.. versionadded:: 1.5.0
"""
def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.0,
convergenceTol=0.001):
self.stepSize = stepSize
self.numIterations = numIterations
self.regParam = regParam
self.miniBatchFraction = miniBatchFraction
self.convergenceTol = convergenceTol
self._model = None
super(StreamingLogisticRegressionWithSGD, self).__init__(
model=self._model)
@since('1.5.0')
def setInitialWeights(self, initialWeights):
"""
Set the initial value of weights.
This must be set before running trainOn and predictOn.
"""
initialWeights = _convert_to_vector(initialWeights)
# LogisticRegressionWithSGD does only binary classification.
self._model = LogisticRegressionModel(
initialWeights, 0, initialWeights.size, 2)
return self
@since('1.5.0')
def trainOn(self, dstream):
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd):
# LogisticRegressionWithSGD.train raises an error for an empty RDD.
if not rdd.isEmpty():
self._model = LogisticRegressionWithSGD.train(
rdd, self.numIterations, self.stepSize,
self.miniBatchFraction, self._model.weights,
regParam=self.regParam, convergenceTol=self.convergenceTol)
dstream.foreachRDD(update)
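# A minimal usage sketch for StreamingLogisticRegressionWithSGD (the DStream
# variable names below are illustrative, not defined in this module):
#
#     model = StreamingLogisticRegressionWithSGD(stepSize=0.1, numIterations=25)
#     model.setInitialWeights([0.0, 0.0])
#     model.trainOn(training_stream)              # DStream of LabeledPoint
#     predictions = model.predictOn(test_stream)  # DStream of feature vectors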
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.mllib.classification
globs = pyspark.mllib.classification.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.classification tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
CartoDB/bigmetadata | refs/heads/master | tasks/carto.py | 1 | '''
Tasks to sync data locally to CartoDB
'''
from lib.logger import get_logger
from tasks.base_tasks import TableToCarto, TableToCartoViaImportAPI
from tasks.meta import current_session, OBSTable, OBSColumn, UpdatedMetaTarget
from tasks.util import underscore_slugify, query_cartodb, classpath, shell, unqualified_task_id
from tasks.targets import PostgresTarget, CartoDBTarget
from luigi import (WrapperTask, BoolParameter, Parameter, Task, LocalTarget,
DateParameter, IntParameter)
from luigi.task_register import Register
from luigi.contrib.s3 import S3Target
from datetime import date
import time
import os
LOGGER = get_logger(__name__)
META_TABLES = ('obs_table', 'obs_column_table', 'obs_column', 'obs_column_to_column',
'obs_column_tag', 'obs_tag', 'obs_dump_version', )
class SyncColumn(WrapperTask):
'''
Upload tables relevant to updating a particular column by keyword.
'''
keywords = Parameter()
def requires(self):
session = current_session()
cols = session.query(OBSColumn).filter(OBSColumn.id.ilike(
'%' + self.keywords + '%'
))
if cols.count():
for col in cols:
for coltable in col.tables:
yield SyncData(exact_id=coltable.table.id)
else:
tables = session.query(OBSTable).filter(OBSTable.id.ilike(
'%' + self.keywords + '%'
))
if tables.count():
for table in tables:
yield SyncData(exact_id=table.id)
else:
raise Exception('Unable to find any tables or columns with ID '
'that matched "{keywords}" via ILIKE'.format(
keywords=self.keywords
))
class SyncData(WrapperTask):
'''
Upload a single OBS table to cartodb by fuzzy ID
'''
force = BoolParameter(default=True, significant=False)
id = Parameter(default=None)
exact_id = Parameter(default=None)
tablename = Parameter(default=None)
def requires(self):
session = current_session()
if self.exact_id:
table = session.query(OBSTable).get(self.exact_id)
elif self.tablename:
table = session.query(OBSTable).filter(OBSTable.tablename == self.tablename).one()
elif self.id:
table = session.query(OBSTable).filter(OBSTable.id.ilike('%' + self.id + '%')).one()
else:
raise Exception('Need id or exact_id for SyncData')
return TableToCarto(table=table.tablename, force=self.force)
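# Hedged usage sketch: these tasks are normally driven through the luigi CLI.
# The module path and task id below are illustrative, not taken from this
# repository's configuration.
#
#     luigi --module tasks.carto SyncData --id us.census.acs
#     luigi --module tasks.carto SyncAllData --force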
class SyncAllData(WrapperTask):
'''
Sync all data to the linked CARTO account.
'''
force = BoolParameter(default=False, significant=False)
def requires(self):
existing_table_versions = dict([
(r['tablename'], r['version']) for r in query_cartodb(
'SELECT * FROM obs_table'
).json()['rows']
])
tables = dict([(k, v) for k, v in current_session().execute(
'''
SELECT tablename, t.version
FROM observatory.obs_table t,
observatory.obs_column_table ct,
observatory.obs_column c
WHERE t.id = ct.table_id
AND c.id = ct.column_id
AND t.tablename NOT IN ('obs_ffebc3eb689edab4faa757f75ca02c65d7db7327')
AND c.weight > 0
'''
)])
for tablename, version in tables.items():
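            # Force re-upload when the local metadata version is ahead of the
            # copy already on CARTO; tables missing remotely default to
            # version 0 below, so they are always uploaded.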
            if version > existing_table_versions.get(tablename, 0):
force = True
else:
force = self.force
yield TableToCartoViaImportAPI(table=tablename, force=force)
class PurgeMetadataTasks(Task):
'''
Purge local metadata tables that no longer have tasks linking to them
'''
pass
class PurgeMetadataColumns(Task):
'''
Purge local metadata tables that no longer have tasks linking to them
'''
pass
class PurgeUndocumentedTables(Task):
'''
Purge tables that should be in metadata but are not.
'''
def run(self):
session = current_session()
resp = session.execute('SELECT table_schema, table_name '
'FROM information_schema.tables '
"WHERE table_schema ILIKE 'observatory' ")
for _, tablename in resp:
if tablename in ('obs_table', 'obs_column_table', 'obs_column',
'obs_tag', 'obs_column_to_column', 'obs_column_tag'):
continue
if session.query(OBSTable).filter_by(tablename=tablename).count() == 0:
cnt = session.execute('SELECT COUNT(*) FROM observatory.{tablename}'.format(
tablename=tablename)).fetchone()[0]
if cnt == 0:
stmt = 'DROP TABLE observatory.{tablename} CASCADE'.format(
tablename=tablename)
LOGGER.info(stmt)
session.execute(stmt)
session.commit()
else:
raise Exception("Will not automatically drop table {tablename} "
"with data in it".format(tablename=tablename))
class PurgeMetadataTables(Task):
'''
Purge local metadata tables that no longer have tasks linking to them,
as well as entries in obs_table that do not link to any table.
'''
def run(self):
session = current_session()
for _output in self.output():
if not _output.exists():
resp = session.execute("SELECT id from observatory.obs_table "
"WHERE tablename = '{tablename}'".format(
tablename=_output.tablename))
_id = resp.fetchall()[0][0]
stmt = "DELETE FROM observatory.obs_table " \
"WHERE id = '{id}'".format(id=_id)
LOGGER.info(stmt)
session.execute(stmt)
session.commit()
def output(self):
session = current_session()
for table in session.query(OBSTable):
split = table.id.split('.')
schema, task_id = split[0:-1], split[-1]
modname = 'tasks.' + '.'.join(schema)
module = __import__(modname, fromlist=['*'])
exists = False
for name in dir(module):
kls = getattr(module, name)
if not isinstance(kls, Register):
continue
if task_id.startswith(underscore_slugify(name)):
exists = True
if exists is True:
LOGGER.info('{table} exists'.format(table=table))
else:
# TODO drop table
LOGGER.info(table)
yield PostgresTarget(schema='observatory', tablename=table.tablename)
class ConfirmTableExists(Task):
'''
Confirm a table exists
'''
schema = Parameter(default='observatory')
tablename = Parameter()
def run(self):
raise Exception('Table {} does not exist'.format(self.tablename))
def output(self):
return PostgresTarget(self.schema, self.tablename)
class ConfirmTablesDescribedExist(WrapperTask):
'''
Confirm that all tables described in obs_table actually exist.
'''
def requires(self):
session = current_session()
for table in session.query(OBSTable):
yield ConfirmTableExists(tablename=table.tablename)
class PurgeMetadata(WrapperTask):
'''
Purge local metadata that no longer has tasks linking to it
'''
def requires(self):
yield PurgeMetadataColumns()
yield PurgeMetadataTables()
class PurgeData(Task):
'''
Purge local data that no longer has tasks linking to it.
'''
pass
class PurgeRemoteData(Task):
'''
Purge remote data that is no longer available locally
'''
pass
class TestData(Task):
'''
See if a dataset has been uploaded & is in sync (at the least, has
the same number of rows & columns as local).
'''
pass
class TestAllData(Task):
'''
See if all datasets have been uploaded & are in sync
'''
pass
class Dump(Task):
'''
Dumps the entire ``observatory`` schema to a local file using the
`binary <https://www.postgresql.org/docs/9.4/static/app-pgdump.html>`_
Postgres dump format.
Automatically updates :class:`~.meta.OBSDumpVersion`.
:param timestamp: Optional date parameter, defaults to today.
'''
timestamp = DateParameter(default=date.today())
def requires(self):
yield OBSMetaToLocal(force=True)
def run(self):
session = current_session()
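        # The dump id is recorded in obs_dump_version before pg_dump runs; if
        # the dump fails, the bookkeeping row is deleted again so
        # obs_dump_version only lists successful dumps.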
try:
self.output().makedirs()
session.execute(
'INSERT INTO observatory.obs_dump_version (dump_id) '
"VALUES ('{task_id}')".format(task_id=unqualified_task_id(self.task_id)))
session.commit()
shell('pg_dump -Fc -Z0 -x -n observatory -f {output}'.format(
output=self.output().path))
except Exception as err:
session.rollback()
session.execute(
'DELETE FROM observatory.obs_dump_version '
"WHERE dump_id = '{task_id}'".format(task_id=unqualified_task_id(self.task_id)))
session.commit()
raise err
def output(self):
return LocalTarget(os.path.join('tmp', classpath(self), unqualified_task_id(self.task_id) + '.dump'))
class DumpS3(Task):
'''
Uploads ``observatory`` schema dumped from :class:`~.carto.Dump` to
`Amazon S3 <https://aws.amazon.com/s3/>`_, using credentials from ``.env``.
Automatically updates :class:`~.meta.OBSDumpVersion`.
:param timestamp: Optional date parameter, defaults to today.
'''
timestamp = DateParameter(default=date.today())
force = BoolParameter(default=False, significant=False)
def requires(self):
return Dump(timestamp=self.timestamp)
def run(self):
shell('aws s3 cp {input} {output}'.format(
input=self.input().path,
output=self.output().path
))
def output(self):
path = self.input().path.replace('tmp/carto/Dump_', 'do-release-')
path = path.replace('.dump', '/obs.dump')
path = 's3://cartodb-observatory-data/{path}'.format(
path=path
)
LOGGER.info(path)
target = S3Target(path)
if self.force:
shell('aws s3 rm {output}'.format(
output=path
))
self.force = False
return target
class OBSMeta(Task):
force = BoolParameter(default=False)
FIRST_AGGREGATE = '''
CREATE OR REPLACE FUNCTION public.first_agg ( anyelement, anyelement )
RETURNS anyelement LANGUAGE SQL IMMUTABLE STRICT AS $$
SELECT $1;
$$;
DROP AGGREGATE IF EXISTS public.FIRST (anyelement);
CREATE AGGREGATE public.FIRST (
sfunc = public.first_agg,
basetype = anyelement,
stype = anyelement
);
'''
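    # QUERIES materializes obs_meta: first a denormalized skeleton joining
    # numerator, denominator, geometry and timespan ids, then a series of
    # UPDATE statements that fill in names, descriptions, tags, versions and
    # geometries, plus the supporting indexes.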
QUERIES = ['''
CREATE TABLE {obs_meta} AS
WITH denoms as (
SELECT
numer_c.id numer_id,
denom_c.id denom_id,
denom_t.id denom_tid,
geomref_c.id geomref_id,
null::varchar denom_name,
null::varchar denom_description,
null::varchar denom_t_description,
null::varchar denom_aggregate,
null::varchar denom_type,
null::varchar denom_reltype,
null::varchar denom_colname,
FIRST(denom_geomref_ct.colname) denom_geomref_colname,
null::varchar denom_tablename,
FIRST(denom_t.timespan) denom_timespan,
null::int as denom_weight,
null::jsonb as denom_tags,
null::jsonb denom_extra,
null::jsonb denom_ct_extra
FROM observatory.obs_column numer_c
, observatory.obs_column_to_column denom_c2c
, observatory.obs_column denom_c
, observatory.obs_column_table denom_data_ct
, observatory.obs_table denom_t
, observatory.obs_column_tag denom_ctag
, observatory.obs_tag denom_tag
, observatory.obs_column_table denom_geomref_ct
, observatory.obs_column geomref_c
, observatory.obs_column_to_column geomref_c2c
WHERE denom_c.weight > 0
AND denom_c2c.source_id = numer_c.id
AND denom_c2c.target_id = denom_c.id
AND denom_data_ct.column_id = denom_c.id
AND denom_data_ct.table_id = denom_t.id
AND denom_c.id = denom_ctag.column_id
AND denom_ctag.tag_id = denom_tag.id
AND denom_c2c.reltype IN ('denominator', 'universe')
AND denom_geomref_ct.table_id = denom_t.id
AND denom_geomref_ct.column_id = geomref_c.id
AND geomref_c2c.reltype = 'geom_ref'
AND geomref_c2c.source_id = geomref_c.id
GROUP BY numer_c.id, denom_c.id, denom_t.id, geomref_c.id
), leftjoined_denoms AS (
SELECT numer_c.id all_numer_id, denoms.*
FROM observatory.obs_column numer_c
LEFT JOIN denoms ON numer_c.id = denoms.numer_id
) SELECT numer_c.id numer_id,
denom_id,
geom_c.id geom_id,
FIRST(numer_t.id) numer_tid,
FIRST(denom_tid) denom_tid,
FIRST(geom_t.id ORDER BY geom_t.timespan DESC) geom_tid,
null::varchar numer_name,
null::varchar denom_name,
null::varchar geom_name,
null::varchar numer_description,
null::varchar denom_description,
null::varchar geom_description,
null::varchar numer_t_description,
null::varchar denom_t_description,
null::varchar geom_t_description,
null::varchar numer_aggregate,
null::varchar denom_aggregate,
null::varchar geom_aggregate,
null::varchar numer_type,
null::varchar denom_type,
null::varchar denom_reltype,
null::varchar geom_type,
null::varchar numer_colname,
null::varchar denom_colname,
null::varchar geom_colname,
null::integer numer_version,
null::integer denom_version,
null::integer geom_version,
null::integer numer_t_version,
null::integer denom_t_version,
null::integer geom_t_version,
FIRST(numer_geomref_ct.colname) numer_geomref_colname,
FIRST(denom_geomref_colname) denom_geomref_colname,
FIRST(geom_geomref_ct.colname ORDER BY geom_t.timespan DESC) geom_geomref_colname,
null::varchar numer_tablename,
null::varchar denom_tablename,
null::varchar geom_tablename,
numer_t.timespan numer_timespan,
null::varchar numer_timespan_alias,
null::varchar numer_timespan_name,
null::varchar numer_timespan_description,
null::varchar numer_timespan_range,
null::varchar numer_timespan_weight,
null::varchar denom_timespan,
null::varchar denom_timespan_alias,
null::varchar denom_timespan_name,
null::varchar denom_timespan_description,
null::daterange denom_timespan_range,
null::numeric denom_timespan_weight,
null::numeric numer_weight,
null::numeric denom_weight,
null::numeric geom_weight,
null::varchar geom_timespan,
null::varchar geom_timespan_alias,
null::varchar geom_timespan_name,
null::varchar geom_timespan_description,
null::varchar geom_timespan_range,
null::varchar geom_timespan_weight,
null::geometry the_geom,
null::jsonb numer_tags,
null::jsonb denom_tags,
null::jsonb geom_tags,
null::jsonb timespan_tags,
null::varchar[] section_tags,
null::varchar[] subsection_tags,
null::varchar[] unit_tags,
null::jsonb numer_extra ,
null::jsonb numer_ct_extra ,
null::jsonb denom_extra,
null::jsonb denom_ct_extra,
null::jsonb geom_extra,
null::jsonb geom_ct_extra
FROM observatory.obs_column_table numer_data_ct,
observatory.obs_table numer_t,
observatory.obs_column_table numer_geomref_ct,
observatory.obs_column geomref_c,
observatory.obs_column_to_column geomref_c2c,
observatory.obs_column_table geom_geom_ct,
observatory.obs_column_table geom_geomref_ct,
observatory.obs_table geom_t,
observatory.obs_column_tag numer_ctag,
observatory.obs_tag numer_tag,
observatory.obs_column numer_c,
leftjoined_denoms,
observatory.obs_column geom_c
LEFT JOIN (
observatory.obs_column_tag geom_ctag JOIN
observatory.obs_tag geom_tag ON geom_tag.id = geom_ctag.tag_id
) ON geom_c.id = geom_ctag.column_id
WHERE numer_c.weight > 0
AND numer_c.id = numer_data_ct.column_id
AND numer_data_ct.table_id = numer_t.id
AND numer_t.id = numer_geomref_ct.table_id
AND numer_geomref_ct.column_id = geomref_c.id
AND geomref_c2c.reltype = 'geom_ref'
AND geomref_c.id = geomref_c2c.source_id
AND geom_c.id = geomref_c2c.target_id
AND geom_geomref_ct.column_id = geomref_c.id
AND geom_geomref_ct.table_id = geom_t.id
AND geom_geom_ct.column_id = geom_c.id
AND geom_geom_ct.table_id = geom_t.id
AND geom_c.type ILIKE 'geometry%'
AND numer_c.type NOT ILIKE 'geometry%'
AND numer_c.id != geomref_c.id
AND numer_ctag.column_id = numer_c.id
AND numer_ctag.tag_id = numer_tag.id
AND numer_c.id = leftjoined_denoms.all_numer_id
AND (leftjoined_denoms.numer_id IS NULL OR (
numer_t.timespan = leftjoined_denoms.denom_timespan
AND geomref_c.id = leftjoined_denoms.geomref_id
))
GROUP BY numer_c.id, denom_id, geom_c.id, numer_t.timespan;
''',
'''CREATE UNIQUE INDEX ON {obs_meta} (numer_id, geom_id, numer_timespan, denom_id);''',
'''CREATE INDEX ON {obs_meta} (numer_tid, numer_t_version);''',
'''CREATE INDEX ON {obs_meta} (geom_tid, geom_t_version);''',
'''-- update numer coltable info
UPDATE {obs_meta} SET
numer_name = c.name,
numer_description = c.description,
numer_t_description = t.description,
numer_version = c.version,
numer_t_version = t.version,
numer_aggregate = aggregate,
numer_type = type,
numer_colname = colname,
numer_tablename = tablename,
numer_timespan = ts.id,
numer_timespan_alias = ts.alias,
numer_timespan_name = ts.name,
numer_timespan_description = ts.description,
numer_timespan_range = ts.timespan,
numer_timespan_weight = ts.weight,
numer_weight = c.weight,
numer_extra = c.extra,
numer_ct_extra = ct.extra
FROM observatory.obs_column c, observatory.obs_column_table ct,
observatory.obs_table t, observatory.obs_timespan ts
WHERE c.id = numer_id
AND t.id = numer_tid
AND c.id = ct.column_id
AND t.id = ct.table_id
AND t.timespan = ts.id;''',
'''-- update denom coltable info
UPDATE {obs_meta} SET
denom_name = c.name,
denom_description = c.description,
denom_t_description = t.description,
denom_version = c.version,
denom_t_version = t.version,
denom_aggregate = aggregate,
denom_type = type,
denom_colname = colname,
denom_tablename = tablename,
denom_timespan = ts.id,
denom_timespan_alias = ts.alias,
denom_timespan_name = ts.name,
denom_timespan_description = ts.description,
denom_timespan_range = ts.timespan,
denom_timespan_weight = ts.weight,
denom_weight = c.weight,
denom_extra = c.extra,
denom_ct_extra = ct.extra
FROM observatory.obs_column c, observatory.obs_column_table ct,
observatory.obs_table t, observatory.obs_timespan ts
WHERE c.id = denom_id
AND t.id = denom_tid
AND c.id = ct.column_id
AND t.id = ct.table_id
AND t.timespan = ts.id;''',
'''-- update geom coltable info
UPDATE {obs_meta} SET
geom_name = c.name,
geom_description = c.description,
geom_t_description = t.description,
geom_version = c.version,
geom_t_version = t.version,
geom_aggregate = aggregate,
geom_type = type,
geom_colname = colname,
geom_tablename = tablename,
geom_timespan = ts.id,
geom_timespan_alias = ts.alias,
geom_timespan_name = ts.name,
geom_timespan_description = ts.description,
geom_timespan_range = ts.timespan,
geom_timespan_weight = ts.weight,
the_geom = t.the_geom,
geom_weight = c.weight,
geom_extra = c.extra,
geom_ct_extra = ct.extra
FROM observatory.obs_column c, observatory.obs_column_table ct,
observatory.obs_table t, observatory.obs_timespan ts
WHERE c.id = geom_id
AND t.id = geom_tid
AND c.id = ct.column_id
AND t.id = ct.table_id
AND t.timespan = ts.id;''',
'''-- update coltag info
DROP TABLE IF EXISTS _obs_coltags;
CREATE TEMPORARY TABLE _obs_coltags AS
SELECT
c.id,
JSONB_OBJECT_AGG(
t.type || '/' || t.id, t.name
) tags,
ARRAY_AGG(DISTINCT t.id) FILTER (WHERE t.type = 'section') section_tags,
ARRAY_AGG(DISTINCT t.id) FILTER (WHERE t.type = 'subsection') subsection_tags,
ARRAY_AGG(DISTINCT t.id) FILTER (WHERE t.type = 'unit') unit_tags
FROM observatory.obs_column c, observatory.obs_column_tag ct, observatory.obs_tag t
WHERE c.id = ct.column_id
AND t.id = ct.tag_id
GROUP BY c.id;
CREATE UNIQUE INDEX ON _obs_coltags (id);''',
'''UPDATE {obs_meta} SET
numer_tags = tags,
section_tags = _obs_coltags.section_tags,
subsection_tags = _obs_coltags.subsection_tags,
unit_tags = _obs_coltags.unit_tags
FROM _obs_coltags WHERE id = numer_id;''',
'''UPDATE {obs_meta} SET
geom_tags = tags
FROM _obs_coltags WHERE id = geom_id;''',
'''UPDATE {obs_meta} SET
denom_tags = tags
FROM _obs_coltags WHERE id = denom_id;''',
'''-- update denom reltype info
UPDATE {obs_meta} SET
denom_reltype = c2c.reltype
FROM observatory.obs_column source,
observatory.obs_column_to_column c2c,
observatory.obs_column target
WHERE c2c.source_id = source.id
AND c2c.target_id = target.id
AND source.id = numer_id
AND target.id = denom_id;
''',
'''CREATE INDEX ON {obs_meta} USING gist (the_geom)''',
'''CREATE INDEX ON {obs_meta} USING gin (numer_tags)'''
]
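    # DIMENSIONS derives the per-dimension rollups (obs_meta_numer,
    # obs_meta_denom, obs_meta_geom, obs_meta_geom_numer_timespan and
    # obs_meta_timespan) from obs_meta_next once it has been built.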
DIMENSIONS = {
'numer': ['''
CREATE TABLE {obs_meta} AS
SELECT numer_id ,
NULL::TEXT numer_name, --FIRST(numer_description)::TEXT numer_name,
NULL::TEXT numer_description, --FIRST(numer_description)::TEXT numer_description,
NULL::JSONB numer_tags, --FIRST(numer_tags)::JSONB numer_tags,
NULL::NUMERIC numer_weight, --FIRST(numer_weight)::NUMERIC numer_weight,
NULL::JSONB numer_extra, --FIRST(numer_extra)::JSONB numer_extra,
NULL::TEXT numer_type, --FIRST(numer_type)::TEXT numer_type,
NULL::TEXT numer_aggregate, --FIRST(numer_aggregate)::TEXT numer_aggregate,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT denom_id)::TEXT[], NULL) denoms,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT geom_id)::TEXT[], NULL) geoms,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT numer_timespan)::TEXT[], NULL) timespans,
NULL::Geometry(Geometry, 4326) the_geom, -- ST_Union(DISTINCT ST_SetSRID(the_geom, 4326)) the_geom
NULL::Integer numer_version
FROM observatory.obs_meta_next
GROUP BY numer_id;
''',
''' ALTER TABLE {obs_meta} ADD PRIMARY KEY (numer_id); ''',
'''
UPDATE {obs_meta} SET
numer_name = obs_meta.numer_name,
numer_description = obs_meta.numer_description,
numer_tags = obs_meta.numer_tags,
numer_weight = obs_meta.numer_weight,
numer_extra = obs_meta.numer_extra,
numer_type = obs_meta.numer_type,
numer_aggregate = obs_meta.numer_aggregate,
numer_version = obs_meta.numer_version
FROM observatory.obs_meta_next obs_meta
WHERE obs_meta.numer_id = {obs_meta}.numer_id;
''',
'''CREATE INDEX ON observatory.obs_meta_next (numer_id, geom_tid); ''',
'''
WITH geom_tids AS (
SELECT ARRAY_AGG(distinct geom_tid) geom_tids, numer_id
FROM observatory.obs_meta_next
GROUP BY numer_id
), unique_geom_ids AS (
SELECT ARRAY_AGG(distinct numer_id) numer_ids, geom_tids
FROM geom_tids
GROUP BY geom_tids
), union_geoms AS (
SELECT numer_ids, geom_tids, ST_Union(the_geom) the_geom
FROM unique_geom_ids, observatory.obs_table
WHERE id = ANY(geom_tids)
GROUP BY numer_ids, geom_tids
) UPDATE {obs_meta}
SET the_geom = union_geoms.the_geom
FROM union_geoms
WHERE {obs_meta}.numer_id = ANY(union_geoms.numer_ids);
'''
],
'denom': ['''
CREATE UNIQUE INDEX ON observatory.obs_meta_next (denom_id, numer_id, geom_id, numer_timespan, denom_timespan);
''',
'''
CREATE TABLE {obs_meta} AS
SELECT denom_id::TEXT,
NULL::TEXT denom_name, --FIRST(denom_name)::TEXT denom_name,
NULL::TEXT denom_description, --FIRST(denom_description)::TEXT denom_description,
NULL::JSONB denom_tags, --FIRST(denom_tags)::JSONB denom_tags,
NULL::NUMERIC denom_weight, --FIRST(denom_weight)::NUMERIC denom_weight,
NULL::TEXT reltype, --'denominator'::TEXT reltype,
NULL::JSONB denom_extra, --FIRST(denom_extra)::JSONB denom_extra,
NULL::TEXT denom_type, --FIRST(denom_type)::TEXT denom_type,
NULL::TEXT denom_aggregate, --FIRST(denom_aggregate)::TEXT denom_aggregate,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT numer_id)::TEXT[], NULL) numers,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT geom_id)::TEXT[], NULL) geoms,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT denom_timespan)::TEXT[], NULL) timespans,
NULL::Geometry(Geometry, 4326) the_geom, -- ST_Union(DISTINCT ST_SetSRID(the_geom, 4326)) the_geom
NULL::Integer denom_version
FROM observatory.obs_meta_next
WHERE denom_id IS NOT NULL
GROUP BY denom_id;
''',
'''
ALTER TABLE {obs_meta} ADD PRIMARY KEY (denom_id);
''',
'''
UPDATE {obs_meta} SET
denom_name = obs_meta.denom_name,
denom_description = obs_meta.denom_description,
denom_tags = obs_meta.denom_tags,
denom_weight = obs_meta.denom_weight,
reltype = obs_meta.denom_reltype,
denom_extra = obs_meta.denom_extra,
denom_type = obs_meta.denom_type,
denom_aggregate = obs_meta.denom_aggregate,
denom_version = obs_meta.denom_version
FROM observatory.obs_meta_next obs_meta
WHERE obs_meta.denom_id = {obs_meta}.denom_id;
''',
'''CREATE INDEX ON observatory.obs_meta_next (denom_id, geom_tid); ''',
'''
WITH geom_tids AS (
SELECT ARRAY_AGG(geom_tid) geom_tids, numer_id
FROM observatory.obs_meta_next
GROUP BY numer_id
) , unique_geom_ids AS (
SELECT ARRAY_AGG(numer_id) numer_ids, geom_tids
FROM geom_tids
GROUP BY geom_tids
), union_geoms AS (
SELECT numer_ids, geom_tids, ST_Union(the_geom) the_geom
FROM unique_geom_ids, observatory.obs_table
WHERE id = ANY(geom_tids)
GROUP BY numer_ids, geom_tids
) UPDATE {obs_meta}
SET the_geom = union_geoms.the_geom
FROM union_geoms
WHERE {obs_meta}.denom_id = ANY(union_geoms.numer_ids);
'''
],
'geom': [
''' CREATE UNIQUE INDEX ON observatory.obs_meta_next
(geom_id, numer_id, numer_timespan, geom_timespan, denom_id);
''',
'''
CREATE TABLE {obs_meta} AS
SELECT geom_id::TEXT,
NULL::TEXT geom_name, --FIRST(geom_name)::TEXT geom_name,
NULL::TEXT geom_description, --FIRST(geom_description)::TEXT geom_description,
NULL::JSONB geom_tags, --FIRST(geom_tags)::JSONB geom_tags,
NULL::NUMERIC geom_weight, --FIRST(geom_weight)::NUMERIC geom_weight,
NULL::JSONB geom_extra, --FIRST(geom_extra)::JSONB geom_extra,
NULL::TEXT geom_type, --FIRST(geom_type)::TEXT geom_type,
NULL::TEXT geom_aggregate, --FIRST(geom_aggregate)::TEXT geom_aggregate
NULL::Geometry(Geometry, 4326) the_geom, --ST_SetSRID(FIRST(the_geom), 4326)::GEOMETRY(GEOMETRY, 4326) the_geom,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT numer_id)::TEXT[], NULL) numers,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT denom_id)::TEXT[], NULL) denoms,
ARRAY_REMOVE(ARRAY_AGG(DISTINCT geom_timespan)::TEXT[], NULL) timespans,
NULL::Integer geom_version
FROM observatory.obs_meta_next
GROUP BY geom_id;
''',
''' ALTER TABLE {obs_meta} ADD PRIMARY KEY (geom_id); ''',
'''
UPDATE {obs_meta} SET
geom_name = obs_meta.geom_name,
geom_description = obs_meta.geom_description,
geom_tags = obs_meta.geom_tags,
geom_weight = obs_meta.geom_weight,
geom_extra = obs_meta.geom_extra,
geom_type = obs_meta.geom_type,
geom_aggregate = obs_meta.geom_aggregate,
geom_version = obs_meta.geom_version
FROM observatory.obs_meta_next obs_meta
WHERE obs_meta.geom_id = {obs_meta}.geom_id;
''',
'''
WITH geom_tids AS (
SELECT ARRAY_AGG(geom_tid) geom_tids, geom_id
FROM observatory.obs_meta_next
GROUP BY geom_id
) , unique_geom_ids AS (
SELECT ARRAY_AGG(geom_id) geom_ids, geom_tids
FROM geom_tids
GROUP BY geom_tids
), union_geoms AS (
SELECT geom_ids, geom_tids, ST_Union(the_geom) the_geom
FROM unique_geom_ids, observatory.obs_table
WHERE id = ANY(geom_tids)
GROUP BY geom_ids, geom_tids
) UPDATE {obs_meta}
SET the_geom = union_geoms.the_geom
FROM union_geoms
WHERE {obs_meta}.geom_id = ANY(union_geoms.geom_ids);
'''
],
'geom_numer_timespan': [
'''
CREATE TABLE {obs_meta} AS
SELECT geom_id::TEXT,
numer_id::TEXT,
ARRAY_AGG(DISTINCT numer_timespan)::TEXT[] timespans,
ARRAY_AGG(DISTINCT geom_timespan)::TEXT[] geom_timespans
FROM observatory.obs_meta_next
GROUP BY geom_id, numer_id;
''',
''' ALTER TABLE {obs_meta} ADD PRIMARY KEY (geom_id, numer_id); ''',
],
'timespan': ['''
CREATE TABLE {obs_meta} AS
SELECT numer_timespan::TEXT timespan_id,
numer_timespan_alias::TEXT timespan_alias,
numer_timespan_name::TEXT timespan_name,
numer_timespan_description::TEXT timespan_description,
numer_timespan_range::DATERANGE timespan_range,
numer_timespan_weight::NUMERIC timespan_weight,
NULL::JSONB timespan_tags, --FIRST(timespan_tags)::JSONB timespan_tags,
NULL::JSONB timespan_extra,
NULL::TEXT timespan_type,
NULL::TEXT timespan_aggregate,
NULL::TEXT[] numers,
NULL::TEXT[] denoms,
NULL::TEXT[] geoms,
NULL::Geometry(Geometry, 4326) the_geom, -- ST_Union(DISTINCT ST_SetSRID(the_geom, 4326)) the_geom
NULL::Integer timespan_version
FROM observatory.obs_meta_next
WHERE numer_timespan IS NOT NULL
GROUP BY numer_timespan, numer_timespan_alias, numer_timespan_name,
numer_timespan_description, numer_timespan_range, numer_timespan_weight;
''',
'''
ALTER TABLE {obs_meta} ADD PRIMARY KEY (timespan_id);
''',
'''
INSERT INTO {obs_meta}
(timespan_id, timespan_alias, timespan_name, timespan_description, timespan_range, timespan_weight,
timespan_tags, timespan_extra, timespan_type, timespan_aggregate, numers, denoms, geoms, the_geom, timespan_version)
SELECT denom_timespan::TEXT timespan_id,
denom_timespan_alias::TEXT timespan_alias,
denom_timespan_name::TEXT timespan_name,
denom_timespan_description::TEXT timespan_description,
denom_timespan_range::DATERANGE timespan_range,
denom_timespan_weight::NUMERIC timespan_weight,
NULL::JSONB timespan_tags, --FIRST(timespan_tags)::JSONB timespan_tags,
NULL::JSONB timespan_extra,
NULL::TEXT timespan_type,
NULL::TEXT timespan_aggregate,
NULL::TEXT[] numers,
NULL::TEXT[] denoms,
NULL::TEXT[] geoms,
NULL::Geometry(Geometry, 4326) the_geom, -- ST_Union(DISTINCT ST_SetSRID(the_geom, 4326)) the_geom
NULL::Integer timespan_version
FROM observatory.obs_meta_next
WHERE denom_timespan IS NOT NULL
GROUP BY denom_timespan, denom_timespan_alias, denom_timespan_name,
denom_timespan_description, denom_timespan_range, denom_timespan_weight
ON CONFLICT (timespan_id) DO NOTHING;
''',
'''
INSERT INTO {obs_meta}
(timespan_id, timespan_alias, timespan_name, timespan_description, timespan_range, timespan_weight,
timespan_tags, timespan_extra, timespan_type, timespan_aggregate, numers, denoms, geoms, the_geom, timespan_version)
SELECT geom_timespan::TEXT timespan_id,
geom_timespan_alias::TEXT timespan_alias,
geom_timespan_name::TEXT timespan_name,
geom_timespan_description::TEXT timespan_description,
geom_timespan_range::DATERANGE timespan_range,
geom_timespan_weight::NUMERIC timespan_weight,
NULL::JSONB timespan_tags, --FIRST(timespan_tags)::JSONB timespan_tags,
NULL::JSONB timespan_extra,
NULL::TEXT timespan_type,
NULL::TEXT timespan_aggregate,
NULL::TEXT[] numers,
NULL::TEXT[] denoms,
NULL::TEXT[] geoms,
NULL::Geometry(Geometry, 4326) the_geom, -- ST_Union(DISTINCT ST_SetSRID(the_geom, 4326)) the_geom
NULL::Integer timespan_version
FROM observatory.obs_meta_next
WHERE geom_timespan IS NOT NULL
GROUP BY geom_timespan, geom_timespan_alias, geom_timespan_name,
geom_timespan_description, geom_timespan_range, geom_timespan_weight
ON CONFLICT (timespan_id) DO NOTHING;
''',
'''
UPDATE {obs_meta} AS t SET numers =
(SELECT ARRAY_REMOVE(ARRAY_AGG(DISTINCT numer_id)::TEXT[], NULL) numers
FROM observatory.obs_meta_next as m
WHERE m.numer_timespan = t.timespan_id);
''',
'''
UPDATE {obs_meta} AS t SET denoms =
(SELECT ARRAY_REMOVE(ARRAY_AGG(DISTINCT denom_id)::TEXT[], NULL) denoms
FROM observatory.obs_meta_next as m
WHERE m.denom_timespan = t.timespan_id);
''',
'''
UPDATE {obs_meta} AS t SET geoms =
(SELECT ARRAY_REMOVE(ARRAY_AGG(DISTINCT geom_id)::TEXT[], NULL) geoms
FROM observatory.obs_meta_next as m
WHERE m.geom_timespan = t.timespan_id);
''',
'''
UPDATE {obs_meta} SET
timespan_tags = obs_meta.timespan_tags
FROM observatory.obs_meta_next obs_meta
WHERE obs_meta.numer_timespan = {obs_meta}.timespan_id;
''',
'''
WITH geom_tids AS (
SELECT ARRAY_AGG(geom_tid) geom_tids, numer_timespan
FROM observatory.obs_meta_next
GROUP BY numer_timespan
) , unique_geom_ids AS (
SELECT ARRAY_AGG(numer_timespan) numer_timespans, geom_tids
FROM geom_tids
GROUP BY geom_tids
), union_geoms AS (
SELECT numer_timespans, geom_tids, ST_Union(the_geom) the_geom
FROM unique_geom_ids, observatory.obs_table
WHERE id = ANY(geom_tids)
GROUP BY numer_timespans, geom_tids
) UPDATE {obs_meta}
SET the_geom = union_geoms.the_geom
FROM union_geoms
WHERE {obs_meta}.timespan_id = ANY(union_geoms.numer_timespans);
''']
}
class DropRemoteOrphanTables(Task):
'''
Clean up & remove tables that are not linked to in the deployed obs_table.
'''
start = IntParameter(default=1)
end = IntParameter(default=10)
def run(self):
resp = query_cartodb('SELECT tablename FROM obs_table')
tablenames = set([r['tablename'] for r in resp.json()['rows']])
remote_tables = []
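        # Scrape the CARTO datasets pages for table names that follow the
        # obs_<sha1> convention; anything found remotely but not referenced
        # by the deployed obs_table is treated as an orphan and removed.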
for page in range(self.start, self.end + 1):
remote_tables.extend(shell("curl -s '{cartodb_url}/datasets?page={page}' "
"| grep -Eo 'obs_[0-f]{{40}}' | uniq".format(
cartodb_url=os.environ['CARTODB_URL'],
page=page
)).strip().split('\n'))
        for table in remote_tables:
            if table in tablenames:
                LOGGER.info('keeping %s', table)
                continue
            LOGGER.info('removing %s', table)
            try:
                CartoDBTarget(table).remove()
            except Exception as err:
                LOGGER.warning(err)
class OBSMetaToLocal(OBSMeta):
force = BoolParameter(default=True)
def requires(self):
yield ConfirmTablesDescribedExist()
def run(self):
session = current_session()
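        # Everything is built into *_next tables first and swapped in with
        # ALTER TABLE ... RENAME at the end, so the live obs_meta tables stay
        # queryable while the rebuild runs.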
try:
session.execute('DROP TABLE IF EXISTS observatory.obs_meta_next')
session.execute(self.FIRST_AGGREGATE)
for i, q in enumerate(self.QUERIES):
before = time.time()
query = q.format(obs_meta='observatory.obs_meta_next')
session.execute(query)
after = time.time()
LOGGER.info('time taken for obs_meta:%s: %s', i, round(after - before, 2))
if i == 1:
session.commit()
session.commit()
except:
session.rollback()
raise
shell("psql -c 'VACUUM ANALYZE observatory.obs_meta_next'")
try:
for dimension, queries in self.DIMENSIONS.items():
before = time.time()
session.execute('DROP TABLE IF EXISTS observatory.obs_meta_next_{dimension}'.format(
dimension=dimension))
for i, q in enumerate(queries):
before = time.time()
query = q.format(obs_meta='observatory.obs_meta_next_{}'.format(dimension))
session.execute(query)
session.flush()
after = time.time()
LOGGER.info('time taken for %s:%s: %s', dimension, i, round(after - before, 2))
# geom_numer_timespan doesn't have geometries so no need to add geometry index for it
if dimension != 'geom_numer_timespan':
session.execute('CREATE INDEX ON observatory.obs_meta_next_{dimension} USING gist '
'(the_geom)'.format(dimension=dimension))
after = time.time()
session.commit()
except:
session.rollback()
session.execute('DROP TABLE IF EXISTS observatory.obs_meta_next')
session.commit()
raise
try:
session.execute('DROP TABLE IF EXISTS observatory.obs_meta')
session.execute('ALTER TABLE observatory.obs_meta_next RENAME TO obs_meta')
for dimension, query in self.DIMENSIONS.items():
session.execute('DROP TABLE IF EXISTS observatory.obs_meta_{dimension}'.format(
dimension=dimension
))
session.execute('''
ALTER TABLE IF EXISTS observatory.obs_meta_next_{dimension}
RENAME TO obs_meta_{dimension}'''.format(
dimension=dimension
))
session.commit()
except:
session.rollback()
session.execute('DROP TABLE IF EXISTS observatory.obs_meta_next')
session.commit()
raise
def output(self):
tables = ['obs_meta', 'obs_meta_numer', 'obs_meta_denom',
'obs_meta_geom', 'obs_meta_timespan', 'obs_meta_geom_numer_timespan']
return [PostgresTarget('observatory', t, non_empty=False) for t in tables] + [UpdatedMetaTarget()]
class SyncMetadata(WrapperTask):
no_force = BoolParameter(default=False, significant=False)
def requires(self):
for table in ('obs_table', 'obs_column', 'obs_column_table',
'obs_tag', 'obs_column_tag', 'obs_dump_version',
'obs_column_to_column', 'obs_meta', 'obs_meta_numer',
'obs_meta_denom', 'obs_meta_geom', 'obs_meta_timespan',
'obs_meta_geom_numer_timespan', 'obs_column_table_tile',
):
if table == 'obs_meta':
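                # obs_meta is very wide, so only this explicit subset of
                # columns is pushed to CARTO (notably omitting the_geom and
                # the timespan alias/name/range bookkeeping columns).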
yield TableToCartoViaImportAPI(
columns=[
'numer_id', 'denom_id', 'geom_id', 'numer_name',
'denom_name', 'geom_name', 'numer_description',
'denom_description', 'geom_description',
'numer_aggregate', 'denom_aggregate', 'geom_aggregate',
'numer_type', 'denom_type', 'geom_type', 'numer_colname',
'denom_colname', 'geom_colname', 'numer_geomref_colname',
'denom_geomref_colname', 'geom_geomref_colname',
'numer_tablename', 'denom_tablename', 'geom_tablename',
'numer_timespan', 'denom_timespan', 'numer_weight',
'denom_weight', 'geom_weight', 'geom_timespan',
'numer_tags', 'denom_tags', 'geom_tags', 'timespan_tags',
'section_tags', 'subsection_tags', 'unit_tags',
'numer_extra', 'numer_ct_extra', 'denom_extra',
'denom_ct_extra', 'geom_extra', 'geom_ct_extra'
],
table=table,
force=not self.no_force)
else:
yield TableToCartoViaImportAPI(table=table, force=not self.no_force)
|
mrunge/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/volumes/volumes/views.py | 12 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.project.volumes.volumes \
import views as volumes_views
class DetailView(volumes_views.DetailView):
template_name = "admin/volumes/volumes/detail.html"
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class CreateVolumeTypeView(forms.ModalFormView):
form_class = volumes_forms.CreateVolumeType
template_name = 'admin/volumes/volumes/create_volume_type.html'
success_url = 'horizon:admin:volumes:volumes_tab'
def get_success_url(self):
return reverse(self.success_url)
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
template_name = 'admin/volumes/volumes/update_status.html'
success_url = reverse_lazy('horizon:admin:volumes:index')
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
return context
@memoized.memoized_method
def get_data(self):
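        # exceptions.handle() with a redirect raises an HTTP redirect on
        # failure, so execution only reaches the return below when the volume
        # lookup succeeded.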
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
|
agustin380/django-localflavor | refs/heads/master | tests/test_hu.py | 10 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.hu.forms import HUCountySelect
class HULocalFlavorTests(SimpleTestCase):
def test_HUCountySelect(self):
f = HUCountySelect()
out = '''<select name="counties">
<option value="bacs_kiskun">Bács-Kiskun</option>
<option value="baranya">Baranya</option>
<option value="bekes">Békés</option>
<option value="borsod_abauj_zemplen">Borsod-Abaúj-Zemplén</option>
<option value="csongrad">Csongrád</option>
<option value="fejer">Fejér</option>
<option value="gyor_moson_sopron">Győr-Moson-Sopron</option>
<option value="hajdu_bihar">Hajdú-Bihar</option>
<option value="heves">Heves</option>
<option value="jasz_nagykun_szolnok">Jász-Nagykun-Szolnok</option>
<option value="komarom_esztergom">Komárom-Esztergom</option>
<option value="nograd">Nógrád</option>
<option value="pest">Pest</option>
<option value="somogy">Somogy</option>
<option value="szabolcs_szatmar_bereg">Szabolcs-Szatmár-Bereg</option>
<option value="tolna">Tolna</option>
<option value="vas" selected="selected">Vas</option>
<option value="veszprem">Veszprém</option>
<option value="zala">Zala</option>
</select>'''
self.assertHTMLEqual(f.render('counties', 'vas'), out)
|
srio/shadow3-scripts | refs/heads/master | HIGHLIGHTS/occupation.py | 1 | from orangecontrib.comsyl.util.CompactAFReader import CompactAFReader
# from CompactAFReader import CompactAFReader
import numpy
from srxraylib.plot.gol import plot_image, plot
# from plot_color import plot_with_transparency_one
import pylab as plt
from matplotlib.colors import Normalize, ListedColormap
import matplotlib.patches as patches
def convert_to_h5(file_from,file_to):
af = CompactAFReader.initialize_from_file(file_from)
af.write_h5(file_to)
print("File written to disk: ",file_to)
if __name__ == "__main__":
# filename_ebs = "/scisoft/data/srio/COMSYL/ID16/id16s_ebs_u18_1400mm_1h_new_s1.0.npy"
# filename_ebs = "/scisoft/data/srio/COMSYL/CALCULATIONS/cs_new_u18_2m_1h_s2.5.h5" # NOT GOOD
# convert_to_h5("/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz",
# "cs_new_u18_2m_1h_s2.5.h5")
# convert_to_h5("/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy",
# "cl_low_beta_u18_2m_1h_s6.5.h5")
# filename_ebs = "cs_new_u18_2m_1h_s2.5.h5"
# filename_ebs = "cl_low_beta_u18_2m_1h_s6.5.h5"
# filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/new_u18_2m_1h_ts_s2.0.npz"
filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz" # OK EBS
filename_lb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy" # OK LB
filename_hb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_high_beta_u18_2m_1h_s2.0.npy"
#
# load CSD
#
af_ebs = CompactAFReader.initialize_from_file(filename_ebs)
cumulated_occupation_ebs = af_ebs.cumulated_occupation_array()
occupation_ebs = af_ebs.occupation_array()
af_lb = CompactAFReader.initialize_from_file(filename_lb)
cumulated_occupation_lb = af_lb.cumulated_occupation_array()
occupation_lb = af_lb.occupation_array()
af_hb = CompactAFReader.initialize_from_file(filename_hb)
cumulated_occupation_hb = af_hb.cumulated_occupation_array()
occupation_hb = af_hb.occupation_array()
#
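    # The first entry of each cumulated occupation array is the weight of the
    # lowest coherent mode, i.e. the coherent fraction of the source.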
print("Coherent fraction EBS: ",cumulated_occupation_ebs[0])
print("Coherent fraction LB: ",cumulated_occupation_lb[0])
print("Coherent fraction HB: ",cumulated_occupation_hb[0])
extensions = ["ebs","lb","hb"]
data = [cumulated_occupation_ebs,cumulated_occupation_lb,cumulated_occupation_hb]
data_occ = [occupation_ebs,occupation_lb,occupation_hb]
plot(numpy.arange(cumulated_occupation_ebs.size),cumulated_occupation_ebs,
numpy.arange(cumulated_occupation_lb.size),cumulated_occupation_lb,
numpy.arange(cumulated_occupation_hb.size),cumulated_occupation_hb,
legend=extensions)
for i,extension in enumerate(extensions):
f = open("cumulated_occupation_%s.dat"%extension,'w')
data_i = data[i]
for j in range(data_i.size):
f.write("%d %g \n"%(j,data_i[j]))
f.close()
print("File written to disk: cumulated_occupation_%s.dat"%extension)
f = open("occupation_%s.dat"%extension,'w')
data_i = data_occ[i]
for j in range(data_i.size):
f.write("%d %g \n"%(j,data_i[j]))
f.close()
print("File written to disk: occupation_%s.dat"%extension)
#
# get indices
#
# first propagate a few modes only to check there are no errors
# afp = AFpropagated.propagate(af,distance=distance,index_max=1,zoom=zoom)
|
psav/cfme_tests | refs/heads/master | cfme/configure/documentation.py | 5 | from widgetastic.widget import Text, Image, View
from widgetastic.utils import VersionPick, Version
class LinksView(View):
"""
Widgets for all of the links on the documentation page
Each doc link is an anchor with a child image element, then an anchor with text
Both the image and the text anchor should link to the same PDF
"""
# todo: update this view. it contains too much duplicate code
@View.nested
class policies(View): # noqa
TEXT = 'Defining Policies Profiles Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class general(View): # noqa
TEXT = 'General Configuration Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class inventory(View): # noqa
TEXT = 'Infrastructure Inventory Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class automation(View): # noqa
TEXT = 'Methods For Automation Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class monitoring(View): # noqa
TEXT = 'Monitoring Alerts Reporting Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class providers(View): # noqa
TEXT = 'Providers Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class rest(View): # noqa
TEXT = 'Rest Api Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class scripting(View): # noqa
TEXT = 'Scripting Actions Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class vm_hosts(View): # noqa
TEXT = 'Virtual Machines Hosts Guide'
img_anchor = Text('//a[@title="View the {}"]'.format(TEXT))
img = Image(locator='//img[@alt="View the {}"]'.format(TEXT))
link = VersionPick({Version.lowest(): Text('//a[normalize-space(.)="{}"]'.format(TEXT)),
'5.9': img_anchor})
@View.nested
class customer_portal(View): # noqa
TEXT = 'Red Hat Customer Portal'
link = Text('//a[normalize-space(.)="{}"]'.format(TEXT))
class DocView(View):
"""
View for the documentation page, a title and a bunch of pdf of links
"""
@property
def is_displayed(self):
return (
self.title.read() == 'Documentation' and
all([link.is_displayed for link in self.links.sub_widgets])
)
title = Text('//div[@id="main-content"]//div/h1')
links = View.nested(LinksView)
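# Hedged usage sketch (not part of this module): how a test might exercise DocView
# once the cfme navigation layer returns it.  "navigate_to", the appliance object
# and the 'Documentation' destination name are assumptions, not confirmed here.
#
#     view = navigate_to(appliance.server, 'Documentation')
#     assert view.is_displayed
#     view.links.rest.link.click()      # follows the "Rest Api Guide" anchor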
|
rvswift/BlastNFilter | refs/heads/master | BlastNFilter/Blast/Test.py | 1 | __author__ = 'robswift'
import os
import gzip
import socket
from BlastNFilter.PreRelease import filtering_sets as filtering_sets
from SequenceBase import Base
from collections import defaultdict
from Bio.Alphabet import generic_protein
from Bio import SeqIO
class Test(Base):
"""
"""
if socket.gethostname() == 'Robs-MacBook-Pro.local':
pdb_dir = '/Users/robswift/Documents/Work/D3R/devel/data/pdb'
else:
pdb_dir = '/data/pdb'
pdb_dict = defaultdict(list)
@staticmethod
def set_pdb_dict(fasta):
"""
        Each PDB ID may have one or more chains, each with a unique sequence. Each PDB ID is mapped to a list of sequences,
for each of its chains. This information is stored in the class default dictionary pdb_dict, i.e.
pdb_dict = { 'PDB ID' : [ seq_chainA, seq_chainB, ... ] }
:param fasta: path to the PDB sequences stored in FASTA format, i.e. "pdb_seqres.txt"
:return:
"""
fasta_handle = open(fasta, 'r')
for record in SeqIO.parse(fasta_handle, "fasta"):
pdb_id, chain_id = record.id.split('_')
# only add protein sequences
if 'mol:protein' in record.description:
record.alphabet = generic_protein
Test.pdb_dict[pdb_id].append(record)
fasta_handle.close()
def __init__(self):
super(Base, self).__init__()
self.sequences = {}
self.coverage = None
self.identity = None
self.resolution = None
self.ligands = { } # {'resname': ('InChI', 'label'}. label is 'do_not_call' or 'dock'
self.exp_method = None
def set_coverage(self, coverage):
self.coverage = coverage
def set_identity(self, identity):
self.identity = identity
def set_sequence(self, pdb_id, chain_id):
"""
Looks for pdb sequence in the contents of the 'pdb_seqres.txt' file, stored in pdb_dict
:param pdb_id: 4-letter pdb id
:param chain_id: chain-id
:return:
"""
pdb_id = pdb_id.lower() # PDBIDS are lower case in fasta
self.sequences[pdb_id] = Test.pdb_dict[pdb_id]
def set_resolution(self):
"""
The resolution of the PDB of the target object is extracted from the PDB file stored in one of the
subdirectories of Test.pdb_dir. If a PDB file cannot be found, a warning message is printed. If the resolution
        cannot be found in the PDB file, a warning message is printed.
"""
f = self.read_pdb_file()
if f and self.exp_method == 'X-RAY DIFFRACTION':
try:
self.resolution = [x.split()[3] for x in f if 'REMARK 2 RESOLUTION' in x][0]
except IndexError:
print "Resolution for PDB %s could not be found" % self.pdb_id
elif f and self.exp_method != 'X-RAY DIFFRACTION':
self.resolution = 'Not applicable'
else:
print "PDB file %s could not be found. Resolution could not be set" % self.pdb_id
def set_expt_method(self):
"""
:return:
"""
f = self.read_pdb_file()
if f:
try:
self.exp_method = ' '.join([x.split()[1:] for x in f if 'EXPDTA' in x][0])
except IndexError:
print "The experimental method for PDB %s could not be found" % self.pdb_id
else:
print "PDB file %s could not be found. The experimental method could not be set" % self.pdb_id
def set_ligands(self):
"""
Add a ligand object to the ligand list (add a check to ensure it's a ligand class being passed)
:param ligand:
:return:
"""
f = self.read_pdb_file()
if f:
for resname in [x.split()[1] for x in f if 'HETNAM' in x]:
if not resname.isdigit():
if resname in filtering_sets.do_not_call:
label = 'do_not_call'
else:
label = 'dock'
self.ligands[resname] = ('NA', label)
else:
print "pdb file: %s could not be found. Test ligand dictionary could not be set" % self.pdb_id
def read_pdb_file(self):
"""
The PDB file corresponding to the PDB ID of the target object is read. Lines of the file are extracted into
a list, f, and returned. If a PDB file cannot be found an empty list is returned.
:return: f, a list of lines contained in the PDB file corresponding to the PDB ID of the target object.
"""
pdb_file = os.path.join(Test.pdb_dir, self.pdb_id[1:3], 'pdb' + self.pdb_id + '.ent.gz')
try:
handle = gzip.open(pdb_file, 'rb')
f = handle.readlines()
handle.close()
except IOError:
f = []
return f
def get_coverage(self):
return self.coverage
def get_identity(self):
return self.identity
def get_sequences(self):
return self.sequences
def get_resolution(self):
return self.resolution
def get_ligand_names(self):
return self.ligands.keys()
def get_expt_method(self):
return self.exp_method
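# Hedged usage sketch (not part of this module): the intended call order, assuming a
# local "pdb_seqres.txt" FASTA dump and that the Base class provides the pdb_id
# attribute used by the methods above.
#
#     Test.set_pdb_dict("pdb_seqres.txt")
#     hit = Test()
#     hit.pdb_id = "1abc"                 # hypothetical PDB ID
#     hit.set_expt_method()
#     hit.set_resolution()
#     hit.set_ligands()
#     hit.set_sequence("1abc", "A")
#     print hit.get_resolution(), hit.get_ligand_names()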
|
cuilishen/cuilishenMissionPlanner | refs/heads/master | Lib/distutils/command/upload.py | 176 | """distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
import os
import socket
import platform
from urllib2 import urlopen, Request, HTTPError
from base64 import standard_b64encode
import urlparse
import cStringIO as StringIO
from hashlib import md5
from distutils.errors import DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + standard_b64encode(self.username + ":" +
self.password)
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if isinstance(value, tuple):
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
headers = {'Content-type':
'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
if self.show_response:
                msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
return
except HTTPError, e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (status, reason),
log.ERROR)
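# Hedged note (not part of the distutils source): finalize_options() above pulls the
# credentials and repository URL from PyPIRCCommand._read_pypirc(), which expects a
# ~/.pypirc file roughly like the sketch below before "setup.py ... upload" is run.
#
#     [distutils]
#     index-servers = pypi
#
#     [pypi]
#     repository = https://upload.example.org/legacy/   ; hypothetical URL
#     username = someuser
#     password = somepassword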
|
rfhk/rqn-custom | refs/heads/10.0 | hr_timesheet_sheet_work_hours_summary/tests/__init__.py | 1 | # -*- coding: utf-8 -*-
from . import test_hr_timesheet_work
|
thurt/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/third_party/python_26/Lib/stringold.py | 293 | # module 'string' -- A collection of string operations
# Warning: most of the code you see here isn't normally used nowadays. With
# Python 1.6, many of these functions are implemented as methods on the
# standard string object. They used to be implemented by a built-in module
# called strop, but strop is now obsolete itself.
"""Common string manipulations.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
"""
from warnings import warnpy3k
warnpy3k("the stringold module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
# Case conversion helpers
_idmap = ''
for i in range(256): _idmap = _idmap + chr(i)
del i
# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
"""lower(s) -> string
Return a copy of the string s converted to lowercase.
"""
return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
"""upper(s) -> string
Return a copy of the string s converted to uppercase.
"""
return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
"""swapcase(s) -> string
Return a copy of the string s with upper case characters
converted to lowercase and vice versa.
"""
return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s):
"""strip(s) -> string
Return a copy of the string s with leading and trailing
whitespace removed.
"""
return s.strip()
# Strip leading tabs and spaces
def lstrip(s):
"""lstrip(s) -> string
Return a copy of the string s with leading whitespace removed.
"""
return s.lstrip()
# Strip trailing tabs and spaces
def rstrip(s):
"""rstrip(s) -> string
Return a copy of the string s with trailing whitespace
removed.
"""
return s.rstrip()
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=0):
"""split(str [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string. If maxsplit is nonzero, splits into at most
maxsplit words If sep is not specified, any whitespace string
is a separator. Maxsplit defaults to 0.
(split and splitfields are synonymous)
"""
return s.split(sep, maxsplit)
splitfields = split
# Join fields with optional separator
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words)
joinfields = join
# for a little bit of speed
_apply = apply
# Find substring, raise exception if not found
def index(s, *args):
"""index(s, sub [,start [,end]]) -> int
Like find but raises ValueError when the substring is not found.
"""
return _apply(s.index, args)
# Find last substring, raise exception if not found
def rindex(s, *args):
"""rindex(s, sub [,start [,end]]) -> int
Like rfind but raises ValueError when the substring is not found.
"""
return _apply(s.rindex, args)
# Count non-overlapping occurrences of substring
def count(s, *args):
"""count(s, sub[, start[,end]]) -> int
Return the number of occurrences of substring sub in string
s[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return _apply(s.count, args)
# Find substring, return -1 if not found
def find(s, *args):
"""find(s, sub [,start [,end]]) -> in
Return the lowest index in s where substring sub is found,
such that sub is contained within s[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return _apply(s.find, args)
# Find last substring, return -1 if not found
def rfind(s, *args):
"""rfind(s, sub [,start [,end]]) -> int
Return the highest index in s where substring sub is found,
such that sub is contained within s[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return _apply(s.rfind, args)
# for a bit of speed
_float = float
_int = int
_long = long
_StringType = type('')
# Convert string to float
def atof(s):
"""atof(s) -> float
Return the floating point number represented by the string s.
"""
if type(s) == _StringType:
return _float(s)
else:
raise TypeError('argument 1: expected string, %s found' %
type(s).__name__)
# Convert string to integer
def atoi(*args):
"""atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
try:
s = args[0]
except IndexError:
raise TypeError('function requires at least 1 argument: %d given' %
len(args))
# Don't catch type error resulting from too many arguments to int(). The
# error message isn't compatible but the error type is, and this function
# is complicated enough already.
if type(s) == _StringType:
return _apply(_int, args)
else:
raise TypeError('argument 1: expected string, %s found' %
type(s).__name__)
# Convert string to long integer
def atol(*args):
"""atol(s [,base]) -> long
Return the long integer represented by the string s in the
given base, which defaults to 10. The string s must consist
of one or more digits, possibly preceded by a sign. If base
is 0, it is chosen from the leading characters of s, 0 for
octal, 0x or 0X for hexadecimal. If base is 16, a preceding
0x or 0X is accepted. A trailing L or l is not accepted,
unless base is 0.
"""
try:
s = args[0]
except IndexError:
raise TypeError('function requires at least 1 argument: %d given' %
len(args))
# Don't catch type error resulting from too many arguments to long(). The
# error message isn't compatible but the error type is, and this function
# is complicated enough already.
if type(s) == _StringType:
return _apply(_long, args)
else:
raise TypeError('argument 1: expected string, %s found' %
type(s).__name__)
# Left-justify a string
def ljust(s, width):
"""ljust(s, width) -> string
Return a left-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated.
"""
n = width - len(s)
if n <= 0: return s
return s + ' '*n
# Right-justify a string
def rjust(s, width):
"""rjust(s, width) -> string
Return a right-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated.
"""
n = width - len(s)
if n <= 0: return s
return ' '*n + s
# Center a string
def center(s, width):
"""center(s, width) -> string
    Return a centered version of s, in a field of the specified
    width, padded with spaces as needed. The string is never
truncated.
"""
n = width - len(s)
if n <= 0: return s
half = n/2
if n%2 and width%2:
# This ensures that center(center(s, i), j) = center(s, j)
half = half+1
return ' '*half + s + ' '*(n-half)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
"""zfill(x, width) -> string
Pad a numeric string x with zeros on the left, to fill a field
of the specified width. The string x is never truncated.
"""
if type(x) == type(''): s = x
else: s = repr(x)
n = len(s)
if n >= width: return s
sign = ''
if s[0] in ('-', '+'):
sign, s = s[0], s[1:]
return sign + '0'*(width-n) + s
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
"""expandtabs(s [,tabsize]) -> string
Return a copy of the string s with all tab characters replaced
by the appropriate number of spaces, depending on the current
column, and the tabsize (default 8).
"""
res = line = ''
for c in s:
if c == '\t':
c = ' '*(tabsize - len(line) % tabsize)
line = line + c
if c == '\n':
res = res + line
line = ''
return res + line
# Character translation through look-up table.
def translate(s, table, deletions=""):
"""translate(s,table [,deletechars]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256.
"""
return s.translate(table, deletions)
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize()
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s, [sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. Note that this replaces runs of whitespace characters by
a single space.
"""
return join(map(capitalize, s.split(sep)), sep or ' ')
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
"""maketrans(frm, to) -> string
Return a translation table (a string of 256 bytes long)
suitable for use in string.translate. The strings frm and to
must be of the same length.
"""
if len(fromstr) != len(tostr):
raise ValueError, "maketrans arguments must have same length"
global _idmapL
if not _idmapL:
_idmapL = list(_idmap)
L = _idmapL[:]
fromstr = map(ord, fromstr)
for i in range(len(fromstr)):
L[fromstr[i]] = tostr[i]
return join(L, "")
# Substring replacement (global)
def replace(s, old, new, maxsplit=0):
"""replace (str, old, new[, maxsplit]) -> string
Return a copy of string str with all occurrences of substring
old replaced by new. If the optional argument maxsplit is
given, only the first maxsplit occurrences are replaced.
"""
return s.replace(old, new, maxsplit)
# XXX: transitional
#
# If string objects do not have methods, then we need to use the old string.py
# library, which uses strop for many more things than just the few outlined
# below.
try:
''.upper
except AttributeError:
from stringold import *
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
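# Hedged examples (not part of the original module), useful as a quick sanity check
# of the pure-Python helpers defined above:
#
#     capwords(" aBc dEf ")                     # -> 'Abc Def'
#     zfill(-3, 3)                              # -> '-03'
#     center('hi', 5)                           # -> '  hi '
#     expandtabs('a\tb', 4)                     # -> 'a   b'
#     translate('abc', maketrans('ab', 'xy'))   # -> 'xyc'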
|
calispac/digicam_toy | refs/heads/master | digicamtoy/gui/configurator.py | 1 | from tkinter import *
from digicamtoy.core import tracegenerator
import inspect
#from tkinter.ttk import *
class ConfigurationWindow(Frame):
"""Notre fenêtre principale.
Tous les widgets sont stockés comme attributs de cette fenêtre."""
def __init__(self, fenetre, **kwargs):
Frame.__init__(self, fenetre, width=768, height=576, **kwargs)
self.pack(fill=BOTH)
self.nb_clic = 0
        # Create our widgets
self.message = Label(self, text="Vous n'avez pas cliqué sur le bouton.")
self.message.pack()
self.bouton_quitter = Button(self, text="Quitter", command=self.quit)
self.bouton_quitter.pack(side="left")
self.bouton_cliquer = Button(self, text="Cliquez ici", fg="red", command=self.cliquer)
self.bouton_cliquer.pack(side="right")
def cliquer(self):
"""Il y a eu un clic sur le bouton.
On change la valeur du label message."""
self.nb_clic += 1
self.message["text"] = "Vous avez cliqué {} fois.".format(self.nb_clic)
return
if __name__ == '__main__':
a = inspect.signature(tracegenerator.TraceGenerator.__init__)
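    # Hedged sketch (not in the original script): "a" is computed above but never
    # used; one plausible next step is to map the generator's constructor
    # parameters onto configuration widgets, e.g.:
    #
    #     for name, parameter in a.parameters.items():
    #         if name != 'self':
    #             print(name, parameter.default)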
window = Tk()
#window.style = Style()
#print(window.style.theme_names())
# ('clam', 'alt', 'default', 'classic')
#window.style.theme_use('clam')
ConfigurationWindow(window)
window.mainloop()
    window.destroy()
|
geekaia/edx-platform | refs/heads/master | common/lib/sandbox-packages/loncapa/__init__.py | 280 | #!/usr/bin/python
from .loncapa_check import *
|
hbrunn/OpenUpgrade | refs/heads/master | addons/point_of_sale/__init__.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import controllers
import point_of_sale
import report
import res_users
import res_partner
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
markdryan/media-service-demo | refs/heads/master | src/msd/__init__.py | 4 | # media-service-demo
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Mark Ryan <[email protected]>
#
|
rudaoshi/neuralmachines | refs/heads/master | neural_machine/tasks/language/dialog/dialog.py | 3 |
import numpy as np
import mxnet as mx
from neural_machine.tasks.language.common.problem import Problem
from neural_machine.component.lstm import StackedLSTM, SequenceDecoder
class DialogProblem(Problem):
def __init__(self, corpus):
super(DialogProblem, self).__init__(corpus)
def is_supervised(self):
return True
def data_names(self):
return ["data"]
def label_names(self):
return ["label"]
def samples(self):
for x in self.corpus.corpus:
data = x[:-1]
label = x[1:]
yield [data], [label]
@staticmethod
def objective(label, pred):
"Perplexity for language model"
#logging.debug("{0} {1}".format(label.shape, pred.shape))
label = label.T.reshape((-1,))
loss = 0.
for i in range(pred.shape[0]):
try:
loss += -np.log(max(1e-10, pred[i][int(label[i])]))
except:
print >> sys.stderr, pred
print >> sys.stderr, label
raise
return np.exp(loss / label.size)
class DialogModelArchParam(object):
def __init__(self, num_hidden, num_embed,
num_lstm_layer, cell_num):
self.num_hidden = num_hidden
self.num_embed = num_embed
self.num_lstm_layer = num_lstm_layer
self.cell_num = cell_num
class DialogModelLearnParam(object):
def __init__(self, num_epoch, learning_rate, momentum):
self.num_epoch = num_epoch
self.learning_rate = learning_rate
self.momentum = momentum
class DialogModel(object):
def __init__(self, param):
self.param = param
def __build(self, bucket):
embed_weight = mx.sym.Variable("embed_weight")
cls_weight = mx.sym.Variable("cls_weight")
cls_bias = mx.sym.Variable("cls_bias")
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
seq_len = bucket[0]
embed = mx.sym.Embedding(data=data, input_dim=self.param.cell_num,
weight=embed_weight, output_dim=self.param.num_embed, name='embed')
wordvec = mx.sym.SliceChannel(data=embed, num_outputs=seq_len, squeeze_axis=1)
encoder_out, states = StackedLSTM(self.param.num_lstm_layer,
self.param.num_hidden, seq_len, name = "encoder",
return_sequence=False,
output_states=True)(wordvec)
decoder_out = SequenceDecoder(self.param.num_lstm_layer,
self.param.num_hidden, seq_len, name = "decoder",
init_states=states,
return_sequence=True,
output_states=False)(encoder_out)
pred = mx.sym.FullyConnected(data=decoder_out, num_hidden=self.param.cell_num,
weight=cls_weight, bias=cls_bias, name='pred')
################################################################################
# Make label the same shape as our produced data path
# I did not observe big speed difference between the following two ways
label = mx.sym.transpose(data=label)
label = mx.sym.Reshape(data=label, shape=(-1,))
# label_slice = mx.sym.SliceChannel(data=label, num_outputs=seq_len)
# label = [label_slice[t] for t in range(seq_len)]
# label = mx.sym.Concat(*label, dim=0)
# label = mx.sym.Reshape(data=label, target_shape=(0,))
################################################################################
sm = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return sm
def train(self, data_train, data_val, learning_param):
self.symbol = lambda seq_len: self.__build(seq_len)
contexts = [mx.context.cpu(i) for i in range(1)]
self.model = mx.model.FeedForward(ctx=contexts,
symbol=self.symbol,
num_epoch=learning_param.num_epoch,
learning_rate=learning_param.learning_rate,
momentum=learning_param.momentum,
wd=0.00001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
init_states = RepeatedAppendIter(
[np.zeros((batch_size, self.param.num_hidden))] * 4,
["encoder" + 'l{0}_init_{1}'.format(l, t)
for l in range(self.param.num_lstm_layer)
for t in ["c", "h"]])
train_iter = MergeIter(data_train, init_states)
val_iter = MergeIter(data_val, init_states)
print train_iter.provide_data
self.model.fit(X=train_iter, eval_data=val_iter,
eval_metric=mx.metric.np(DialogProblem.objective),
batch_end_callback=mx.callback.Speedometer(batch_size, 50), )
def show_shape_info(self, train_iter):
default_symbol = self.symbol(train_iter.default_bucket_key)
arg_shape, output_shape, aux_shape = default_symbol.infer_shape(
**dict(train_iter.provide_data + train_iter.provide_label)
)
arg_names = default_symbol.list_arguments()
aux_names = default_symbol.list_auxiliary_states()
for i in range(len(arg_names)):
print arg_names[i], arg_shape[i]
for i in range(len(aux_names)):
print aux_names[i], aux_shape[i]
print "output shape", output_shape
from neural_machine.tasks.language.common.corpus.segmentor import *
from neural_machine.tasks.language.common.corpus.sequence_corpus import SequenceCorpus
from neural_machine.tasks.language.common.data_reader.bucket_iter import *
import sys
import logging
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
segmenter = SpaceSegmenter()
corpus = SequenceCorpus()
corpus.build(open(sys.argv[1], 'r'), segmenter)
cell_num = corpus.cell_num()
problem = DialogProblem(corpus)
batch_size = 32
data_train = BucketIter(problem, batch_size)
val_corpus = corpus.make(open(sys.argv[2], 'r'), segmenter)
val_problem = DialogProblem(val_corpus)
data_val = BucketIter(val_problem, batch_size)
arch_param = DialogModelArchParam(
num_hidden= 200,
num_embed= 200,
num_lstm_layer= 2,
cell_num = corpus.cell_num()
)
learning_param = DialogModelLearnParam(
num_epoch=25,learning_rate=0.01, momentum=0.0
)
lm = DialogModel(arch_param)
lm.train(data_train, data_val, learning_param)
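    # Hedged sanity check (not part of the original script): a perfectly confident,
    # correct prediction should give a perplexity of ~1.0 under DialogProblem.objective.
    toy_label = np.array([[0, 1]])
    toy_pred = np.array([[1.0, 0.0], [0.0, 1.0]])
    print DialogProblem.objective(toy_label, toy_pred)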
|