repo_name (string, length 5 to 100) | path (string, length 4 to 299) | copies (string, 990 classes) | size (string, length 4 to 7) | content (string, length 666 to 1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
sagemath/git-trac-command | git_trac/cmdline.py | 1 | 12216 | ## -*- encoding: utf-8 -*-
"""
Handle Command Line Options
"""
##############################################################################
# The "git trac ..." command extension for git
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import sys
import os
import warnings
import argparse
from .logger import logger
from .ticket_or_branch import TicketOrBranch
def xdg_open(uri):
import subprocess
if sys.platform == 'darwin':
rc = subprocess.call(['open', uri])
error = 'Failed to run "open", please open {0}'
else:
rc = subprocess.call(['xdg-open', uri])
error = 'Failed to run "xdg-open", please open {0}'
if rc != 0:
print(error.format(uri))
def show_cheat_sheet():
# case where `git-trac` was just symbolically linked
root_dir = os.path.dirname(os.path.dirname(__file__))
cheat_sheet = os.path.join(root_dir, 'doc', 'git-cheat-sheet.pdf')
# case of `python setup.py install --user`
if not os.path.exists(cheat_sheet):
root_dir = __import__('site').USER_BASE
cheat_sheet = os.path.join(root_dir,
'share',
'git-trac-command',
'git-cheat-sheet.pdf')
# case of `python setup.py install`
if not os.path.exists(cheat_sheet):
root_dir = sys.prefix
cheat_sheet = os.path.join(root_dir,
'share',
'git-trac-command',
'git-cheat-sheet.pdf')
# go to internet if not found
if not os.path.exists(cheat_sheet):
cheat_sheet = "http://github.com/sagemath/git-trac-command/raw/master/doc/git-cheat-sheet.pdf"
print('Cheat sheet not found locally. Trying the internet.')
xdg_open(cheat_sheet)
def debug_shell(app, parser):
from IPython.terminal.ipapp import TerminalIPythonApp
ip = TerminalIPythonApp.instance()
ip.initialize(argv=[])
ip.shell.user_global_ns['app'] = app
ip.shell.user_global_ns['logger'] = logger
ip.shell.user_global_ns['repo'] = app.repo
ip.shell.user_global_ns['git'] = app.git
ip.shell.user_global_ns['trac'] = app.trac
ip.shell.user_global_ns['parser'] = parser
def ipy_import(module_name, identifier):
import importlib
module = importlib.import_module(module_name)
ip.shell.user_global_ns[identifier] = getattr(module, identifier)
ipy_import('git_trac.git_interface', 'GitInterface')
ipy_import('git_trac.trac_server', 'TracServer')
ip.start()
description = \
"""
The trac command extension for git
"""
def monkey_patch():
"""
Monkey patch ArgumentParser
"""
old_parse_args = argparse.ArgumentParser.parse_args
def parse_args_override(self, args=None):
"""
http://bugs.python.org/issue9253 prevents us from just redefining -h
Workaround by monkey-patching parse_args
"""
if args is None:
args = list(sys.argv)[1:]
if len(args) == 1 and args[-1] == '-h':
# Convert "git-trac -h" to "git-trac help"
args[-1] = 'help'
return old_parse_args(self, args)
setattr(argparse.ArgumentParser, 'parse_args', parse_args_override)
def make_parser():
monkey_patch()
parser = argparse.ArgumentParser(description=description, add_help=False)
# We cannot handle "git trac --help", this is outside of our control and purely within git
# redefine to not print '--help' in the online help
parser.add_argument('-h', dest='option_help', action='store_true',
default=False,
help='show this help message and exit')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='debug')
parser.add_argument('--log', dest='log', default=None,
help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')
subparsers = parser.add_subparsers(dest='subcommand')
parser_create = subparsers.add_parser('create', help='Create new ticket')
parser_create.add_argument('-b', '--branch', dest='branch_name',
help='Branch name',
default=None)
parser_create.add_argument('summary', type=str, help='Ticket summary')
parser_checkout = subparsers.add_parser('checkout', help='Download branch')
parser_checkout.add_argument('-b', '--branch', dest='branch_name',
help='Local branch name',
default=None)
parser_checkout.add_argument('ticket_or_branch', type=TicketOrBranch,
help='Ticket number or remote branch name')
parser_search = subparsers.add_parser('search', help='Search trac')
parser_search.add_argument('--branch', dest='branch_name',
help='Remote git branch name (default: local branch)',
default=None)
parser_fetch = subparsers.add_parser('fetch', help='Fetch branch from trac ticket')
parser_fetch.add_argument('ticket_or_branch', nargs='?', type=TicketOrBranch,
help='Ticket number or remote branch name', default=None)
parser_pull = subparsers.add_parser('pull', help='Get updates')
parser_pull.add_argument('ticket_or_branch', nargs='?', type=TicketOrBranch,
help='Ticket number or remote branch name', default=None)
parser_push = subparsers.add_parser('push', help='Upload changes')
parser_push.add_argument('--force', dest='force', action='store_true',
default=False, help='Force push')
parser_push.add_argument('--branch', dest='remote',
default=None, help='Remote branch name')
parser_push.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_get = subparsers.add_parser('get', help='Print trac page')
parser_get.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_depends = subparsers.add_parser('depends', help='Print trac dependencies')
parser_depends.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_print = subparsers.add_parser('print', help='Print trac page')
parser_print.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_browse = subparsers.add_parser('browse', help='Open trac page in browser')
parser_browse.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_review = subparsers.add_parser('review', help='Show code to review')
parser_review.add_argument('ticket', nargs='?', type=int,
help='Ticket number', default=None)
parser_find = subparsers.add_parser('find', help='Find trac ticket from SHA1')
parser_find.add_argument('commit', type=str, help='Commit SHA1')
parser_try = subparsers.add_parser('try', help='Try out trac ticket in "detached HEAD"')
parser_try.add_argument('ticket_or_branch', type=TicketOrBranch,
help='Ticket number or remote branch name')
parser_log = subparsers.add_parser('log', help='Commit log for ticket')
parser_log.add_argument('ticket', type=int, help='Ticket number')
parser_log.add_argument('--oneline', dest='oneline', action='store_true',
default=False, help='One line per commit')
parser_config = subparsers.add_parser('config', help='Configure git-trac')
parser_config.add_argument('--user', dest='trac_user',
help='Trac username', default=None)
parser_config.add_argument('--pass', dest='trac_pass',
help='Trac password', default=None)
parser_config.add_argument('--token', dest='trac_token',
help="Trac authentication token (this can "
"be used in lieu of username/password "
"and must be used if you authenticate "
"with Trac via GitHub)")
parser_cheatsheet = subparsers.add_parser('cheat-sheet', help='Show the git trac cheat sheet')
parser_help = subparsers.add_parser('help', help='Show the git trac help')
return parser
def launch():
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
if args.log is not None:
import logging
level = getattr(logging, args.log)
logger.setLevel(level=level)
from .app import Application
app = Application()
if args.debug:
print(args)
app.config.debug = True
debug_shell(app, parser)
elif args.option_help:
parser.print_help()
elif args.subcommand == 'create':
app.create(args.summary, args.branch_name)
elif args.subcommand == 'checkout':
app.checkout(args.ticket_or_branch, args.branch_name)
elif args.subcommand == 'fetch':
app.fetch(args.ticket_or_branch)
elif args.subcommand == 'pull':
app.pull(args.ticket_or_branch)
elif args.subcommand == 'push':
ticket_number = app.guess_ticket_number(args.ticket)
print('Pushing to Trac #{0}...'.format(ticket_number))
app.push(ticket_number, remote=args.remote, force=args.force)
elif args.subcommand == 'review':
ticket_number = app.guess_ticket_number(args.ticket)
app.review_diff(ticket_number)
elif args.subcommand == 'try':
app.tryout(args.ticket_or_branch)
elif args.subcommand == 'get':
warnings.warn('deprecated; use "git trac print" instead')
ticket_number = app.guess_ticket_number(args.ticket)
app.print_ticket(ticket_number)
elif args.subcommand == 'print':
ticket_number = app.guess_ticket_number(args.ticket)
app.print_ticket(ticket_number)
elif args.subcommand == 'depends':
ticket_number = app.guess_ticket_number(args.ticket)
app.print_dependencies(ticket_number)
elif args.subcommand == 'browse':
ticket_number = app.guess_ticket_number(args.ticket)
xdg_open('https://trac.sagemath.org/{0}'.format(ticket_number))
elif args.subcommand == 'log':
app.log(args.ticket, oneline=args.oneline)
elif args.subcommand == 'find':
app.find(args.commit)
elif args.subcommand == 'search':
try:
app.search(branch=args.branch_name)
except ValueError:
parser_search.print_help()
raise
elif args.subcommand == 'config':
app.add_remote()
if args.trac_user is not None:
app.save_trac_username(args.trac_user)
if args.trac_pass is not None:
app.save_trac_password(args.trac_pass)
if args.trac_token is not None:
app.save_trac_token(args.trac_token)
app.print_config()
elif args.subcommand == 'cheat-sheet':
show_cheat_sheet()
elif args.subcommand == 'help':
parser.print_help()
else:
print('Unknown subcommand "{0}"'.format(args.subcommand))
parser.print_help()
| gpl-3.0 | 1,164,262,400,669,942,300 | 40.979381 | 102 | 0.597823 | false |
gaboflowers/mallador_v3 | unidecode/x059.py | 252 | 4644 | data = (
'Shou ', # 0x00
'Yi ', # 0x01
'Zhi ', # 0x02
'Gu ', # 0x03
'Chu ', # 0x04
'Jiang ', # 0x05
'Feng ', # 0x06
'Bei ', # 0x07
'Cay ', # 0x08
'Bian ', # 0x09
'Sui ', # 0x0a
'Qun ', # 0x0b
'Ling ', # 0x0c
'Fu ', # 0x0d
'Zuo ', # 0x0e
'Xia ', # 0x0f
'Xiong ', # 0x10
'[?] ', # 0x11
'Nao ', # 0x12
'Xia ', # 0x13
'Kui ', # 0x14
'Xi ', # 0x15
'Wai ', # 0x16
'Yuan ', # 0x17
'Mao ', # 0x18
'Su ', # 0x19
'Duo ', # 0x1a
'Duo ', # 0x1b
'Ye ', # 0x1c
'Qing ', # 0x1d
'Uys ', # 0x1e
'Gou ', # 0x1f
'Gou ', # 0x20
'Qi ', # 0x21
'Meng ', # 0x22
'Meng ', # 0x23
'Yin ', # 0x24
'Huo ', # 0x25
'Chen ', # 0x26
'Da ', # 0x27
'Ze ', # 0x28
'Tian ', # 0x29
'Tai ', # 0x2a
'Fu ', # 0x2b
'Guai ', # 0x2c
'Yao ', # 0x2d
'Yang ', # 0x2e
'Hang ', # 0x2f
'Gao ', # 0x30
'Shi ', # 0x31
'Ben ', # 0x32
'Tai ', # 0x33
'Tou ', # 0x34
'Yan ', # 0x35
'Bi ', # 0x36
'Yi ', # 0x37
'Kua ', # 0x38
'Jia ', # 0x39
'Duo ', # 0x3a
'Kwu ', # 0x3b
'Kuang ', # 0x3c
'Yun ', # 0x3d
'Jia ', # 0x3e
'Pa ', # 0x3f
'En ', # 0x40
'Lian ', # 0x41
'Huan ', # 0x42
'Di ', # 0x43
'Yan ', # 0x44
'Pao ', # 0x45
'Quan ', # 0x46
'Qi ', # 0x47
'Nai ', # 0x48
'Feng ', # 0x49
'Xie ', # 0x4a
'Fen ', # 0x4b
'Dian ', # 0x4c
'[?] ', # 0x4d
'Kui ', # 0x4e
'Zou ', # 0x4f
'Huan ', # 0x50
'Qi ', # 0x51
'Kai ', # 0x52
'Zha ', # 0x53
'Ben ', # 0x54
'Yi ', # 0x55
'Jiang ', # 0x56
'Tao ', # 0x57
'Zang ', # 0x58
'Ben ', # 0x59
'Xi ', # 0x5a
'Xiang ', # 0x5b
'Fei ', # 0x5c
'Diao ', # 0x5d
'Xun ', # 0x5e
'Keng ', # 0x5f
'Dian ', # 0x60
'Ao ', # 0x61
'She ', # 0x62
'Weng ', # 0x63
'Pan ', # 0x64
'Ao ', # 0x65
'Wu ', # 0x66
'Ao ', # 0x67
'Jiang ', # 0x68
'Lian ', # 0x69
'Duo ', # 0x6a
'Yun ', # 0x6b
'Jiang ', # 0x6c
'Shi ', # 0x6d
'Fen ', # 0x6e
'Huo ', # 0x6f
'Bi ', # 0x70
'Lian ', # 0x71
'Duo ', # 0x72
'Nu ', # 0x73
'Nu ', # 0x74
'Ding ', # 0x75
'Nai ', # 0x76
'Qian ', # 0x77
'Jian ', # 0x78
'Ta ', # 0x79
'Jiu ', # 0x7a
'Nan ', # 0x7b
'Cha ', # 0x7c
'Hao ', # 0x7d
'Xian ', # 0x7e
'Fan ', # 0x7f
'Ji ', # 0x80
'Shuo ', # 0x81
'Ru ', # 0x82
'Fei ', # 0x83
'Wang ', # 0x84
'Hong ', # 0x85
'Zhuang ', # 0x86
'Fu ', # 0x87
'Ma ', # 0x88
'Dan ', # 0x89
'Ren ', # 0x8a
'Fu ', # 0x8b
'Jing ', # 0x8c
'Yan ', # 0x8d
'Xie ', # 0x8e
'Wen ', # 0x8f
'Zhong ', # 0x90
'Pa ', # 0x91
'Du ', # 0x92
'Ji ', # 0x93
'Keng ', # 0x94
'Zhong ', # 0x95
'Yao ', # 0x96
'Jin ', # 0x97
'Yun ', # 0x98
'Miao ', # 0x99
'Pei ', # 0x9a
'Shi ', # 0x9b
'Yue ', # 0x9c
'Zhuang ', # 0x9d
'Niu ', # 0x9e
'Yan ', # 0x9f
'Na ', # 0xa0
'Xin ', # 0xa1
'Fen ', # 0xa2
'Bi ', # 0xa3
'Yu ', # 0xa4
'Tuo ', # 0xa5
'Feng ', # 0xa6
'Yuan ', # 0xa7
'Fang ', # 0xa8
'Wu ', # 0xa9
'Yu ', # 0xaa
'Gui ', # 0xab
'Du ', # 0xac
'Ba ', # 0xad
'Ni ', # 0xae
'Zhou ', # 0xaf
'Zhuo ', # 0xb0
'Zhao ', # 0xb1
'Da ', # 0xb2
'Nai ', # 0xb3
'Yuan ', # 0xb4
'Tou ', # 0xb5
'Xuan ', # 0xb6
'Zhi ', # 0xb7
'E ', # 0xb8
'Mei ', # 0xb9
'Mo ', # 0xba
'Qi ', # 0xbb
'Bi ', # 0xbc
'Shen ', # 0xbd
'Qie ', # 0xbe
'E ', # 0xbf
'He ', # 0xc0
'Xu ', # 0xc1
'Fa ', # 0xc2
'Zheng ', # 0xc3
'Min ', # 0xc4
'Ban ', # 0xc5
'Mu ', # 0xc6
'Fu ', # 0xc7
'Ling ', # 0xc8
'Zi ', # 0xc9
'Zi ', # 0xca
'Shi ', # 0xcb
'Ran ', # 0xcc
'Shan ', # 0xcd
'Yang ', # 0xce
'Man ', # 0xcf
'Jie ', # 0xd0
'Gu ', # 0xd1
'Si ', # 0xd2
'Xing ', # 0xd3
'Wei ', # 0xd4
'Zi ', # 0xd5
'Ju ', # 0xd6
'Shan ', # 0xd7
'Pin ', # 0xd8
'Ren ', # 0xd9
'Yao ', # 0xda
'Tong ', # 0xdb
'Jiang ', # 0xdc
'Shu ', # 0xdd
'Ji ', # 0xde
'Gai ', # 0xdf
'Shang ', # 0xe0
'Kuo ', # 0xe1
'Juan ', # 0xe2
'Jiao ', # 0xe3
'Gou ', # 0xe4
'Mu ', # 0xe5
'Jian ', # 0xe6
'Jian ', # 0xe7
'Yi ', # 0xe8
'Nian ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Ji ', # 0xec
'Xian ', # 0xed
'Heng ', # 0xee
'Guang ', # 0xef
'Jun ', # 0xf0
'Kua ', # 0xf1
'Yan ', # 0xf2
'Ming ', # 0xf3
'Lie ', # 0xf4
'Pei ', # 0xf5
'Yan ', # 0xf6
'You ', # 0xf7
'Yan ', # 0xf8
'Cha ', # 0xf9
'Shen ', # 0xfa
'Yin ', # 0xfb
'Chi ', # 0xfc
'Gui ', # 0xfd
'Quan ', # 0xfe
'Zi ', # 0xff
)
| gpl-3.0 | -2,013,848,095,006,463,200 | 17 | 20 | 0.390827 | false |
neuront/redis-ctl | models/cluster_plan.py | 2 | 1308 | import json
from werkzeug.utils import cached_property
from base import db, Base, DB_TEXT_TYPE
from cluster import Cluster
class ClusterBalancePlan(Base):
    __tablename__ = 'cluster_balance_plan'

    cluster_id = db.Column(db.ForeignKey(Cluster.id), unique=True,
                           nullable=False)
    balance_plan_json = db.Column(DB_TEXT_TYPE, nullable=False)

    @cached_property
    def balance_plan(self):
        return json.loads(self.balance_plan_json)

    def save(self):
        self.balance_plan_json = json.dumps(self.balance_plan)
        db.session.add(self)
        db.session.flush()

    @cached_property
    def pod(self):
        return self.balance_plan['pod']

    @cached_property
    def host(self):
        return self.balance_plan.get('host')

    @cached_property
    def slaves(self):
        return self.balance_plan.get('slaves', [])

    @cached_property
    def aof(self):
        return (self.balance_plan.get('entrypoint') == 'aof'
                or self.balance_plan['aof'])


def get_balance_plan_by_addr(host, port):
    from node import RedisNode
    n = RedisNode.query.filter_by(host=host, port=port).first()
    if n is None or n.assignee_id is None:
        return None
    return ClusterBalancePlan.query.filter_by(cluster_id=n.assignee_id).first()
| mit | 8,133,717,082,645,176,000 | 26.829787 | 79 | 0.649083 | false |
gboone/wedding.harmsboone.org | rsvp/migrations/0019_auto__chg_field_guest_notes.py | 1 | 5628 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Guest.notes'
db.alter_column(u'rsvp_guest', 'notes', self.gf('django.db.models.fields.TextField')(max_length=2048, null=True))
def backwards(self, orm):
# Changing field 'Guest.notes'
db.alter_column(u'rsvp_guest', 'notes', self.gf('django.db.models.fields.CharField')(max_length=2048, null=True))
models = {
u'rsvp.event': {
'Meta': {'object_name': 'Event'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.guest': {
'Meta': {'ordering': "['-last_name', '-first_name']", 'object_name': 'Guest'},
'arriving': ('django.db.models.fields.DateField', [], {'default': "'2014-08-14'"}),
'attending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'departing': ('django.db.models.fields.DateField', [], {'default': "'2014-08-17'"}),
'display_as': ('django.db.models.fields.CharField', [], {'max_length': '91', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'max_guests': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'nights': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "'None'", 'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primary_email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street_addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zip_code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
u'rsvp.hotel': {
'Meta': {'object_name': 'Hotel'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'total_guest_count': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'rsvp.location': {
'Meta': {'object_name': 'Location'},
'distance': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.room': {
'Meta': {'object_name': 'Room'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Hotel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupancy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'room_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Roomtype']", 'null': 'True', 'blank': 'True'})
},
u'rsvp.roomtype': {
'Meta': {'object_name': 'Roomtype'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.table': {
'Meta': {'object_name': 'Table'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rsvp'] | mit | 4,548,199,276,618,365,000 | 64.453488 | 165 | 0.539801 | false |
YuriGural/erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | 8 | 16266 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.process_payroll.process_payroll import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
def update_component_row(self, struct_row, amount, key):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component
})
else:
component_row.amount = amount
def eval_condition_and_formula(self, d, data):
try:
if d.condition:
if not frappe.safe_eval(d.condition, None, data):
return None
amount = d.amount
if d.amount_based_on_formula:
if d.formula:
amount = frappe.safe_eval(d.formula, None, data)
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Employee",
{"employee": self.employee, "parent": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def get_date_details(self):
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def check_sal_struct(self, joining_date, relieving_date):
cond = ''
if self.payroll_frequency:
cond = """and payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""select parent from `tabSalary Structure Employee`
where employee=%s and (from_date <= %s or from_date <= %s)
and (to_date is null or to_date >= %s or to_date >= %s)
and parent in (select name from `tabSalary Structure`
where is_active = 'Yes'%s)
"""% ('%s', '%s', '%s','%s','%s', cond),(self.employee, self.start_date, joining_date, self.end_date, relieving_date))
if st_name:
if len(st_name) > 1:
frappe.msgprint(_("Multiple active Salary Structures found for employee {0} for the given dates")
.format(self.employee), title=_('Warning'))
return st_name and st_name[0][0] or ''
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours
}
doc.append('earnings', wages_row)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
actual_lwp = self.calculate_lwp(holidays, working_days)
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.status = 'Approved'
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(self.employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field):
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for d in self.get(component_type):
if (self.salary_structure and
cint(d.depends_on_lwp) and
(not
self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
d.amount = rounded(
(flt(d.default_amount) * flt(self.payment_days)
/ cint(self.total_working_days)), self.precision("amount", component_type)
)
elif not self.payment_days and not self.salary_slip_based_on_timesheet:
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
self.set(total_field, self.get(total_field) + flt(d.amount))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.total_deduction = 0
self.gross_pay = 0
self.sum_components('earnings', 'gross_pay')
self.sum_components('deductions', 'total_deduction')
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
def set_loan_repayment(self):
employee_loan = frappe.db.sql("""select sum(principal_amount) as principal_amount, sum(interest_amount) as interest_amount,
sum(total_payment) as total_loan_repayment from `tabRepayment Schedule`
where payment_date between %s and %s and parent in (select name from `tabEmployee Loan`
where employee = %s and repay_from_salary = 1 and docstatus = 1)""",
(self.start_date, self.end_date, self.employee), as_dict=True)
if employee_loan:
self.principal_amount = employee_loan[0].principal_amount
self.interest_amount = employee_loan[0].interest_amount
self.total_loan_repayment = employee_loan[0].total_loan_repayment
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
if(frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")):
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
subj = 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)], reference_doctype= self.doctype, reference_name= self.name)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| gpl-3.0 | 817,172,144,035,190,800 | 36.56582 | 176 | 0.695254 | false |
jnez71/demos | signals/gaussian_markov_kernel.py | 1 | 1646 | #!/usr/bin/env python3
"""
Kernel of Gaussian-transition scalar Markov process?
"""
import numpy as np
from matplotlib import pyplot
npr = np.random
np.set_printoptions(suppress=True)
pyplot.rcParams["font.size"] = 16
pyplot.rcParams["axes.grid"] = True
################################################## SYSTEM
def initial(m=10.0, s=2.0):
    return npr.normal(m, s)  # gaussian initial-condition


def transition(x, s=1.0):
    #f = 0.5*x  # linear
    f = 10*np.sin(2/(1+x**2))  # nonlinear
    return f + npr.normal(0.0, s)  # gaussian transition


def simulate(d):
    X = [initial()]
    for i in range(d-1):
        X.append(transition(X[-1]))
    return X  # one sample from d-dimensional joint (only gaussian if linear transitions)
################################################## SIMULATE
d = 9
n = int(5e5)
print("Simulating samples...")
samples = np.array([simulate(d) for i in range(n)])
print("Computing statistics...")
mean = np.mean(samples, axis=0)
covar = np.cov(samples, rowvar=False)
################################################## VISUALIZE
print("========================================")
print(np.round(mean, 3), '\n')
print(np.round(covar, 3))
print("========================================")
print("Visualizing covariance...")
vmax = np.max(np.abs(covar))
pyplot.imshow(covar, cmap="coolwarm", vmin=-vmax, vmax=vmax, interpolation="lanczos")
pyplot.colorbar()
pyplot.grid(False)
pyplot.title("Covariance")
print("Visualizing joint...")
pyplot.figure()
pyplot.scatter(samples[::int(n/1e3+1), 0], samples[::int(n/1e3+1), -1], alpha=0.4)
pyplot.xlabel("x0")
pyplot.ylabel("x{0}".format(d-1))
pyplot.show()
| mit | 6,670,597,085,459,015,000 | 25.548387 | 89 | 0.580194 | false |
rananda/cfme_tests | cfme/infrastructure/config_management.py | 1 | 19414 | from functools import partial
from cached_property import cached_property
from navmazing import NavigateToSibling, NavigateToAttribute
import cfme
import cfme.fixtures.pytest_selenium as sel
import cfme.web_ui.flash as flash
import cfme.web_ui.tabstrip as tabs
import cfme.web_ui.toolbar as tb
from cfme.web_ui import (
accordion, Quadicon, Form, Input, fill, form_buttons, mixins, Table, Region,
AngularSelect, match_location
)
from utils import version, conf
from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from utils.appliance import Navigatable
from utils.log import logger
from utils.pretty import Pretty
from utils.update import Updateable
from utils.wait import wait_for
properties_form = Form(
fields=[
('name_text', Input('name')),
('type_select', AngularSelect("provider_type")),
('url_text', Input('url')),
('ssl_checkbox', Input('verify_ssl'))
])
credential_form = Form(
fields=[
('principal_text', Input('log_userid')),
('secret_pass', Input('log_password')),
('verify_secret_pass', Input('log_verify')),
('validate_btn', form_buttons.validate)
])
def cfm_mgr_table():
return Table("//div[@id='main_div']//div[@id='list_grid']/table")
page = Region(locators={
'list_table_config_profiles': cfm_mgr_table(),
'list_table_config_systems': cfm_mgr_table()})
add_manager_btn = form_buttons.FormButton('Add')
edit_manager_btn = form_buttons.FormButton('Save changes')
cfg_btn = partial(tb.select, 'Configuration')
match_page = partial(match_location, controller='provider_foreman',
title='Red Hat Satellite Provider')
class ConfigManager(Updateable, Pretty, Navigatable):
"""
This is base class for Configuration manager objects (Red Hat Satellite, Foreman, Ansible Tower)
Args:
name: Name of the config. manager
url: URL, hostname or IP of the config. manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Use Satellite or AnsibleTower classes instead.
"""
pretty_attr = ['name', 'url']
type = None
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None, appliance=None):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
def _form_mapping(self, create=None, **kwargs):
return {'name_text': kwargs.get('name'),
'type_select': create and self.type,
'url_text': kwargs.get('url'),
'ssl_checkbox': kwargs.get('ssl')}
class Credential(cfme.Credential, Updateable):
pass
def _submit(self, cancel, submit_button):
if cancel:
form_buttons.cancel()
else:
submit_button()
flash.assert_no_errors()
def create(self, cancel=False, validate_credentials=True, validate=True, force=False):
"""Creates the manager through UI
Args:
cancel (bool): Whether to cancel out of the creation. The cancel is done
after all the information present in the manager has been filled in the UI.
validate_credentials (bool): Whether to validate credentials - if True and the
credentials are invalid, an error will be raised.
validate (bool): Whether we want to wait for the manager's data to load
and show up in it's detail page. True will also wait, False will only set it up.
force (bool): Whether to force the creation even if the manager already exists.
True will try anyway; False will check for its existence and leave, if present.
"""
def config_profiles_loaded():
# Workaround - without this, validation of provider failed
config_profiles_names = [prof.name for prof in self.config_profiles]
logger.info(
"UI: %s\nYAML: %s",
set(config_profiles_names), set(self.yaml_data['config_profiles']))
return all(
[cp in config_profiles_names for cp in self.yaml_data['config_profiles']])
if not force and self.exists:
return
navigate_to(self, 'Add')
fill(properties_form, self._form_mapping(create=True, **self.__dict__))
fill(credential_form, self.credentials, validate=validate_credentials)
self._submit(cancel, add_manager_btn)
if not cancel:
flash.assert_message_match(self._refresh_flash_msg)
if validate:
try:
self.yaml_data['config_profiles']
except KeyError as e:
logger.exception(e)
raise
wait_for(
config_profiles_loaded,
fail_func=self.refresh_relationships,
handle_exception=True,
num_sec=180, delay=30)
def update(self, updates, cancel=False, validate_credentials=False):
"""Updates the manager through UI
args:
updates (dict): Data to change.
cancel (bool): Whether to cancel out of the update. The cancel is done
after all the new information has been filled in the UI.
validate_credentials (bool): Whether to validate credentials - if True and the
credentials are invalid, an error will be raised.
Note:
utils.update use is recommended over use of this method.
"""
navigate_to(self, 'Edit')
# Workaround - without this, update was failing on downstream appliance
sel.wait_for_ajax()
sel.wait_for_element(properties_form.name_text)
fill(properties_form, self._form_mapping(**updates))
fill(credential_form, updates.get('credentials', None), validate=validate_credentials)
self._submit(cancel, edit_manager_btn)
name = updates['name'] or self.name
if not cancel:
flash.assert_message_match('{} Provider "{}" was updated'.format(self.type, name))
self.__dict__.update(**updates)
def delete(self, cancel=False, wait_deleted=True, force=False):
"""Deletes the manager through UI
Args:
cancel (bool): Whether to cancel out of the deletion, when the alert pops up.
wait_deleted (bool): Whether we want to wait for the manager to disappear from the UI.
True will wait; False will only delete it and move on.
force (bool): Whether to try to delete the manager even though it doesn't exist.
True will try to delete it anyway; False will check for its existence and leave,
if not present.
"""
if not force and not self.exists:
return
navigate_to(self, 'All')
sel.check(Quadicon(self.quad_name, None).checkbox())
item_text = version.pick({'5.6': 'Remove selected items from the VMDB',
'5.7': 'Remove selected items'})
cfg_btn(item_text, invokes_alert=True)
sel.handle_alert(cancel)
if not cancel:
flash_msg = version.pick({'5.6': 'Delete initiated for 1 provider',
'5.7': 'Delete initiated for 1 Provider'})
flash.assert_message_match(flash_msg)
if wait_deleted:
wait_for(func=lambda: self.exists, fail_condition=True, delay=15, num_sec=60)
@property
def _refresh_flash_msg(self):
return 'Refresh Provider initiated for 1 provider ({})'.format(self.type)
@property
def exists(self):
"""Returns whether the manager exists in the UI or not"""
navigate_to(self, 'All')
if (Quadicon.any_present() and
Quadicon(self.quad_name, None).exists):
return True
return False
def refresh_relationships(self, cancel=False):
"""Refreshes relationships and power states of this manager"""
navigate_to(self, 'All')
sel.check(Quadicon(self.quad_name, None).checkbox())
cfg_btn('Refresh Relationships and Power states', invokes_alert=True)
sel.handle_alert(cancel)
if not cancel:
flash.assert_message_match(self._refresh_flash_msg)
def _does_profile_exist(self):
return sel.is_displayed(page.list_table_config_profiles)
@property
def config_profiles(self):
"""Returns 'ConfigProfile' configuration profiles (hostgroups) available on this manager"""
navigate_to(self, 'Details')
tb.select('List View')
wait_for(self._does_profile_exist, num_sec=300, delay=20, fail_func=sel.refresh)
return [ConfigProfile(row['name'].text, self) for row in
page.list_table_config_profiles.rows()]
@property
def systems(self):
"""Returns 'ConfigSystem' configured systems (hosts) available on this manager"""
return reduce(lambda x, y: x + y, [prof.systems for prof in self.config_profiles])
@property
def yaml_data(self):
"""Returns yaml data for this manager"""
return conf.cfme_data.configuration_managers[self.key]
@classmethod
def load_from_yaml(cls, key):
"""Returns 'ConfigManager' object loaded from yamls, based on its key"""
data = conf.cfme_data.configuration_managers[key]
creds = conf.credentials[data['credentials']]
return cls(
name=data['name'],
url=data['url'],
ssl=data['ssl'],
credentials=cls.Credential(
principal=creds['username'], secret=creds['password']),
key=key)
@property
def quad_name(self):
return '{} Configuration Manager'.format(self.name)
def get_config_manager_from_config(cfg_mgr_key):
cfg_mgr = conf.cfme_data.get('configuration_managers', {})[cfg_mgr_key]
if cfg_mgr['type'] == 'satellite':
return Satellite.load_from_yaml(cfg_mgr_key)
elif cfg_mgr['type'] == 'ansible':
return AnsibleTower.load_from_yaml(cfg_mgr_key)
else:
raise Exception("Unknown configuration manager key")
@fill.method((Form, ConfigManager.Credential))
def _fill_credential(form, cred, validate=None):
"""How to fill in a credential. Validates the credential if that option is passed in."""
fill(credential_form, {'principal_text': cred.principal,
'secret_pass': cred.secret,
'verify_secret_pass': cred.verify_secret,
'validate_btn': validate})
if validate:
flash.assert_no_errors()
class ConfigProfile(Pretty):
"""Configuration profile object (foreman-side hostgroup)
Args:
name: Name of the profile
manager: ConfigManager object which this profile is bound to
"""
pretty_attrs = ['name', 'manager']
def __init__(self, name, manager):
self.name = name
self.manager = manager
@property
def systems(self):
"""Returns 'ConfigSystem' objects that are active under this profile"""
navigate_to(self, 'Details')
# ajax wait doesn't work here
_title_loc = "//span[contains(@id, 'explorer_title_text') " \
"and contains(normalize-space(text()), 'Configured Systems')]"
sel.wait_for_element(_title_loc)
# Unassigned config profile has no tabstrip
if "unassigned" not in self.name.lower():
tabs.select_tab("Configured Systems")
if sel.is_displayed(page.list_table_config_systems):
row_key = 'hostname'
return [ConfigSystem(row[row_key].text, self) for row in
page.list_table_config_systems.rows()]
return list()
class ConfigSystem(Pretty):
pretty_attrs = ['name', 'manager_key']
def __init__(self, name, profile):
self.name = name
self.profile = profile
def tag(self, tag):
"""Tags the system by given tag"""
navigate_to(self, 'EditTags')
fill(mixins.tag_form, {'category': 'Cost Center *', 'tag': 'Cost Center 001'})
# ---
mixins.add_tag(tag, navigate=False)
def untag(self, tag):
"""Removes the selected tag off the system"""
navigate_to(self, 'EditTags')
mixins.remove_tag(tag)
@property
def tags(self):
"""Returns a list of this system's active tags"""
navigate_to(self, 'EditTags')
return mixins.get_tags()
class Satellite(ConfigManager):
"""
Configuration manager object (Red Hat Satellite, Foreman)
Args:
name: Name of the Satellite/Foreman configuration manager
url: URL, hostname or IP of the configuration manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Create provider:
.. code-block:: python
satellite_cfg_mgr = Satellite('my_satellite', 'my-satellite.example.com',
ssl=False, ConfigManager.Credential(principal='admin',
secret='testing'), key='satellite_yaml_key')
satellite_cfg_mgr.create()
Update provider:
.. code-block:: python
with update(satellite_cfg_mgr):
satellite_cfg_mgr.name = 'new_satellite_name'
Delete provider:
.. code-block:: python
satellite_cfg_mgr.delete()
"""
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None):
super(Satellite, self).__init__(name=name, url=url, ssl=ssl, credentials=credentials,
key=key)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
@cached_property
def type(self):
"""Returns presumed type of the manager based on CFME version
Note:
We cannot actually know the type of the provider from the UI.
This represents the supported type by CFME version and is to be used in navigation.
"""
return version.pick({version.LOWEST: 'Red Hat Satellite', version.LATEST: 'Foreman'})
class AnsibleTower(ConfigManager):
"""
Configuration manager object (Ansible Tower)
Args:
name: Name of the Ansible Tower configuration manager
url: URL, hostname or IP of the configuration manager
ssl: Boolean value; `True` if SSL certificate validity should be checked, `False` otherwise
credentials: Credentials to access the config. manager
key: Key to access the cfme_data yaml data (same as `name` if not specified)
Usage:
Create provider:
.. code-block:: python
tower_cfg_mgr = AnsibleTower('my_tower', 'https://my-tower.example.com/api/v1',
ssl=False, ConfigManager.Credential(principal='admin',
secret='testing'), key='tower_yaml_key')
tower_cfg_mgr.create()
Update provider:
.. code-block:: python
with update(tower_cfg_mgr):
tower_cfg_mgr.name = 'new_tower_name'
Delete provider:
.. code-block:: python
tower_cfg_mgr.delete()
"""
type = 'Ansible Tower'
def __init__(self, name=None, url=None, ssl=None, credentials=None, key=None):
super(AnsibleTower, self).__init__(name=name, url=url, ssl=ssl, credentials=credentials,
key=key)
self.name = name
self.url = url
self.ssl = ssl
self.credentials = credentials
self.key = key or name
@navigator.register(ConfigManager, 'All')
class MgrAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
if self.obj.appliance.version > '5.7.0.8':
self.prerequisite_view.navigation.select('Configuration', 'Management')
else:
self.prerequisite_view.navigation.select('Configuration', 'Configuration Management')
def resetter(self):
accordion.tree('Providers', 'All Configuration Manager Providers')
tb.select('Grid View')
def am_i_here(self):
return match_page('All Configuration Management Providers')
@navigator.register(ConfigManager, 'Add')
class MgrAdd(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
cfg_btn('Add a new Provider')
@navigator.register(ConfigManager, 'Edit')
class MgrEdit(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.quad_name, None).checkbox())
cfg_btn('Edit Selected item')
@navigator.register(ConfigManager, 'Details')
class MgrDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.click(Quadicon(self.obj.quad_name, None))
def am_i_here(self):
return any((match_page(summary='Configuration Profiles under Red Hat Satellite '
'Provider "{} Configuration Manager"'.format(self.obj.name)),
match_page(summary='Inventory Groups under Ansible Tower Provider'
' "{} Configuration Manager"'.format(self.obj.name))))
@navigator.register(ConfigManager, 'EditFromDetails')
class MgrEditFromDetails(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def step(self):
cfg_btn('Edit this Provider')
# todo: not sure whether this works or not. it seems it wasn't used for a long time
@navigator.register(ConfigProfile, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToAttribute('manager', 'Details')
def step(self):
tb.select('List View'),
page.list_table_config_profiles.click_cell('Description', self.obj.name)
@navigator.register(ConfigSystem, 'All')
class SysAll(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
self.prerequisite_view.navigation.select('Configuration', 'Configuration Management')
def resetter(self):
accordion.tree('Configured Systems', 'All Configured Systems')
tb.select('Grid View')
def am_i_here(self):
return match_page(summary='All Configured Systems')
@navigator.register(ConfigSystem, 'Provision')
class SysProvision(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.name, None))
cfg_btn('Provision Configured Systems')
@navigator.register(ConfigSystem, 'EditTags')
class SysEditTags(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def step(self):
sel.check(Quadicon(self.obj.name, None))
cfg_btn('Edit Tags')
| gpl-2.0 | 3,822,269,521,155,331,000 | 35.220149 | 100 | 0.62151 | false |
rawls238/planout | planout-editor/planout-editor-kernel.py | 9 | 2393 | from flask import Flask, jsonify, render_template, request, url_for
app = Flask(__name__)
from planout.interpreter import Interpreter
import traceback
import json
import sys
def testPlanOutScript(script, inputs={}, overrides=None, assertions=None):
    payload = {}

    # make sure experiment runs with the given inputs
    i = Interpreter(script, 'demo_salt', inputs)
    if overrides:
        i.set_overrides(overrides)

    try:
        results = dict(i.get_params())  # executes experiment
    except Exception as err:
        #message = "Error running experiment: %s" % traceback.format_exc(0)
        message = "Error running experiment:\n%s" % err
        payload['errors'] = [{
            "error_code": "runtime",
            "message": message
        }]
        return payload

    payload['results'] = results

    # validate if input contains validation code
    validation_errors = []
    if assertions:
        for (key, value) in assertions.iteritems():
            if key not in results:
                validation_errors.append({
                    "error_code": "assertion",
                    "message": {"param": key}
                })
            else:
                if results[key] != value:
                    message = {'param': key, 'expected': value, 'got': results[key]}
                    validation_errors.append({
                        "error_code": "assertion",
                        "message": message
                    })
    if validation_errors:
        payload['errors'] = validation_errors
    return payload


@app.route('/run_test')
def run_test():
    # not sure how to change everything to use POST requests
    raw_script = request.args.get('compiled_code', '')
    raw_inputs = request.args.get('inputs', '')
    raw_overrides = request.args.get('overrides', "{}")
    raw_assertions = request.args.get('assertions', "{}")
    id = request.args.get('id')

    script = json.loads(raw_script) if raw_script else {}
    try:
        inputs = json.loads(raw_inputs)
        overrides = json.loads(raw_overrides) if raw_overrides else None
        assertions = json.loads(raw_assertions) if raw_assertions else None
    except:
        return jsonify({
            'errors': [{
                'error_code': "INVALID_FORM",
                'message': 'Invalid form input'
            }],
            'id': id
        })

    t = testPlanOutScript(script, inputs, overrides, assertions)
    t['id'] = id
    return jsonify(t)


@app.route('/')
def index():
    return render_template('index.html')
if __name__ == '__main__':
    app.run(debug=True)
    url_for('static', filename='planoutstyle.css')
| bsd-3-clause | 8,410,829,750,947,377,000 | 27.152941 | 74 | 0.634768 | false |
hbrunn/hr | hr_contract_hourly_rate/tests/test_hr_contract_hourly_rates.py | 23 | 7450 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
from openerp import exceptions
class test_contract_hourly_rate(TransactionCase):
def setUp(self):
super(test_contract_hourly_rate, self).setUp()
self.employee_model = self.env['hr.employee']
self.user_model = self.env["res.users"]
self.contract_model = self.env["hr.contract"]
self.job_model = self.env["hr.job"]
self.rate_class_model = self.env["hr.hourly.rate.class"]
# Create an employee
self.employee_id = self.employee_model.create({'name': 'Employee 1'})
# Create 3 jobs
self.job_id = self.job_model.create({'name': 'Job 1'})
self.job_2_id = self.job_model.create({'name': 'Job 2'})
self.job_3_id = self.job_model.create({'name': 'Job 3'})
# Create 3 hourly rate classes
self.rate_class_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 40,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 45,
}),
],
}
)
self.rate_class_2_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 30,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 35,
}),
],
}
)
self.rate_class_3_id = self.rate_class_model.create(
{
'name': 'Test',
'line_ids': [
(0, 0, {
'date_start': '2014-01-01',
'date_end': '2014-06-30',
'rate': 20,
}),
(0, 0, {
'date_start': '2014-07-01',
'date_end': '2014-12-31',
'rate': 25,
}),
],
}
)
# Create a contract
self.contract_id = self.contract_model.create(
{
'employee_id': self.employee_id.id,
'name': 'Contract 1',
'wage': 50000,
'salary_computation_method': 'hourly',
'contract_job_ids': [
(0, 0, {
'job_id': self.job_id.id,
'is_main_job': False,
'hourly_rate_class_id': self.rate_class_id.id,
}),
(0, 0, {
'job_id': self.job_2_id.id,
'is_main_job': True,
'hourly_rate_class_id': self.rate_class_2_id.id,
}),
(0, 0, {
'job_id': self.job_3_id.id,
'is_main_job': False,
'hourly_rate_class_id': self.rate_class_3_id.id,
}),
],
}
)
def test_check_overlapping_dates(self):
"""
test the _check_overlapping_dates constraint
on hourly rate class
"""
# Should all return the same result
for dates in [('2013-01-01', '2014-01-01'),
('2014-12-31', '2015-12-31'),
('2014-06-01', '2014-07-31')]:
self.assertRaises(
exceptions.ValidationError, self.rate_class_id.write,
{'line_ids': [(0, 0, {'date_start': dates[0],
'date_end': dates[1],
'rate': 15})]})
def test_check_has_hourly_rate_class(self):
"""
        test the _check_has_hourly_rate_class constraint
on contract
"""
self.job_4_id = self.job_model.create({'name': 'Job 4'})
self.assertRaises(
exceptions.ValidationError, self.contract_id.write,
{'contract_job_ids': [(0, 0, {'job_id': self.job_4_id.id,
'is_main_job': False,
'hourly_rate_class_id': False})]})
def test_get_job_hourly_rate(self):
"""
test the method get_job_hourly_rate with job_id argument
"""
# Should all return the same result
for dates in [('2014-02-01', '2014-02-10'),
('2014-01-01', '2014-06-30')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=self.job_3_id.id, main_job=False)
self.assertTrue(res == 20)
# Should all return the same result
for dates in [('2014-08-10', '2014-08-20'),
('2014-07-01', '2014-12-31')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=self.job_3_id.id, main_job=False)
self.assertTrue(res == 25)
def test_get_job_hourly_rate_main_job(self):
"""
test the method get_job_hourly_rate with main_job argument
"""
# Should all return the same result
for dates in [('2014-02-01', '2014-02-10'),
('2014-01-01', '2014-06-30')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=False, main_job=True)
self.assertTrue(res == 30)
# Should all return the same result
for dates in [('2014-08-10', '2014-08-20'),
('2014-07-01', '2014-12-31')]:
res = self.contract_id.get_job_hourly_rate(
dates[0], dates[1], job_id=False, main_job=True)
self.assertTrue(res == 35)
| agpl-3.0 | -9,060,705,282,690,150,000 | 36.437186 | 78 | 0.437315 | false |
mfherbst/spack | var/spack/repos/builtin/packages/r-affxparser/package.py | 2 | 2101 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffxparser(RPackage):
"""Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).
It provides methods for fast and memory efficient parsing of
Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
and binary-based files are supported. Currently, there are methods
for reading chip definition file (CDF) and a cell intensity file (CEL).
These files can be read either in full or in part. For example,
probe signals from a few probesets can be extracted very quickly
from a set of CEL files into a convenient list structure."""
homepage = "https://www.bioconductor.org/packages/affxparser/"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('[email protected]:3.4.9', when='@1.48.0')
| lgpl-2.1 | 4,594,564,581,564,383,000 | 47.860465 | 78 | 0.693003 | false |
twz915/django | tests/migrations/test_optimizer.py | 2 | 26004 | from django.db import migrations, models
from django.db.migrations import operations
from django.db.migrations.optimizer import MigrationOptimizer
from django.test import SimpleTestCase
from .models import EmptyManager, UnicodeModel
class OptimizerTests(SimpleTestCase):
"""
    Tests the migration optimizer.
"""
def optimize(self, operations, app_label):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations, app_label), optimizer._iterations
def assertOptimizesTo(self, operations, expected, exact=None, less_than=None, app_label=None):
result, iterations = self.optimize(operations, app_label)
result = [repr(f.deconstruct()) for f in result]
expected = [repr(f.deconstruct()) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException(
"Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations)
)
if less_than is not None and iterations >= less_than:
raise self.failureException(
"Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations)
)
def assertDoesNotOptimize(self, operations, **kwargs):
self.assertOptimizesTo(operations, operations, **kwargs)
def test_single(self):
"""
        The optimizer does nothing on a single operation,
        and does so in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel('Foo', fields=[]),
migrations.AlterModelOptions(name='Foo', options={'verbose_name_plural': 'Foozes'}),
],
[
migrations.CreateModel('Foo', fields=[], options={'verbose_name_plural': 'Foozes'}),
]
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterOrderWithRespectTo("Foo", "a"))
def _test_alter_alter_model(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter_model(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter_model(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or delete
of a different model, but only if the create operation does not mention the model
at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# This should not work - FK should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label is specified and
# a FK references a model from the other app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
],
app_label="otherapp",
)
# But it shouldn't work if a FK references a model with the same
# app_label.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# This should not work - bases should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
)
# The same operations should be optimized if app_label and none of
# bases belong to that app.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
],
app_label="otherapp",
)
# But it shouldn't work if some of bases belongs to the specified app.
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_add_field_not_through_fk(self):
"""
AddField should NOT optimize into CreateModel if it's an FK to a model
that's between them.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
# Note: The middle model is not actually a valid through model,
# but that doesn't matter, as we never render it.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField(
"Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField(
"Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")
),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField("Foo", name="age", field=models.FloatField(default=2.4)),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel and the Alter*
"""
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
],
)
# AlterField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
]),
alter,
],
)
# RenameField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
]),
alter,
],
)
# RemoveField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "b"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
],
)
def test_create_alter_unique_field(self):
self._test_create_alter_foo_field(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_field(self):
self._test_create_alter_foo_field(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_field(self):
self._test_create_alter_foo_field(migrations.AlterOrderWithRespectTo("Foo", "b"))
def test_optimize_through_fields(self):
"""
field-level through checking is working. This should manage to collapse
model Foo to nonexistence, and model Bar to a single IntegerField
called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
def test_optimize_elidable_operation(self):
elidable_operation = operations.base.Operation()
elidable_operation.elidable = True
self.assertOptimizesTo(
[
elidable_operation,
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
elidable_operation,
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
elidable_operation,
migrations.RenameModel("Foo", "Phou"),
migrations.DeleteModel("Bar"),
elidable_operation,
],
[
migrations.CreateModel("Phou", [("name", models.CharField(max_length=255))]),
],
)
| bsd-3-clause | -8,217,871,205,433,163,000 | 35.780764 | 110 | 0.48754 | false |
bikong2/django | tests/null_fk_ordering/tests.py | 381 | 2012 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
Article.objects.create(title='No author on this article')
Article.objects.create(author=author_1, title='This article written by Tom Jones')
Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertEqual(len(list(Article.objects.all())), 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
Comment.objects.create(post=p, comment_text='My first comment')
Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
Comment.objects.create(comment_text='Another first comment')
Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertEqual(len(list(Comment.objects.all())), 4)
| bsd-3-clause | -4,001,505,499,062,184,000 | 46.904762 | 90 | 0.684394 | false |
a-doumoulakis/tensorflow | tensorflow/contrib/tensor_forest/python/tensor_forest.py | 5 | 26323 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
import random
from google.protobuf import text_format
from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# Stores tuples of (leaf model type, stats model type)
CLASSIFICATION_LEAF_MODEL_TYPES = {
'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION,
_params_proto.STATS_DENSE_GINI),
'all_sparse': (_params_proto.MODEL_SPARSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_GINI),
'sparse_then_dense':
(_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION,
_params_proto.STATS_SPARSE_THEN_DENSE_GINI),
}
REGRESSION_MODEL_TYPE = (
_params_proto.MODEL_REGRESSION,
_params_proto.STATS_LEAST_SQUARES_REGRESSION,
_params_proto.COLLECTION_BASIC)
FINISH_TYPES = {
'basic': _params_proto.SPLIT_FINISH_BASIC,
'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING,
'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP
}
PRUNING_TYPES = {
'none': _params_proto.SPLIT_PRUNE_NONE,
'half': _params_proto.SPLIT_PRUNE_HALF,
'quarter': _params_proto.SPLIT_PRUNE_QUARTER,
'10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT,
'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING,
}
SPLIT_TYPES = {
'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL,
'less': _tree_proto.InequalityTest.LESS_THAN
}
def parse_number_or_string_to_proto(proto, param):
if isinstance(param, numbers.Number):
proto.constant_value = param
else: # assume it's a string
if param.isdigit():
proto.constant_value = int(param)
else:
text_format.Merge(param, proto)
def build_params_proto(params):
"""Build a TensorForestParams proto out of the V4ForestHParams object."""
proto = _params_proto.TensorForestParams()
proto.num_trees = params.num_trees
proto.max_nodes = params.max_nodes
proto.is_regression = params.regression
proto.num_outputs = params.num_classes
proto.num_features = params.num_features
proto.leaf_type = params.leaf_model_type
proto.stats_type = params.stats_model_type
proto.collection_type = _params_proto.COLLECTION_BASIC
proto.pruning_type.type = params.pruning_type
proto.finish_type.type = params.finish_type
proto.inequality_test_type = params.split_type
proto.drop_final_class = False
proto.collate_examples = params.collate_examples
proto.checkpoint_stats = params.checkpoint_stats
proto.use_running_stats_method = params.use_running_stats_method
proto.initialize_average_splits = params.initialize_average_splits
proto.inference_tree_paths = params.inference_tree_paths
parse_number_or_string_to_proto(proto.pruning_type.prune_every_samples,
params.prune_every_samples)
parse_number_or_string_to_proto(proto.finish_type.check_every_steps,
params.early_finish_check_every_samples)
parse_number_or_string_to_proto(proto.split_after_samples,
params.split_after_samples)
parse_number_or_string_to_proto(proto.num_splits_to_consider,
params.num_splits_to_consider)
proto.dominate_fraction.constant_value = params.dominate_fraction
if params.param_file:
with open(params.param_file) as f:
text_format.Merge(f.read(), proto)
return proto
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(
self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, # deprecated, unused.
split_after_samples=250,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
model_name='all_dense',
split_finish_name='basic',
split_pruning_name='none',
prune_every_samples=0,
early_finish_check_every_samples=0,
collate_examples=False,
checkpoint_stats=False,
use_running_stats_method=False,
initialize_average_splits=False,
inference_tree_paths=False,
param_file=None,
split_name='less_or_equal',
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
self.model_name = model_name
self.split_finish_name = split_finish_name
self.split_pruning_name = split_pruning_name
self.collate_examples = collate_examples
self.checkpoint_stats = checkpoint_stats
self.use_running_stats_method = use_running_stats_method
self.initialize_average_splits = initialize_average_splits
self.inference_tree_paths = inference_tree_paths
self.param_file = param_file
self.split_name = split_name
self.early_finish_check_every_samples = early_finish_check_every_samples
self.prune_every_samples = prune_every_samples
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
max(10, math.floor(math.sqrt(self.num_features))), 1000)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
# How to store leaf models.
self.leaf_model_type = (
REGRESSION_MODEL_TYPE[0] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][0])
# How to store stats objects.
self.stats_model_type = (
REGRESSION_MODEL_TYPE[1] if self.regression else
CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][1])
self.finish_type = (
_params_proto.SPLIT_FINISH_BASIC if self.regression else
FINISH_TYPES[self.split_finish_name])
self.pruning_type = PRUNING_TYPES[self.split_pruning_name]
if self.pruning_type == _params_proto.SPLIT_PRUNE_NONE:
self.prune_every_samples = 0
else:
if (not self.prune_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
'Must specify prune_every_samples if using a depth-dependent '
'split_after_samples')
# Pruning half-way through split_after_samples seems like a decent
# default, making it easy to select the number being pruned with
# pruning_type while not paying the cost of pruning too often. Note that
# this only holds if not using a depth-dependent split_after_samples.
self.prune_every_samples = (self.prune_every_samples or
int(self.split_after_samples) / 2)
if self.finish_type == _params_proto.SPLIT_FINISH_BASIC:
self.early_finish_check_every_samples = 0
else:
if (not self.early_finish_check_every_samples and
          not (isinstance(self.split_after_samples, numbers.Number) or
self.split_after_samples.isdigit())):
logging.error(
            'Must specify early_finish_check_every_samples if using a '
            'depth-dependent split_after_samples')
# Checking for early finish every quarter through split_after_samples
# seems like a decent default. We don't want to incur the checking cost
# too often, but (at least for hoeffding) it's lower than the cost of
# pruning so we can do it a little more frequently.
self.early_finish_check_every_samples = (
self.early_finish_check_every_samples or
int(self.split_after_samples) / 4)
self.split_type = SPLIT_TYPES[self.split_name]
return self
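# Illustrative sketch (not part of the original file): turning hyperparameters
# into the serialized proto consumed by the v4 ops. The attribute values below
# are assumptions.
#
#   hparams = ForestHParams(num_classes=2, num_features=40).fill()
#   params_proto = build_params_proto(hparams)
#   serialized = params_proto.SerializeToString()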
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
if (not hasattr(params, 'params_proto') or
not isinstance(params.params_proto,
_params_proto.TensorForestParams)):
params.params_proto = build_params_proto(params)
params.serialized_params_proto = params.params_proto.SerializeToString()
self.stats = None
if training:
# TODO(gilberth): Manually shard this to be able to fit it on
# multiple machines.
self.stats = stats_ops.fertile_stats_variable(
params, '', self.get_tree_name('stats', tree_num))
self.tree = model_ops.tree_variable(
params, '', self.stats, self.get_tree_name('tree', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(variable_scope.get_variable(
name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
device_assigner=None,
variables=None,
tree_variables_class=TreeTrainingVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def get_all_resource_handles(self):
return ([self.variables[i].tree for i in range(len(self.trees))] +
[self.variables[i].stats for i in range(len(self.trees))])
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
# TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
A tuple of (probabilities, tree_paths, variance), where variance
is the variance over all the trees for regression problems only.
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
paths = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
probs, path = self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args)
probabilities.append(probs)
paths.append(path)
with ops.device(self.variables.device_dummies[0].device):
# shape of all_predict should be [batch_size, num_trees, num_outputs]
all_predict = array_ops.stack(probabilities, axis=1)
average_values = math_ops.div(
math_ops.reduce_sum(all_predict, 1),
self.params.num_trees,
name='probabilities')
tree_paths = array_ops.stack(paths, axis=1)
regression_variance = None
if self.params.regression:
expected_squares = math_ops.div(
math_ops.reduce_sum(all_predict * all_predict, 1),
self.params.num_trees)
regression_variance = math_ops.maximum(
0., expected_squares - average_values * average_values)
return average_values, tree_paths, regression_variance
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def feature_importances(self):
tree_counts = [self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
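# Illustrative wiring sketch (not part of the original file; tensor names and
# shapes are assumptions):
#
#   params = ForestHParams(num_classes=2, num_features=40).fill()
#   graph_builder = RandomForestGraphs(params)
#   train_op = graph_builder.training_graph(features, labels)
#   probabilities, tree_paths, variance = graph_builder.inference_graph(features)
#   loss_op = graph_builder.training_loss(features, labels)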
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the
original feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights,
or None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
# TODO(gilberth): Use this.
unused_epoch = math_ops.to_int32(get_epoch_variable())
if input_weights is None:
input_weights = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
leaf_ids = model_ops.traverse_tree_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
update_model = model_ops.update_model_v4(
self.variables.tree,
leaf_ids,
input_labels,
input_weights,
params=self.params.serialized_params_proto)
finished_nodes = stats_ops.process_input_v4(
self.variables.tree,
self.variables.stats,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
leaf_ids,
input_spec=data_spec.SerializeToString(),
random_seed=random_seed,
params=self.params.serialized_params_proto)
with ops.control_dependencies([update_model]):
return stats_ops.grow_tree_v4(
self.variables.tree,
self.variables.stats,
finished_nodes,
params=self.params.serialized_params_proto)
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original
input columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
A tuple of (probabilities, tree_paths).
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
if input_data is None:
input_data = []
return model_ops.tree_predictions_v4(
self.variables.tree,
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_spec=data_spec.SerializeToString(),
params=self.params.serialized_params_proto)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return model_ops.tree_size(self.variables.tree)
def feature_usage_counts(self):
return model_ops.feature_usage_counts(
self.variables.tree, params=self.params.serialized_params_proto)
| apache-2.0 | 6,557,969,534,409,780,000 | 37.260174 | 92 | 0.670288 | false |
sh1nu11bi/sulley | utils/crash_binning.py | 12 | 10341 | #
# Crash Binning
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: crash_binning.py 193 2007-04-05 13:30:01Z cameron $
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
'''
import sys
import zlib
import cPickle
class __crash_bin_struct__:
exception_module = None
exception_address = 0
write_violation = 0
violation_address = 0
violation_thread_id = 0
context = None
context_dump = None
disasm = None
disasm_around = []
stack_unwind = []
seh_unwind = []
extra = None
class crash_binning:
'''
@todo: Add MySQL import/export.
'''
bins = {}
last_crash = None
pydbg = None
####################################################################################################################
def __init__ (self):
'''
'''
self.bins = {}
self.last_crash = None
self.pydbg = None
####################################################################################################################
def record_crash (self, pydbg, extra=None):
'''
Given a PyDbg instantiation that at the current time is assumed to have "crashed" (access violation for example)
        record various details such as the disassembly around the violating address, the ID of the offending thread, the
call stack and the SEH unwind. Store the recorded data in an internal dictionary, binning them by the exception
address.
@type pydbg: pydbg
@param pydbg: Instance of pydbg
@type extra: Mixed
@param extra: (Optional, Def=None) Whatever extra data you want to store with this bin
'''
self.pydbg = pydbg
crash = __crash_bin_struct__()
# add module name to the exception address.
exception_module = pydbg.addr_to_module(pydbg.dbg.u.Exception.ExceptionRecord.ExceptionAddress)
if exception_module:
exception_module = exception_module.szModule
else:
exception_module = "[INVALID]"
crash.exception_module = exception_module
crash.exception_address = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionAddress
crash.write_violation = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionInformation[0]
crash.violation_address = pydbg.dbg.u.Exception.ExceptionRecord.ExceptionInformation[1]
crash.violation_thread_id = pydbg.dbg.dwThreadId
crash.context = pydbg.context
crash.context_dump = pydbg.dump_context(pydbg.context, print_dots=False)
crash.disasm = pydbg.disasm(crash.exception_address)
crash.disasm_around = pydbg.disasm_around(crash.exception_address, 10)
crash.stack_unwind = pydbg.stack_unwind()
crash.seh_unwind = pydbg.seh_unwind()
crash.extra = extra
# add module names to the stack unwind.
for i in xrange(len(crash.stack_unwind)):
addr = crash.stack_unwind[i]
module = pydbg.addr_to_module(addr)
if module:
module = module.szModule
else:
module = "[INVALID]"
crash.stack_unwind[i] = "%s:%08x" % (module, addr)
# add module names to the SEH unwind.
for i in xrange(len(crash.seh_unwind)):
(addr, handler) = crash.seh_unwind[i]
module = pydbg.addr_to_module(handler)
if module:
module = module.szModule
else:
module = "[INVALID]"
crash.seh_unwind[i] = (addr, handler, "%s:%08x" % (module, handler))
if not self.bins.has_key(crash.exception_address):
self.bins[crash.exception_address] = []
self.bins[crash.exception_address].append(crash)
self.last_crash = crash
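    # Illustrative usage sketch (not part of the original file): record_crash is
    # typically called from a pydbg access-violation callback, e.g.
    #
    #   def handle_av(dbg):
    #       crashbin.record_crash(dbg)
    #       print crashbin.crash_synopsis()
    #       dbg.terminate_process()
    #       return DBG_EXCEPTION_NOT_HANDLED
    #
    # where `crashbin` is a crash_binning instance and DBG_EXCEPTION_NOT_HANDLED
    # comes from pydbg.defines.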
####################################################################################################################
def crash_synopsis (self, crash=None):
'''
        For the supplied crash, generate and return a report containing the disassembly around the violating address,
        the ID of the offending thread, the call stack and the SEH unwind. If no crash is specified, then call through
to last_crash_synopsis() which returns the same information for the last recorded crash.
@see: crash_synopsis()
@type crash: __crash_bin_struct__
@param crash: (Optional, def=None) Crash object to generate report on
@rtype: String
@return: Crash report
'''
if not crash:
return self.last_crash_synopsis()
if crash.write_violation:
direction = "write to"
else:
direction = "read from"
synopsis = "%s:%08x %s from thread %d caused access violation\nwhen attempting to %s 0x%08x\n\n" % \
(
crash.exception_module, \
crash.exception_address, \
crash.disasm, \
crash.violation_thread_id, \
direction, \
crash.violation_address \
)
synopsis += crash.context_dump
synopsis += "\ndisasm around:\n"
for (ea, inst) in crash.disasm_around:
synopsis += "\t0x%08x %s\n" % (ea, inst)
if len(crash.stack_unwind):
synopsis += "\nstack unwind:\n"
for entry in crash.stack_unwind:
synopsis += "\t%s\n" % entry
if len(crash.seh_unwind):
synopsis += "\nSEH unwind:\n"
for (addr, handler, handler_str) in crash.seh_unwind:
synopsis += "\t%08x -> %s\n" % (addr, handler_str)
return synopsis + "\n"
####################################################################################################################
def export_file (self, file_name):
'''
Dump the entire object structure to disk.
@see: import_file()
@type file_name: String
@param file_name: File name to export to
@rtype: crash_binning
@return: self
'''
# null out what we don't serialize but save copies to restore after dumping to disk.
last_crash = self.last_crash
pydbg = self.pydbg
self.last_crash = self.pydbg = None
fh = open(file_name, "wb+")
fh.write(zlib.compress(cPickle.dumps(self, protocol=2)))
fh.close()
self.last_crash = last_crash
self.pydbg = pydbg
return self
####################################################################################################################
def import_file (self, file_name):
'''
Load the entire object structure from disk.
@see: export_file()
@type file_name: String
@param file_name: File name to import from
@rtype: crash_binning
@return: self
'''
fh = open(file_name, "rb")
tmp = cPickle.loads(zlib.decompress(fh.read()))
fh.close()
self.bins = tmp.bins
return self
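    # Illustrative sketch (not part of the original file): persisting bins
    # between fuzzing sessions.
    #
    #   crashbin.export_file("crashes.dump")
    #   ...
    #   crashbin = crash_binning().import_file("crashes.dump")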
####################################################################################################################
def last_crash_synopsis (self):
'''
        For the last recorded crash, generate and return a report containing the disassembly around the violating
address, the ID of the offending thread, the call stack and the SEH unwind.
@see: crash_synopsis()
@rtype: String
@return: Crash report
'''
if self.last_crash.write_violation:
direction = "write to"
else:
direction = "read from"
synopsis = "%s:%08x %s from thread %d caused access violation\nwhen attempting to %s 0x%08x\n\n" % \
(
self.last_crash.exception_module, \
self.last_crash.exception_address, \
self.last_crash.disasm, \
self.last_crash.violation_thread_id, \
direction, \
self.last_crash.violation_address \
)
synopsis += self.last_crash.context_dump
synopsis += "\ndisasm around:\n"
for (ea, inst) in self.last_crash.disasm_around:
synopsis += "\t0x%08x %s\n" % (ea, inst)
if len(self.last_crash.stack_unwind):
synopsis += "\nstack unwind:\n"
for entry in self.last_crash.stack_unwind:
synopsis += "\t%s\n" % entry
if len(self.last_crash.seh_unwind):
synopsis += "\nSEH unwind:\n"
for (addr, handler, handler_str) in self.last_crash.seh_unwind:
try:
disasm = self.pydbg.disasm(handler)
except:
disasm = "[INVALID]"
synopsis += "\t%08x -> %s %s\n" % (addr, handler_str, disasm)
return synopsis + "\n" | gpl-2.0 | -3,872,107,706,934,015,500 | 33.913194 | 120 | 0.511846 | false |
grangier/xhtml2pdf | xhtml2pdf/version.py | 61 | 1551 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 247 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-08-15 13:37:57 +0200 (Fr, 15 Aug 2008) $"
__version__ = VERSION = "VERSION{3.0.33}VERSION"[8:-8]
__build__ = BUILD = "BUILD{2010-06-16}BUILD"[6:-6]
VERSION_STR = """XHTML2PDF/pisa %s (Build %s)
http://www.xhtml2pdf.com
Copyright 2010 Dirk Holtwick, holtwick.it
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.""" % (
VERSION,
BUILD,
)
| apache-2.0 | -8,486,325,982,242,563,000 | 36.829268 | 74 | 0.718246 | false |
toert/django-shop-template | myshop/urls.py | 1 | 1142 | """myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^catalog/', include('catalog.urls', namespace='catalog')),
url(r'^cart/', include('cart.urls', namespace='cart')),
url(r'^order/', include('orders.urls', namespace='orders')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | mit | 3,241,782,335,727,302,000 | 38.413793 | 80 | 0.704028 | false |
Johnetordoff/osf.io | addons/github/api.py | 9 | 5246 | from future.moves.urllib.parse import urlencode
import github3
import cachecontrol
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from addons.github import settings as github_settings
from addons.github.exceptions import NotFoundError
# Initialize caches
https_cache = cachecontrol.CacheControlAdapter()
default_adapter = HTTPAdapter()
class GitHubClient(object):
def __init__(self, external_account=None, access_token=None):
self.access_token = getattr(external_account, 'oauth_key', None) or access_token
if self.access_token:
self.gh3 = github3.login(token=self.access_token)
self.gh3.set_client_id(
github_settings.CLIENT_ID, github_settings.CLIENT_SECRET
)
else:
self.gh3 = github3.GitHub()
        # Caching library
if github_settings.CACHE:
self.gh3._session.mount('https://api.github.com/user', default_adapter)
self.gh3._session.mount('https://', https_cache)
def user(self, user=None):
"""Fetch a user or the authenticated user.
:param user: Optional GitHub user name; will fetch authenticated
user if omitted
:return dict: GitHub API response
"""
if user is None:
return self.gh3.me()
return self.gh3.user(user)
def repo(self, user, repo):
"""Get a single Github repo's info.
:param str user: GitHub user name
:param str repo: GitHub repo name
:return: Dict of repo information
See http://developer.github.com/v3/repos/#get
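        Usage sketch (token and repository values are placeholders):
            client = GitHubClient(access_token='<oauth token>')
            repo = client.repo('octocat', 'Hello-World')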
"""
try:
rv = self.gh3.repository(user, repo)
except ConnectionError:
raise NotFoundError
if rv:
return rv
raise NotFoundError
def repos(self):
repos = self.gh3.repositories(type='all', sort='pushed')
return [repo for repo in repos if repo.permissions['push']]
def create_repo(self, repo, **kwargs):
return self.gh3.create_repository(repo, **kwargs)
def branches(self, user, repo, branch=None):
"""List a repo's branches or get a single branch (in a list).
:param str user: GitHub user name
:param str repo: GitHub repo name
:param str branch: Branch name if getting a single branch
:return: List of branch dicts
http://developer.github.com/v3/repos/#list-branches
"""
if branch:
return [self.repo(user, repo).branch(branch)]
return self.repo(user, repo).branches() or []
# TODO: Test
def starball(self, user, repo, archive='tar', ref='master'):
"""Get link for archive download.
:param str user: GitHub user name
:param str repo: GitHub repo name
:param str archive: Archive format [tar|zip]
:param str ref: Git reference
        :returns: tuple: Tuple of response headers and the archive content
"""
# github3 archive method writes file to disk
repository = self.repo(user, repo)
url = repository._build_url(archive + 'ball', ref, base_url=repository._api)
resp = repository._get(url, allow_redirects=True, stream=True)
return resp.headers, resp.content
#########
# Hooks #
#########
def hooks(self, user, repo):
"""List webhooks
:param str user: GitHub user name
:param str repo: GitHub repo name
        :return list: List of hook dicts from GitHub; see
http://developer.github.com/v3/repos/hooks/#json-http
"""
return self.repo(user, repo).hooks()
def add_hook(self, user, repo, name, config, events=None, active=True):
"""Create a webhook.
:param str user: GitHub user name
:param str repo: GitHub repo name
        :return dict: Hook info from GitHub; see
http://developer.github.com/v3/repos/hooks/#json-http
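        Sketch of a typical call (the config keys follow the GitHub hooks API;
        URL and repository are placeholders):
            client.add_hook('octocat', 'Hello-World', 'web',
                            {'url': 'https://example.org/hook',
                             'content_type': 'json'},
                            events=['push'])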
"""
try:
hook = self.repo(user, repo).create_hook(name, config, events, active)
except github3.GitHubError:
# TODO Handle this case - if '20 hooks' in e.errors[0].get('message'):
return None
else:
return hook
def delete_hook(self, user, repo, _id):
"""Delete a webhook.
:param str user: GitHub user name
:param str repo: GitHub repo name
:return bool: True if successful, False otherwise
:raises: NotFoundError if repo or hook cannot be located
"""
repo = self.repo(user, repo)
hook = repo.hook(_id)
if hook is None:
raise NotFoundError
return repo.hook(_id).delete()
########
# Auth #
########
def revoke_token(self):
if self.access_token:
return self.gh3.revoke_authorization(self.access_token)
def check_authorization(self):
return self.gh3.check_authorization(self.access_token)
def ref_to_params(branch=None, sha=None):
params = urlencode({
key: value
for key, value in {
'branch': branch,
'sha': sha,
}.items()
if value
})
if params:
return '?' + params
return ''
| apache-2.0 | 4,574,463,879,318,881,300 | 30.04142 | 88 | 0.601029 | false |
asmodehn/catkin | test/unit_tests/test_setup_util.py | 2 | 6374 | import os
import shutil
import sys
import tempfile
import unittest
from catkin_pkg.cmake import configure_file
data = configure_file(os.path.join(os.path.dirname(__file__), '..', '..', 'cmake', 'templates', '_setup_util.py.in'),
{
'CATKIN_LIB_ENVIRONMENT_PATHS': "'lib'",
'CATKIN_PKGCONFIG_ENVIRONMENT_PATHS': "os.path.join('lib', 'pkgconfig')",
'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
'PYTHON_EXECUTABLE': sys.executable,
'PYTHON_INSTALL_DIR': 'pythonX.Y/packages',
'CMAKE_PREFIX_PATH_AS_IS': '',
})
with tempfile.NamedTemporaryFile('w+') as setup_util_file:
setup_util_file.write(data)
setup_util_file.seek(0)
import imp
imp.load_source('setup_util', setup_util_file.name, setup_util_file.file)
import setup_util
from setup_util import _get_workspaces, _prefix_env_variable, _rollback_env_variable, CATKIN_MARKER_FILE
class SetupUtilTest(unittest.TestCase):
def test_get_reversed_workspaces(self):
try:
rootdir = tempfile.mkdtemp()
mock_env = {}
self.assertEqual([], _get_workspaces(mock_env))
self.assertEqual([], _get_workspaces(mock_env, 'foo'))
foows = os.path.join(rootdir, 'foo')
os.makedirs(foows)
with open(os.path.join(foows, CATKIN_MARKER_FILE), 'w') as fhand:
fhand.write('')
barws = os.path.join(rootdir, 'bar')
os.makedirs(barws)
with open(os.path.join(barws, CATKIN_MARKER_FILE), 'w') as fhand:
fhand.write('')
nows = os.path.join(rootdir, 'nows')
os.makedirs(nows)
mock_env = {'CMAKE_PREFIX_PATH': foows}
self.assertEqual([foows], _get_workspaces(mock_env))
mock_env = {'CMAKE_PREFIX_PATH': os.pathsep.join([nows, foows, barws, 'invalid'])}
self.assertEqual([foows, barws], _get_workspaces(mock_env))
finally:
shutil.rmtree(rootdir)
def test_prefix_env(self):
try:
rootdir = tempfile.mkdtemp()
foo_path = os.path.join(rootdir, 'foo')
os.makedirs(foo_path)
bar_path = os.path.join(rootdir, 'bar')
os.makedirs(bar_path)
baz_path = os.path.join(rootdir, 'baz')
bam_path = os.path.join(rootdir, 'bam')
lim_path = os.path.join(rootdir, 'lim')
os.makedirs(lim_path)
mock_env = {}
self.assertEqual('',
_prefix_env_variable(mock_env, 'varname', [], ''))
self.assertEqual(os.pathsep.join([foo_path, bar_path]),
_prefix_env_variable(mock_env, 'varname', [foo_path, bar_path, baz_path], ''))
mock_env = {'varname': os.pathsep.join([baz_path, bar_path, bam_path])}
self.assertEqual('',
_prefix_env_variable(mock_env, 'varname', [], ''))
self.assertEqual(foo_path + os.pathsep,
_prefix_env_variable(mock_env, 'varname', [foo_path, bar_path], ''))
self.assertEqual(os.pathsep.join([foo_path, lim_path]) + os.pathsep,
_prefix_env_variable(mock_env, 'varname', [foo_path, lim_path, foo_path, lim_path], ''))
finally:
shutil.rmtree(rootdir)
def test_remove_from_env(self):
altsep = os.path.altsep
try:
rootdir = tempfile.mkdtemp()
mock_env = {}
# foows
foows = os.path.join(rootdir, 'foo')
foolib = os.path.join(foows, 'lib') + '/'
os.makedirs(foows)
with open(os.path.join(foows, '.catkin'), 'w') as fhand:
fhand.write('')
# barws
barws = os.path.join(rootdir, 'bar')
barlib = os.path.join(barws, 'lib')
os.makedirs(barws)
with open(os.path.join(barws, '.catkin'), 'w') as fhand:
fhand.write('')
# mock_env with one ws in CPP
varname = 'varname'
wsvarname = 'workspaces'
mock_env = {varname: os.pathsep.join([foolib, barlib]),
'CMAKE_PREFIX_PATH': barws}
# since workspace foo is not in CMAKE_PREFIX_PATH, it remains in varname
self.assertEqual(foolib, _rollback_env_variable(mock_env, varname, ['/lib']))
# mock_env with both ws in CPP
mock_env = {varname: os.pathsep.join([foolib, barlib]),
wsvarname: os.pathsep.join([foows, barws]),
'CMAKE_PREFIX_PATH': os.pathsep.join([foows, barws])}
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['nolib']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['/nolib']))
self.assertEqual('', _rollback_env_variable(mock_env, varname, ['lib']))
self.assertEqual('', _rollback_env_variable(mock_env, varname, ['/lib']))
self.assertEqual(None, _rollback_env_variable(mock_env, varname, ['']))
self.assertEqual('', _rollback_env_variable(mock_env, wsvarname, ['']))
# nows: not a workspace
nows = os.path.join(rootdir, 'nows')
nowslib = os.path.join(nows, 'lib')
nowslib = os.path.join(nows, 'include')
os.makedirs(nows)
mock_env = {'varname': os.pathsep.join([foolib, nowslib, barlib, foolib]),
'CMAKE_PREFIX_PATH': os.pathsep.join([foows, barws])}
# checks nows/lib remains, and second mention of foolib
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['/lib']))
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['lib']))
# windows pathsep
os.path.altsep = '\\'
self.assertEqual(os.pathsep.join([nowslib, foolib]), _rollback_env_variable(mock_env, 'varname', ['\\lib']))
finally:
os.path.altsep = altsep
shutil.rmtree(rootdir)
| bsd-3-clause | -3,447,286,103,721,762,000 | 45.867647 | 120 | 0.543458 | false |
cherylyli/stress-aid | env/lib/python3.5/site-packages/markupsafe/tests.py | 674 | 6107 | # -*- coding: utf-8 -*-
import gc
import sys
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
def test_adding(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
def test_string_interpolation(self):
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
'<em><bad user></em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
} == '<em><bad user></em>'
assert Markup('%i') % 3.14 == '3'
assert Markup('%.2f') % 3.14 == '3.14'
def test_type_behavior(self):
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
def test_html_interop(self):
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
__str__ = __unicode__
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
def test_tuple_interpol(self):
self.assertEqual(Markup('<em>%s:%s</em>') % (
'<foo>',
'<bar>',
), Markup(u'<em><foo>:<bar></em>'))
def test_dict_interpol(self):
self.assertEqual(Markup('<em>%(foo)s</em>') % {
'foo': '<foo>',
}, Markup(u'<em><foo></em>'))
self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
'foo': '<foo>',
'bar': '<bar>',
}, Markup(u'<em><foo>:<bar></em>'))
def test_escaping(self):
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_formatting(self):
for actual, expected in (
(Markup('%i') % 3.14, '3'),
(Markup('%.2f') % 3.14159, '3.14'),
(Markup('%s %s %s') % ('<', 123, '>'), '< 123 >'),
(Markup('<em>{awesome}</em>').format(awesome='<awesome>'),
'<em><awesome></em>'),
(Markup('{0[1][bar]}').format([0, {'bar': '<bar/>'}]),
'<bar/>'),
(Markup('{0[1][bar]}').format([0, {'bar': Markup('<bar/>')}]),
'<bar/>')):
assert actual == expected, "%r should be %r!" % (actual, expected)
# This is new in 2.7
if sys.version_info >= (2, 7):
def test_formatting_empty(self):
formatted = Markup('{}').format(0)
assert formatted == Markup('0')
def test_custom_formatting(self):
class HasHTMLOnly(object):
def __html__(self):
return Markup('<foo>')
class HasHTMLAndFormat(object):
def __html__(self):
return Markup('<foo>')
def __html_format__(self, spec):
return Markup('<FORMAT>')
assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>')
def test_complex_custom_formatting(self):
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
user = User(1, 'foo')
assert Markup('<p>User: {0:link}').format(user) == \
Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
assert escape_silent('<foo>') == Markup(u'<foo>')
def test_splitting(self):
self.assertEqual(Markup('a b').split(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a b').rsplit(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a\nb').splitlines(), [
Markup('a'),
Markup('b')
])
def test_mul(self):
self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# vim:sts=4:sw=4:et:
| mit | 1,928,791,351,044,493,300 | 33.117318 | 82 | 0.502866 | false |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/whoosh/lang/snowball/bases.py | 96 | 4874 | # Base classes
class _ScandinavianStemmer(object):
"""
This subclass encapsulates a method for defining the string region R1.
It is used by the Danish, Norwegian, and Swedish stemmer.
"""
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
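        A worked example showing the three-letter adjustment (vowel set
        abbreviated to ASCII for illustration)::
            >>> _ScandinavianStemmer()._r1_scandinavian("avbrudd", "aeiouy")
            'rudd'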
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
r1 = word[3:]
elif len(word[:i + 1]) >= 3:
r1 = word[i + 1:]
else:
return word
break
return r1
class _StandardStemmer(object):
"""
This subclass encapsulates two methods for defining the standard versions
of the string regions R1, R2, and RV.
"""
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
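        A worked example (cf. the Snowball page linked above), using the
        English vowels::
            >>> _StandardStemmer()._r1r2_standard("beautiful", "aeiouy")
            ('iful', 'ul')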
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
r1 = word[i + 1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i - 1] in vowels:
r2 = r1[i + 1:]
break
return (r1, r2)
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
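        A worked example (vowel set abbreviated for illustration): in "macho"
        the second letter is a vowel and the first two letters are not both
        vowels, so RV is the region after the third letter::
            >>> _StandardStemmer()._rv_standard("macho", "aeiou")
            'ho'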
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i + 1:]
break
elif word[:2] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i + 1:]
break
else:
rv = word[3:]
return rv
| apache-2.0 | -9,069,660,661,931,592,000 | 35.646617 | 77 | 0.564423 | false |
Ziemin/telepathy-gabble | tests/twisted/muc/chat-states.py | 2 | 5219 | """
Regression test for <https://bugs.freedesktop.org/show_bug.cgi?id=32952>,
wherein chat states in MUCs were misparsed, and MUC chat states in general.
"""
from servicetest import assertEquals, assertLength, EventPattern
from gabbletest import exec_test, elem, make_muc_presence, sync_stream
from mucutil import join_muc_and_check
import ns
import constants as cs
MUC = '[email protected]'
BOB = MUC + '/bob'
def get_state_notification(stanza):
for x in stanza.elements():
if x.uri == ns.CHAT_STATES:
return x
return None
def check_state_notification(elem, name, allow_body=False):
assertEquals('message', elem.name)
assertEquals('groupchat', elem['type'])
notification = get_state_notification(elem)
assert notification is not None, elem.toXml()
assert notification.name == name, notification.toXml()
if not allow_body:
assert len(elem.children) == 1, elem.toXml()
def test(q, bus, conn, stream):
(chan, user, bob) = join_muc_and_check(q, bus, conn, stream,
MUC)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='[email protected]')(
elem(ns.CHAT_STATES, 'composing'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_COMPOSING, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(bob, cs.CHAT_STATE_INACTIVE))
stream.send(
elem('message', from_=BOB, to='test@localhost/Resource',
type='groupchat', jid='[email protected]')(
elem(ns.CHAT_STATES, 'paused'),
elem('google:nosave', 'x', value='disabled'),
elem('http://jabber.org/protocol/archive', 'record', otr='false'),
))
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_PAUSED, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
assertEquals(cs.CHAT_STATE_PAUSED,
states.get(bob, cs.CHAT_STATE_INACTIVE))
# Bob leaves
presence = make_muc_presence('owner', 'none', MUC, 'bob')
presence['type'] = 'unavailable'
stream.send(presence)
e = q.expect('dbus-signal', signal='ChatStateChanged')
contact, state = e.args
assertEquals(bob, contact)
assertEquals(cs.CHAT_STATE_GONE, state)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_INACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
# Bob no longer has any chat state at all
assertEquals(None, states.get(bob, None))
# Sending chat states:
# Composing...
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
stream_message = q.expect('stream-message')
check_state_notification(stream_message.stanza, 'composing')
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_COMPOSING,
states.get(user, cs.CHAT_STATE_INACTIVE))
# XEP 0085:
# every content message SHOULD contain an <active/> notification.
chan.send_msg_sync('hi.')
stream_message = q.expect('stream-message')
stanza = stream_message.stanza
check_state_notification(stanza, 'active', allow_body=True)
states = chan.Properties.Get(cs.CHANNEL_IFACE_CHAT_STATE, 'ChatStates')
assertEquals(cs.CHAT_STATE_ACTIVE,
states.get(user, cs.CHAT_STATE_INACTIVE))
bodies = list(stanza.elements(uri=ns.CLIENT, name='body'))
assertLength(1, bodies)
assertEquals(u'hi.', bodies[0].children[0])
# If we get an error with type='wait', stop sending chat states.
stanza['type'] = 'error'
stanza['from'] = MUC
stanza['to'] = 'test@localhost/Resource'
error = stanza.addElement('error')
error['type'] = 'wait'
error.addElement((ns.STANZA, 'resource-constraint'))
stream.send(stanza)
q.expect('dbus-signal', signal='MessageReceived',
predicate=lambda e: e.args[0][0]['message-type'] == cs.MT_DELIVERY_REPORT)
q.forbid_events([
EventPattern('stream-message', to=MUC,
predicate=lambda e: get_state_notification(e.stanza) is not None)
])
# User starts typing again but nothing should be seen or heard on the stream.
chan.ChatState.SetChatState(cs.CHAT_STATE_COMPOSING)
sync_stream(q, stream)
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | -799,849,711,795,539,800 | 34.263514 | 82 | 0.65798 | false |
hushaoqing/my_notes | Python/Scrapy/mytest/testScrapyGraphite/settings.py | 1 | 3248 | # -*- coding: utf-8 -*-
# Scrapy settings for mytest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'testScrapyGraphite'
SPIDER_MODULES = ['testScrapyGraphite.spiders']
NEWSPIDER_MODULE = 'testScrapyGraphite.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mytest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'mytest.middlewares.MytestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'mytest.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'mytest.pipelines.MytestPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
STATS_CLASS = 'testScrapyGraphite.spiders.stat_redis.GraphiteStatsCollector'
| mit | -629,166,847,869,914,000 | 33.553191 | 109 | 0.768165 | false |
rodrigods/keystone | keystone/token/controllers.py | 2 | 19819 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
from keystoneclient.common import cms
from oslo.utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.token import provider
CONF = config.CONF
LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
"""External authentication is not applicable."""
pass
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'token_api', 'token_provider_api', 'trust_api')
class Auth(controller.V2Controller):
@controller.v2_deprecated
def ca_cert(self, context, auth=None):
ca_file = open(CONF.signing.ca_certs, 'r')
data = ca_file.read()
ca_file.close()
return data
@controller.v2_deprecated
def signing_cert(self, context, auth=None):
cert_file = open(CONF.signing.certfile, 'r')
data = cert_file.read()
cert_file.close()
return data
@controller.v2_deprecated
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
Accept auth as a dict that looks like::
{
"auth":{
"passwordCredentials":{
"username":"test_user",
"password":"mypass"
},
"tenantName":"customer-x"
}
}
In this case, tenant is optional, if not provided the token will be
considered "unscoped" and can later be used to get a scoped token.
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
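        For example, a token-scoped request looks like (the token id is a
        placeholder)::
            {
                "auth":{
                    "token":{
                        "id":"<existing token id>"
                    },
                    "tenantName":"customer-x"
                }
            }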
"""
if auth is None:
raise exception.ValidationError(attribute='auth',
target='request body')
if "token" in auth:
# Try to authenticate using a token
auth_info = self._authenticate_token(
context, auth)
else:
# Try external authentication
try:
auth_info = self._authenticate_external(
context, auth)
except ExternalAuthNotApplicable:
# Try local authentication
auth_info = self._authenticate_local(
context, auth)
user_ref, tenant_ref, metadata_ref, expiry, bind = auth_info
# Validate that the auth info is valid and nothing is disabled
try:
self.identity_api.assert_user_enabled(
user_id=user_ref['id'], user=user_ref)
self.assignment_api.assert_domain_enabled(
domain_id=user_ref['domain_id'])
if tenant_ref:
self.assignment_api.assert_project_enabled(
project_id=tenant_ref['id'], project=tenant_ref)
except AssertionError as e:
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
# NOTE(morganfainberg): Make sure the data is in correct form since it
# might be consumed external to Keystone and this is a v2.0 controller.
# The user_ref is encoded into the auth_token_data which is returned as
# part of the token data. The token provider doesn't care about the
# format.
user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
tenant_ref = self.filter_domain_id(tenant_ref)
auth_token_data = self._get_auth_token_data(user_ref,
tenant_ref,
metadata_ref,
expiry)
if tenant_ref:
catalog_ref = self.catalog_api.get_catalog(
user_ref['id'], tenant_ref['id'], metadata_ref)
else:
catalog_ref = {}
auth_token_data['id'] = 'placeholder'
if bind:
auth_token_data['bind'] = bind
roles_ref = []
for role_id in metadata_ref.get('roles', []):
role_ref = self.assignment_api.get_role(role_id)
roles_ref.append(dict(name=role_ref['name']))
(token_id, token_data) = self.token_provider_api.issue_v2_token(
auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
# NOTE(wanghong): We consume a trust use only when we are using trusts
# and have successfully issued a token.
if CONF.trust.enabled and 'trust_id' in auth:
self.trust_api.consume_use(auth['trust_id'])
return token_data
def _authenticate_token(self, context, auth):
"""Try to authenticate using an already existing token.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'token' not in auth:
raise exception.ValidationError(
attribute='token', target='auth')
if "id" not in auth['token']:
raise exception.ValidationError(
attribute="id", target="token")
old_token = auth['token']['id']
if len(old_token) > CONF.max_token_size:
raise exception.ValidationSizeError(attribute='token',
size=CONF.max_token_size)
try:
old_token_ref = self.token_api.get_token(old_token)
except exception.NotFound as e:
raise exception.Unauthorized(e)
wsgi.validate_token_bind(context, old_token_ref)
# A trust token cannot be used to get another token
if 'trust' in old_token_ref:
raise exception.Forbidden()
if 'trust_id' in old_token_ref['metadata']:
raise exception.Forbidden()
user_ref = old_token_ref['user']
user_id = user_ref['id']
tenant_id = self._get_project_id_from_auth(auth)
if not CONF.trust.enabled and 'trust_id' in auth:
raise exception.Forbidden('Trusts are disabled.')
elif CONF.trust.enabled and 'trust_id' in auth:
trust_ref = self.trust_api.get_trust(auth['trust_id'])
if trust_ref is None:
raise exception.Forbidden()
if user_id != trust_ref['trustee_user_id']:
raise exception.Forbidden()
if (trust_ref['project_id'] and
tenant_id != trust_ref['project_id']):
raise exception.Forbidden()
if ('expires' in trust_ref) and (trust_ref['expires']):
expiry = trust_ref['expires']
if expiry < timeutils.parse_isotime(timeutils.isotime()):
                    raise exception.Forbidden()
user_id = trust_ref['trustor_user_id']
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if not trustor_user_ref['enabled']:
                raise exception.Forbidden()
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if not trustee_user_ref['enabled']:
                raise exception.Forbidden()
if trust_ref['impersonation'] is True:
current_user_ref = trustor_user_ref
else:
current_user_ref = trustee_user_ref
else:
current_user_ref = self.identity_api.get_user(user_id)
metadata_ref = {}
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = old_token_ref['expires']
if CONF.trust.enabled and 'trust_id' in auth:
trust_id = auth['trust_id']
trust_roles = []
for role in trust_ref['roles']:
if 'roles' not in metadata_ref:
                    raise exception.Forbidden()
if role['id'] in metadata_ref['roles']:
trust_roles.append(role['id'])
else:
raise exception.Forbidden()
if 'expiry' in trust_ref and trust_ref['expiry']:
trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
if trust_expiry < expiry:
expiry = trust_expiry
metadata_ref['roles'] = trust_roles
metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
metadata_ref['trust_id'] = trust_id
bind = old_token_ref.get('bind')
return (current_user_ref, tenant_ref, metadata_ref, expiry, bind)
def _authenticate_local(self, context, auth):
"""Try to authenticate against the identity backend.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'passwordCredentials' not in auth:
raise exception.ValidationError(
attribute='passwordCredentials', target='auth')
if "password" not in auth['passwordCredentials']:
raise exception.ValidationError(
attribute='password', target='passwordCredentials')
password = auth['passwordCredentials']['password']
if password and len(password) > CONF.identity.max_password_length:
raise exception.ValidationSizeError(
attribute='password', size=CONF.identity.max_password_length)
if ("userId" not in auth['passwordCredentials'] and
"username" not in auth['passwordCredentials']):
raise exception.ValidationError(
attribute='username or userId',
target='passwordCredentials')
user_id = auth['passwordCredentials'].get('userId')
if user_id and len(user_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='userId',
size=CONF.max_param_size)
username = auth['passwordCredentials'].get('username', '')
if username:
if len(username) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='username',
size=CONF.max_param_size)
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
try:
user_ref = self.identity_api.authenticate(
context,
user_id=user_id,
password=password)
except AssertionError as e:
raise exception.Unauthorized(e.args[0])
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
return (user_ref, tenant_ref, metadata_ref, expiry, None)
def _authenticate_external(self, context, auth):
"""Try to authenticate an external user via REMOTE_USER variable.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
environment = context.get('environment', {})
if not environment.get('REMOTE_USER'):
raise ExternalAuthNotApplicable()
# NOTE(jamielennox): xml and json differ and get confused about what
# empty auth should look like so just reset it.
if not auth:
auth = {}
username = environment['REMOTE_USER']
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
if ('kerberos' in CONF.token.bind and
environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
bind = {'kerberos': username}
return (user_ref, tenant_ref, metadata_ref, expiry, bind)
def _get_auth_token_data(self, user, tenant, metadata, expiry):
return dict(user=user,
tenant=tenant,
metadata=metadata,
expires=expiry)
def _get_project_id_from_auth(self, auth):
"""Extract tenant information from auth dict.
Returns a valid tenant_id if it exists, or None if not specified.
"""
tenant_id = auth.get('tenantId')
if tenant_id and len(tenant_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantId',
size=CONF.max_param_size)
tenant_name = auth.get('tenantName')
if tenant_name and len(tenant_name) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantName',
size=CONF.max_param_size)
if tenant_name:
try:
tenant_ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
tenant_id = tenant_ref['id']
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
return tenant_id
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
tenant_ref = None
role_list = []
if tenant_id:
try:
tenant_ref = self.assignment_api.get_project(tenant_id)
role_list = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
except exception.ProjectNotFound:
pass
if not role_list:
msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
msg = msg % {'u_id': user_id, 't_id': tenant_id}
LOG.warning(msg)
raise exception.Unauthorized(msg)
return (tenant_ref, role_list)
def _get_token_ref(self, token_id, belongs_to=None):
"""Returns a token if a valid one exists.
Optionally, limited to a token owned by a specific tenant.
"""
data = self.token_api.get_token(token_id)
if belongs_to:
if data.get('tenant') is None:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
if data['tenant'].get('id') != belongs_to:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
return data
@controller.v2_deprecated
@controller.protected()
def validate_token_head(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Identical to ``validate_token``, except does not return a response.
The code in ``keystone.common.wsgi.render_response`` will remove
the content body.
"""
# TODO(ayoung) validate against revocation API
belongs_to = context['query_string'].get('belongsTo')
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
@controller.protected()
def validate_token(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Returns metadata about the token along any associated roles.
"""
belongs_to = context['query_string'].get('belongsTo')
# TODO(ayoung) validate against revocation API
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
def delete_token(self, context, token_id):
"""Delete a token, effectively invalidating it for authz."""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
self.token_provider_api.revoke_token(token_id)
@controller.v2_deprecated
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if expires and isinstance(expires, datetime.datetime):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
@controller.v2_deprecated
def endpoints(self, context, token_id):
"""Return a list of endpoints available to the token."""
self.assert_admin(context)
token_ref = self._get_token_ref(token_id)
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
token_ref['user']['id'],
token_ref['tenant']['id'],
token_ref['metadata'])
return Auth.format_endpoint_list(catalog_ref)
@classmethod
def format_endpoint_list(cls, catalog_ref):
"""Formats a list of endpoints according to Identity API v2.
The v2.0 API wants an endpoint list to look like::
{
'endpoints': [
{
'id': $endpoint_id,
'name': $SERVICE[name],
'type': $SERVICE,
'tenantId': $tenant_id,
'region': $REGION,
}
],
'endpoints_links': [],
}
"""
if not catalog_ref:
return {}
endpoints = []
for region_name, region_ref in six.iteritems(catalog_ref):
for service_type, service_ref in six.iteritems(region_ref):
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
'type': service_type,
'region': region_name,
'publicURL': service_ref.get('publicURL'),
'internalURL': service_ref.get('internalURL'),
'adminURL': service_ref.get('adminURL'),
})
return {'endpoints': endpoints, 'endpoints_links': []}
| apache-2.0 | -9,215,433,027,710,838,000 | 37.408915 | 79 | 0.566123 | false |
rahul-c1/scrapy | scrapy/contrib/downloadermiddleware/robotstxt.py | 15 | 1856 | """
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
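For example, in a project's settings.py (a sketch; in this Scrapy version the
middleware is already part of DOWNLOADER_MIDDLEWARES_BASE, so enabling the
setting is normally sufficient)::
    ROBOTSTXT_OBEY = True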
"""
import robotparser
from scrapy import signals, log
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._useragent = crawler.settings.get('USER_AGENT')
self._parsers = {}
self._spider_netlocs = set()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
useragent = self._useragent
rp = self.robot_parser(request, spider)
if rp and not rp.can_fetch(useragent, request.url):
log.msg(format="Forbidden by robots.txt: %(request)s",
level=log.DEBUG, request=request)
raise IgnoreRequest
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = None
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(robotsurl, priority=self.DOWNLOAD_PRIORITY)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots)
self._spider_netlocs.add(netloc)
return self._parsers[netloc]
def _parse_robots(self, response):
rp = robotparser.RobotFileParser(response.url)
rp.parse(response.body.splitlines())
self._parsers[urlparse_cached(response).netloc] = rp
| bsd-3-clause | 779,046,458,928,687,500 | 33.37037 | 76 | 0.655172 | false |
crazy-cat/incubator-mxnet | python/mxnet/module/bucketing_module.py | 20 | 20458 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-instance-attributes, too-many-arguments, protected-access
# pylint: disable=too-many-public-methods
"""A `BucketingModule` implement the `BaseModule` API, and allows multiple
symbols to be used depending on the `bucket_key` provided by each different
mini-batch of data.
"""
import logging
import warnings
from .. import context as ctx
from ..initializer import Uniform
from .base_module import BaseModule, _check_input_names
from .module import Module
class BucketingModule(BaseModule):
"""This module helps to deal efficiently with varying-length inputs.
Parameters
----------
sym_gen : function
A function when called with a bucket key, returns a triple
``(symbol, data_names, label_names)``.
default_bucket_key : str (or any python object)
The key for the default bucket.
logger : Logger
context : Context or list of Context
Defaults to ``mx.cpu()``
work_load_list : list of number
Defaults to ``None``, indicating uniform workload.
fixed_param_names: list of str
Defaults to ``None``, indicating no network parameters are fixed.
state_names : list of str
States are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by set_states()
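    Examples
    --------
    >>> # A minimal sym_gen sketch (assumes ``import mxnet as mx``; names and
    >>> # sizes are illustrative only):
    >>> def sym_gen(bucket_key):
    ...     data = mx.sym.Variable('data')
    ...     label = mx.sym.Variable('softmax_label')
    ...     pred = mx.sym.FullyConnected(data=data, num_hidden=128, name='pred')
    ...     sym = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
    ...     return sym, ('data',), ('softmax_label',)
    >>> mod = BucketingModule(sym_gen, default_bucket_key=10, context=mx.cpu())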
"""
def __init__(self, sym_gen, default_bucket_key=None, logger=logging,
context=ctx.cpu(), work_load_list=None,
fixed_param_names=None, state_names=None):
super(BucketingModule, self).__init__(logger=logger)
assert default_bucket_key is not None
self._default_bucket_key = default_bucket_key
self._sym_gen = sym_gen
symbol, data_names, label_names = sym_gen(default_bucket_key)
data_names = list(data_names) if data_names is not None else []
label_names = list(label_names) if label_names is not None else []
state_names = list(state_names) if state_names is not None else []
fixed_param_names = list(fixed_param_names) if fixed_param_names is not None else []
_check_input_names(symbol, data_names, "data", True)
_check_input_names(symbol, label_names, "label", False)
_check_input_names(symbol, state_names, "state", True)
_check_input_names(symbol, fixed_param_names, "fixed_param", True)
self._fixed_param_names = fixed_param_names
self._state_names = state_names
self._context = context
self._work_load_list = work_load_list
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None
self._params_dirty = False
def _reset_bind(self):
"""Internal utility function to reset binding."""
self.binded = False
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None
@property
def data_names(self):
"""A list of names for data required by this module."""
if self.binded:
return self._curr_module.data_names
else:
_, data_names, _ = self._sym_gen(self._default_bucket_key)
return data_names
@property
def output_names(self):
"""A list of names for the outputs of this module."""
if self.binded:
return self._curr_module.output_names
else:
symbol, _, _ = self._sym_gen(self._default_bucket_key)
return symbol.list_outputs()
@property
def data_shapes(self):
"""Get data shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._curr_module.data_shapes
@property
def label_shapes(self):
"""Get label shapes.
Returns
-------
A list of `(name, shape)` pairs.
The return value could be ``None`` if the module does not need labels,
or if the module is not bound for training (in this case, label information
is not available).
"""
assert self.binded
return self._curr_module.label_shapes
@property
def output_shapes(self):
"""Gets output shapes.
Returns
-------
A list of `(name, shape)` pairs.
"""
assert self.binded
return self._curr_module.output_shapes
def get_params(self):
"""Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
"""
assert self.binded and self.params_initialized
self._curr_module._params_dirty = self._params_dirty
params = self._curr_module.get_params()
self._params_dirty = False
return params
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameters and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If true, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If true, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
if not allow_missing:
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
return
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"set_params call ignored.", stacklevel=2)
return
self._curr_module.set_params(arg_params, aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# because we didn't update self._arg_params, they are dirty now.
self._params_dirty = True
self.params_initialized = True
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Defaults to ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Defaults to ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Defaults to ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
self._curr_module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
self._params_dirty = False
self.params_initialized = True
def get_states(self, merge_multi_context=True):
"""Gets states from all devices.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._curr_module.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._curr_module.set_states(states, value)
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
label_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
for_training : bool
Default is ``True``.
inputs_need_grad : bool
Default is ``False``.
force_rebind : bool
Default is ``False``.
shared_module : BucketingModule
Default is ``None``. This value is currently not used.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
bucket_key : str (or any python object)
            Bucket key for binding. By default, uses the default_bucket_key.
"""
# in case we already initialized params, keep it
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one want to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already bound, ignoring bind()')
return
assert shared_module is None, 'shared_module for BucketingModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
symbol, data_names, label_names = self._sym_gen(self._default_bucket_key)
module = Module(symbol, data_names, label_names, logger=self.logger,
context=self._context, work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names)
module.bind(data_shapes, label_shapes, for_training, inputs_need_grad,
force_rebind=False, shared_module=None, grad_req=grad_req)
self._curr_module = module
self._curr_bucket_key = self._default_bucket_key
self._buckets[self._default_bucket_key] = module
# copy back saved params, if already initialized
if self.params_initialized:
self.set_params(arg_params, aux_params)
def switch_bucket(self, bucket_key, data_shapes, label_shapes=None):
"""Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``.
"""
assert self.binded, 'call bind before switching bucket'
        if bucket_key not in self._buckets:
symbol, data_names, label_names = self._sym_gen(bucket_key)
module = Module(symbol, data_names, label_names,
logger=self.logger, context=self._context,
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names)
module.bind(data_shapes, label_shapes, self._curr_module.for_training,
self._curr_module.inputs_need_grad,
force_rebind=False, shared_module=self._buckets[self._default_bucket_key])
self._buckets[bucket_key] = module
self._curr_module = self._buckets[bucket_key]
self._curr_bucket_key = bucket_key
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`
optimizer_params : dict
            Defaults to `(('learning_rate', 0.01),)`. The default value is not a dictionary,
            just to avoid a pylint warning about dangerous default values.
force_init : bool
Defaults to ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
self._curr_module.init_optimizer(kvstore, optimizer, optimizer_params,
force_init=force_init)
for mod in self._buckets.values():
if mod is not self._curr_module:
mod.borrow_optimizer(self._curr_module)
self.optimizer_initialized = True
def prepare(self, data_batch):
"""Prepares a data batch for forward.
Parameters
----------
data_batch : DataBatch
"""
# perform bind if haven't done so
assert self.binded and self.params_initialized
bucket_key = data_batch.bucket_key
original_bucket_key = self._curr_bucket_key
data_shapes = data_batch.provide_data
label_shapes = data_batch.provide_label
self.switch_bucket(bucket_key, data_shapes, label_shapes)
# switch back
self.switch_bucket(original_bucket_key, None, None)
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
            Defaults to ``None``, in which case `is_train` is taken as ``self.for_training``.
"""
assert self.binded and self.params_initialized
self.switch_bucket(data_batch.bucket_key, data_batch.provide_data,
data_batch.provide_label)
self._curr_module.forward(data_batch, is_train=is_train)
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
self._curr_module.backward(out_grads=out_grads)
def update(self):
"""Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
self._params_dirty = True
self._curr_module.update()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
            Defaults to ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came from
            a single executor.
Returns
-------
list of numpy arrays or list of list of numpy arrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are numpy arrays.
"""
assert self.binded and self.params_initialized
return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
            Defaults to ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came from
            a single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._curr_module.get_input_grads(merge_multi_context=merge_multi_context)
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
"""
assert self.binded and self.params_initialized
self._curr_module.update_metric(eval_metric, labels)
@property
def symbol(self):
"""The symbol of the current bucket being used."""
assert self.binded
return self._curr_module.symbol
def install_monitor(self, mon):
"""Installs monitor on all executors """
assert self.binded
for mod in self._buckets.values():
mod.install_monitor(mon)
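# Illustrative usage sketch (an assumption added for documentation purposes, not
# part of the library source): a typical training loop over a BucketingModule,
# where `sym_gen` and `train_iter` are user-supplied and hypothetical here.
#
#   mod = BucketingModule(sym_gen, default_bucket_key=train_iter.default_bucket_key)
#   mod.bind(data_shapes=train_iter.provide_data,
#            label_shapes=train_iter.provide_label)
#   mod.init_params()
#   mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.01),))
#   for batch in train_iter:
#       mod.forward(batch, is_train=True)   # forward/prepare switch buckets as needed
#       mod.backward()
#       mod.update()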
| apache-2.0 | -1,090,213,141,985,674,100 | 39.916 | 98 | 0.60959 | false |
HyperBaton/ansible | lib/ansible/modules/cloud/univention/udm_group.py | 37 | 4839 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright: (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_group
version_added: "2.2"
author:
- Tobias Rüetschi (@keachi)
short_description: Manage of the posix group
description:
- "This module allows to manage user groups on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group is present or not.
name:
required: true
description:
- Name of the posix group.
description:
required: false
description:
- Group description.
position:
required: false
description:
- define the whole ldap position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
subpath:
required: false
description:
- Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
'''
EXAMPLES = '''
# Create a POSIX group
- udm_group:
name: g123m-1A
# Create a POSIX group with the exact DN
# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
- udm_group:
name: g123m-1A
subpath: 'cn=classes,cn=students,cn=groups'
ou: school
# or
- udm_group:
name: g123m-1A
position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
description=dict(default=None,
type='str'),
position=dict(default='',
type='str'),
ou=dict(default='',
type='str'),
subpath=dict(default='cn=groups',
type='str'),
state=dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True
)
name = module.params['name']
description = module.params['description']
position = module.params['position']
ou = module.params['ou']
subpath = module.params['subpath']
state = module.params['state']
changed = False
diff = None
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={0}))'.format(name),
attr=['cn']
))
if position != '':
container = position
else:
if ou != '':
ou = 'ou={0},'.format(ou)
if subpath != '':
subpath = '{0},'.format(subpath)
container = '{0}{1}{2}'.format(subpath, ou, base_dn())
group_dn = 'cn={0},{1}'.format(name, container)
exists = bool(len(groups))
if state == 'present':
try:
if not exists:
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
grp['name'] = name
grp['description'] = description
diff = grp.diff()
changed = grp.diff() != []
if not module.check_mode:
if not exists:
grp.create()
else:
grp.modify()
except Exception:
module.fail_json(
msg="Creating/editing group {0} in {1} failed".format(name, container)
)
if state == 'absent' and exists:
try:
grp = umc_module_for_edit('groups/group', group_dn)
if not module.check_mode:
grp.remove()
changed = True
except Exception:
module.fail_json(
msg="Removing group {0} failed".format(name)
)
module.exit_json(
changed=changed,
name=name,
diff=diff,
container=container
)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,430,512,566,803,135,200 | 26.965318 | 92 | 0.54382 | false |
google-research/language | language/tek_representations/utils/mrqa_official_eval.py | 1 | 3638 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Official evaluation script for the MRQA Workshop Shared Task.
Adapted from the SQuAD v1.1 official evaluation script.
Usage:
python official_eval.py dataset_file.jsonl.gz prediction_file.json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import json
import re
import string
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Computes the token-level F1 score from the ground truth."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def read_predictions(prediction_file):
with open(prediction_file) as f:
predictions = json.load(f)
return predictions
def read_answers(gold_file):
answers = {}
with gzip.open(gold_file, 'rb') as f:
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and 'header' in example:
continue
for qa in example['qas']:
answers[qa['qid']] = qa['answers']
return answers
def evaluate(answers, predictions, skip_no_answer=False):
"""Evaluates EM/F1 of predictions given answers."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = 'Unanswered question %s will receive score 0.' % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
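# Minimal wiring sketch (an assumption; mirrors how the SQuAD-style scripts are
# usually invoked from the command line, and would additionally need `import sys`):
#
#   if __name__ == '__main__':
#       answers = read_answers(sys.argv[1])          # gzipped JSONL gold file
#       predictions = read_predictions(sys.argv[2])  # JSON mapping qid -> answer
#       print(json.dumps(evaluate(answers, predictions)))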
| apache-2.0 | -1,437,262,390,485,069,800 | 30.634783 | 79 | 0.694338 | false |
MostlyOpen/odoo_addons_jcafb | myo_lab_test_cst/models/lab_test_result_urina.py | 1 | 4485 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# from datetime import datetime
from openerp import fields, models
class LabTestResultUrina(models.Model):
_name = "myo.lab_test.result.urina"
_log_access = False
person_code = fields.Char(string='Person Code', required=True)
address_code = fields.Char(string='Address Code')
lab_test_code = fields.Char(string='Lab Test Code')
lab_test_type = fields.Char(string='Lab Test Type')
gender = fields.Char(string='Gender')
age = fields.Char(string='Age')
person_category = fields.Char(string='Person Category')
person_status = fields.Char(string='Person Status')
address_city = fields.Char(string='Cidade')
address_category = fields.Char(string='Address Category')
address_ditrict = fields.Char(string='Address District')
EUR_02_01 = fields.Char(
string='EUR-02-01',
help='Volume'
)
EUR_02_02 = fields.Char(
string='EUR-02-02',
help='Densidade'
)
EUR_02_03 = fields.Char(
string='EUR-02-03',
help='Aspecto'
)
EUR_02_04 = fields.Char(
string='EUR-02-04',
help='Cor'
)
EUR_02_05 = fields.Char(
string='EUR-02-05',
help='Odor'
)
EUR_03_01 = fields.Char(
string='EUR-03-01',
help='ph'
)
EUR_03_02 = fields.Char(
string='EUR-03-02',
help='Proteínas'
)
EUR_03_03 = fields.Char(
string='EUR-03-03',
help='Glicose'
)
EUR_03_04 = fields.Char(
string='EUR-03-04',
help='Cetona'
)
EUR_03_05 = fields.Char(
string='EUR-03-05',
help='Pigmentos biliares'
)
EUR_03_06 = fields.Char(
string='EUR-03-06',
help='Sangue'
)
EUR_03_07 = fields.Char(
string='EUR-03-07',
help='Urobilinogênio'
)
EUR_03_08 = fields.Char(
string='EUR-03-08',
help='Nitrito'
)
EUR_04_01 = fields.Char(
string='EUR-04-01',
help='Células Epiteliais'
)
EUR_04_02 = fields.Char(
string='EUR-04-02',
help='Muco'
)
EUR_04_03 = fields.Char(
string='EUR-04-03',
help='Cristais'
)
EUR_04_04 = fields.Char(
string='EUR-04-04',
help='Leucócitos'
)
EUR_04_05 = fields.Char(
string='EUR-04-05',
help='Hemácias'
)
EUR_04_06 = fields.Char(
string='EUR-04-06',
help='Cilindros'
)
EUR_04_07 = fields.Char(
string='EUR-04-07',
help='Cilindros Hialinos'
)
EUR_04_08 = fields.Char(
string='EUR-04-08',
help='Cilindros Granulosos'
)
EUR_04_09 = fields.Char(
string='EUR-04-09',
help='Cilindros Leucocitários'
)
EUR_04_10 = fields.Char(
string='EUR-04-10',
help='Cilindros Hemáticos'
)
EUR_04_11 = fields.Char(
string='EUR-04-11',
help='Cilindros Céreos'
)
EUR_04_12 = fields.Char(
string='EUR-04-12',
help='Outros tipos de Cilindros'
)
EUR_05_01 = fields.Char(
string='EUR-05-01',
help='Observações'
)
notes = fields.Text(string='Notes')
active = fields.Boolean(
'Active',
help="If unchecked, it will allow you to hide the lab test result urina without removing it.",
default=1
)
_sql_constraints = [
(
'person_code_uniq',
'UNIQUE (person_code)',
'Error! The Person Code must be unique!'
),
]
_rec_name = 'person_code'
_order = 'person_code'
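# Illustrative sketch (an assumption, not part of the module): creating a result
# record from server-side code with the new-style ORM API would look roughly like
# this; the field values are hypothetical.
#
#   self.env['myo.lab_test.result.urina'].create({
#       'person_code': 'P-0001',
#       'EUR_03_01': '6.0',   # ph
#       'notes': 'Sample within normal limits.',
#   })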
| agpl-3.0 | 5,958,855,959,438,270,000 | 25.636905 | 102 | 0.560447 | false |
systemd/casync | doc/conf.py | 1 | 3578 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: LGPL-2.1+
#
# casync documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 20 16:46:39 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'casync'
author = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'casyncdoc'
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('casync', 'casync', 'casync Documentation',
[], 1)
]
| lgpl-2.1 | 4,648,395,466,651,347,000 | 30.946429 | 79 | 0.698155 | false |
oikoumene/wcc.api | wcc/api/api.py | 1 | 2196 | # -*- coding: utf-8 -*-
from plone.jsonapi.routes import add_plone_route
# CRUD
from plone.jsonapi.routes.api import get_items
from plone.jsonapi.routes.api import create_items
from plone.jsonapi.routes.api import update_items
from plone.jsonapi.routes.api import delete_items
from plone.jsonapi.routes.api import url_for
# GET
@add_plone_route("/wccdocument", "wccdocument", methods=["GET"])
@add_plone_route("/wccdocument/<string:uid>", "wccdocument", methods=["GET"])
def get(context, request, uid=None):
""" get wccdocument
"""
items = get_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument"),
"count": len(items),
"items": items,
}
# CREATE
@add_plone_route("/wccdocument/create", "wccdocument_create", methods=["POST"])
@add_plone_route("/wccdocument/create/<string:uid>", "wccdocument_create", methods=["POST"])
def create(context, request, uid=None):
""" create wccdocument
"""
items = create_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_create"),
"count": len(items),
"items": items,
}
# UPDATE
@add_plone_route("/wccdocument/update", "wccdocument_update", methods=["POST"])
@add_plone_route("/wccdocument/update/<string:uid>", "wccdocument_update", methods=["POST"])
def update(context, request, uid=None):
""" update wccdocument
"""
items = update_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_update"),
"count": len(items),
"items": items,
}
# DELETE
@add_plone_route("/wccdocument/delete", "wccdocument_delete", methods=["POST"])
@add_plone_route("/wccdocument/delete/<string:uid>", "wccdocument_delete", methods=["POST"])
def delete(context, request, uid=None):
""" delete wccdocument
"""
items = delete_items("wcc.document.document", request, uid=uid, endpoint="wccdocument")
return {
"url": url_for("wccdocument_delete"),
"count": len(items),
"items": items,
}
# vim: set ft=python ts=4 sw=4 expandtab :
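# Illustrative client sketch (an assumption): the routes above are served by
# plone.jsonapi.routes over HTTP, so a GET against the "wccdocument" endpoint
# returns the dictionary built in get(); the base URL and path prefix depend on
# the deployment.
#
#   import requests
#   resp = requests.get('http://localhost:8080/Plone/@@API/plone/api/1.0/wccdocument')
#   for item in resp.json().get('items', []):
#       print(item)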
| lgpl-3.0 | -5,587,490,180,324,059,000 | 30.826087 | 92 | 0.656648 | false |
TalShafir/ansible | lib/ansible/modules/cloud/docker/docker_image_facts.py | 7 | 7425 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_image_facts
short_description: Inspect docker images
version_added: "2.1.0"
description:
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
options:
name:
description:
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
image IDs can be used.
required: true
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.8.0"
- "Please note that the L(docker-py,https://pypi.org/project/docker-py/) Python
module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should I(not)
be installed at the same time. Also note that when both modules are installed
and one of them is uninstalled, the other might no longer function and a
reinstall of it is required."
- "Docker API >= 1.20"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Inspect a single image
docker_image_facts:
name: pacur/centos-7
- name: Inspect multiple images
docker_image_facts:
name:
- pacur/centos-7
- sinatra
'''
RETURN = '''
images:
description: Facts for the selected images.
returned: always
type: dict
sample: [
{
"Architecture": "amd64",
"Author": "",
"Comment": "",
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/etc/docker/registry/config.yml"
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
"ContainerConfig": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/bin/sh",
"-c",
'#(nop) CMD ["/etc/docker/registry/config.yml"]'
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Created": "2016-03-08T21:08:15.399680378Z",
"DockerVersion": "1.9.1",
"GraphDriver": {
"Data": null,
"Name": "aufs"
},
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
"Name": "registry:2",
"Os": "linux",
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
"RepoDigests": [],
"RepoTags": [
"registry:2"
],
"Size": 0,
"VirtualSize": 165808884
}
]
'''
try:
from docker import utils
except ImportError:
# missing docker-py handled in ansible.module_utils.docker_common
pass
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass, is_image_name_id
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
self.name = self.client.module.params.get('name')
self.log("Gathering facts for images: %s" % (str(self.name)))
if self.name:
self.results['images'] = self.get_facts()
else:
self.results['images'] = self.get_all_images()
def fail(self, msg):
self.client.fail(msg)
def get_facts(self):
'''
Lookup and inspect each image name found in the names parameter.
:returns array of image dictionaries
'''
results = []
names = self.name
if not isinstance(names, list):
names = [names]
for name in names:
if is_image_name_id(name):
self.log('Fetching image %s (ID)' % (name))
image = self.client.find_image_by_id(name)
else:
repository, tag = utils.parse_repository_tag(name)
if not tag:
tag = 'latest'
self.log('Fetching image %s:%s' % (repository, tag))
image = self.client.find_image(name=repository, tag=tag)
if image:
results.append(image)
return results
def get_all_images(self):
results = []
images = self.client.images()
for image in images:
try:
inspection = self.client.inspect_image(image['Id'])
except Exception as exc:
self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
results.append(inspection)
return results
def main():
argument_spec = dict(
name=dict(type='list'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
min_docker_api_version='1.20',
)
results = dict(
changed=False,
images=[]
)
ImageManager(client, results)
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -437,710,135,116,650,100 | 28.819277 | 115 | 0.51596 | false |
mahak/neutron | neutron/tests/unit/services/logapi/drivers/test_manager.py | 2 | 7789 | # Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib import exceptions
from neutron_lib import fixture
from neutron_lib.services.logapi import constants as log_const
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.drivers import base as log_driver_base
from neutron.services.logapi.drivers import manager as driver_mgr
from neutron.tests import tools
from neutron.tests.unit.services.logapi import base
class TestGetParameter(base.BaseLogTestCase):
def test__get_param_missing_parameter(self):
kwargs = {'context': mock.sentinel.context}
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[], kwargs=kwargs,
name='log_obj', index=1)
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[mock.sentinel.context], kwargs={},
name='log_obj', index=1)
self.assertRaises(log_exc.LogapiDriverException,
driver_mgr._get_param,
args=[], kwargs={'log_obj': mock.sentinel.log_obj},
name='context', index=0)
class TestLogDriversManagerBase(base.BaseLogTestCase):
def setUp(self):
super(TestLogDriversManagerBase, self).setUp()
self.config_parse()
self.setup_coreplugin(load_plugins=False)
@staticmethod
def _create_manager_with_drivers(drivers_details):
for name, driver_details in drivers_details.items():
class LogDriver(log_driver_base.DriverBase):
@property
def is_loaded(self):
return driver_details['is_loaded']
LogDriver(name,
driver_details.get('vif_types', []),
driver_details.get('vnic_types', []),
driver_details.get('supported_logging_types', []))
return driver_mgr.LoggingServiceDriverManager()
class TestLogDriversManagerMulti(TestLogDriversManagerBase):
"""Test calls happen to all drivers"""
def test_driver_manager_empty_with_no_drivers(self):
driver_manager = self._create_manager_with_drivers({})
self.assertEqual(0, len(driver_manager.drivers))
def test_driver_manager_empty_with_no_loaded_drivers(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': False}})
self.assertEqual(0, len(driver_manager.drivers))
def test_driver_manager_with_one_loaded_driver(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True}})
self.assertEqual(1, len(driver_manager.drivers))
def test_driver_manager_with_two_loaded_drivers(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True},
'driver-B': {'is_loaded': True}})
self.assertEqual(2, len(driver_manager.drivers))
class TestLogDriversManagerLoggingTypes(TestLogDriversManagerBase):
"""Test supported logging types"""
def test_available_logging_types(self):
driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True,
'supported_logging_types': ['security_group']},
'driver-B': {'is_loaded': True,
'supported_logging_types':
['security_group', 'firewall']}
})
self.assertEqual(set(['security_group', 'firewall']),
driver_manager.supported_logging_types)
class TestLogDriversCalls(TestLogDriversManagerBase):
"""Test log driver calls"""
def setUp(self):
super(TestLogDriversCalls, self).setUp()
self.driver_manager = self._create_manager_with_drivers(
{'driver-A': {'is_loaded': True}})
def test_implemented_call_methods(self):
for method in log_const.LOG_CALL_METHODS:
with mock.patch.object(log_driver_base.DriverBase, method) as \
method_fnc:
context = mock.sentinel.context
log_obj = mock.sentinel.log_obj
self.driver_manager.call(
method, context=context, log_objs=[log_obj])
method_fnc.assert_called_once_with(
context=context, log_objs=[log_obj])
def test_not_implemented_call_methods(self):
context = mock.sentinel.context
log_obj = mock.sentinel.log_obj
self.assertRaises(exceptions.DriverCallError, self.driver_manager.call,
'wrong_method', context=context, log_objs=[log_obj])
class TestHandleResourceCallback(TestLogDriversManagerBase):
"""Test handle resource callback"""
def setUp(self):
super(TestHandleResourceCallback, self).setUp()
self._cb_mgr = mock.Mock()
self.useFixture(fixture.CallbackRegistryFixture(
callback_manager=self._cb_mgr))
self.driver_manager = driver_mgr.LoggingServiceDriverManager()
def test_subscribe_resources_cb(self):
class FakeResourceCB1(driver_mgr.ResourceCallBackBase):
def handle_event(self, resource, event, trigger, **kwargs):
pass
class FakeResourceCB2(driver_mgr.ResourceCallBackBase):
def handle_event(self, resource, event, trigger, **kwargs):
pass
driver_mgr.RESOURCE_CB_CLASS_MAP = {'fake_resource1': FakeResourceCB1,
'fake_resource2': FakeResourceCB2}
self.driver_manager._setup_resources_cb_handle()
fake_resource_cb1 = FakeResourceCB1(
'fake_resource1', self.driver_manager.call)
fake_resource_cb2 = FakeResourceCB2(
'fake_resource2', self.driver_manager.call)
assert_calls = [
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_CREATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_UPDATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb1.handle_event,
'fake_resource1', events.AFTER_DELETE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_CREATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_UPDATE)),
mock.call(
*tools.get_subscribe_args(
fake_resource_cb2.handle_event,
'fake_resource2', events.AFTER_DELETE)),
]
self._cb_mgr.subscribe.assert_has_calls(assert_calls)
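# Illustrative sketch (an assumption, not part of the test suite): as the
# fixtures above show, a concrete driver becomes visible to the manager simply
# by instantiating a DriverBase subclass before the manager is created.  The
# names below are hypothetical.
#
#   class NoopLogDriver(log_driver_base.DriverBase):
#       @property
#       def is_loaded(self):
#           return True
#
#   NoopLogDriver('noop', [], [], ['security_group'])
#   manager = driver_mgr.LoggingServiceDriverManager()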
| apache-2.0 | -316,675,875,089,911,600 | 40.21164 | 79 | 0.605598 | false |
wwj718/edx-platform | common/test/acceptance/tests/discussion/helpers.py | 46 | 4061 | """
Helper functions and classes for discussion tests.
"""
from uuid import uuid4
import json
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
SingleThreadViewFixture,
Thread,
Response,
)
from ...pages.lms.discussion import DiscussionTabSingleThreadPage
from ...tests.helpers import UniqueCourseTest
class BaseDiscussionMixin(object):
"""
A mixin containing methods common to discussion tests.
"""
def setup_thread(self, num_responses, **thread_kwargs):
"""
Create a test thread with the given number of responses, passing all
keyword arguments through to the Thread fixture, then invoke
setup_thread_page.
"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
)
for i in range(num_responses):
thread_fixture.addResponse(Response(id=str(i), body=str(i)))
thread_fixture.push()
self.setup_thread_page(thread_id)
return thread_id
class CohortTestMixin(object):
"""
Mixin for tests of cohorted courses
"""
def setup_cohort_config(self, course_fixture, auto_cohort_groups=None):
"""
Sets up the course to use cohorting with the given list of auto_cohort_groups.
If auto_cohort_groups is None, no auto cohorts are set.
"""
course_fixture._update_xblock(course_fixture._course_location, {
"metadata": {
u"cohort_config": {
"auto_cohort_groups": auto_cohort_groups or [],
"cohorted_discussions": [],
"cohorted": True,
},
},
})
def disable_cohorting(self, course_fixture):
"""
Disables cohorting for the current course fixture.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': False})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to disable cohorts")
def add_manual_cohort(self, course_fixture, cohort_name):
"""
Adds a cohort by name, returning its ID.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/'
data = json.dumps({"name": cohort_name, 'assignment_type': 'manual'})
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to create cohort")
return response.json()['id']
def add_user_to_cohort(self, course_fixture, username, cohort_id):
"""
Adds a user to the specified cohort.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + "/cohorts/{}/add".format(cohort_id)
data = {"users": username}
course_fixture.headers['Content-type'] = 'application/x-www-form-urlencoded'
response = course_fixture.session.post(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to add user to cohort")
class BaseDiscussionTestCase(UniqueCourseTest):
def setUp(self):
super(BaseDiscussionTestCase, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fixture = CourseFixture(**self.course_info)
self.course_fixture.add_advanced_settings(
{'discussion_topics': {'value': {'Test Discussion Topic': {'id': self.discussion_id}}}}
)
self.course_fixture.install()
def create_single_thread_page(self, thread_id):
"""
Sets up a `DiscussionTabSingleThreadPage` for a given
`thread_id`.
"""
return DiscussionTabSingleThreadPage(self.browser, self.course_id, self.discussion_id, thread_id)
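# Illustrative sketch (an assumption, not part of these helpers): a concrete
# test typically mixes BaseDiscussionMixin into BaseDiscussionTestCase and
# provides setup_thread_page(); the class and method bodies below are
# hypothetical.
#
#   class ExampleThreadTest(BaseDiscussionMixin, BaseDiscussionTestCase):
#       def setup_thread_page(self, thread_id):
#           self.thread_page = self.create_single_thread_page(thread_id)
#           self.thread_page.visit()
#
#       def test_thread_has_responses(self):
#           self.setup_thread(3)  # thread with 3 canned responses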
| agpl-3.0 | -2,008,490,233,469,499,000 | 37.67619 | 127 | 0.632849 | false |
groschovskiy/keyczar | cpp/src/tools/swtoolkit/site_scons/site_tools/environment_tools.py | 11 | 10726 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up tools for environments for for software construction toolkit.
This module is a SCons tool which should be include in all environments. It
will automatically be included by the component_setup tool.
"""
import os
import SCons
#------------------------------------------------------------------------------
def FilterOut(self, **kw):
"""Removes values from existing construction variables in an Environment.
The values to remove should be a list. For example:
self.FilterOut(CPPDEFINES=['REMOVE_ME', 'ME_TOO'])
Args:
self: Environment to alter.
kw: (Any other named arguments are values to remove).
"""
kw = SCons.Environment.copy_non_reserved_keywords(kw)
for key, val in kw.items():
envval = self.get(key, None)
if envval is None:
# No existing variable in the environment, so nothing to delete.
continue
for vremove in val:
# Use while not if, so we can handle duplicates.
while vremove in envval:
envval.remove(vremove)
self[key] = envval
# TODO: SCons.Environment.Append() has much more logic to deal with various
# types of values. We should handle all those cases in here too. (If
# variable is a dict, etc.)
#------------------------------------------------------------------------------
def Overlap(self, values1, values2):
"""Checks for overlap between the values.
Args:
self: Environment to use for variable substitution.
values1: First value(s) to compare. May be a string or list of strings.
values2: Second value(s) to compare. May be a string or list of strings.
Returns:
The list of values in common after substitution, or an empty list if
the values do not overlap.
Converts the values to a set of plain strings via self.SubstList2() before
comparison, so SCons $ variables are evaluated.
"""
set1 = set(self.SubstList2(values1))
set2 = set(self.SubstList2(values2))
return list(set1.intersection(set2))
#------------------------------------------------------------------------------
def ApplySConscript(self, sconscript_file):
"""Applies a SConscript to the current environment.
Args:
self: Environment to modify.
sconscript_file: Name of SConscript file to apply.
Returns:
The return value from the call to SConscript().
ApplySConscript() should be used when an existing SConscript which sets up an
environment gets too large, or when there is common setup between multiple
environments which can't be reduced into a parent environment which the
multiple child environments Clone() from. The latter case is necessary
because env.Clone() only enables single inheritance for environments.
ApplySConscript() is NOT intended to replace the Tool() method. If you need
to add methods or builders to one or more environments, do that as a tool
(and write unit tests for them).
ApplySConscript() is equivalent to the following SCons call:
SConscript(sconscript_file, exports={'env':self})
The called SConscript should import the 'env' variable to get access to the
calling environment:
Import('env')
Changes made to env in the called SConscript will be applied to the
environment calling ApplySConscript() - that is, env in the called SConscript
is a reference to the calling environment.
If you need to export multiple variables to the called SConscript, or return
variables from it, use the existing SConscript() function.
"""
return self.SConscript(sconscript_file, exports={'env': self})
#------------------------------------------------------------------------------
def BuildSConscript(self, sconscript_file):
"""Builds a SConscript based on the current environment.
Args:
self: Environment to clone and pass to the called SConscript.
sconscript_file: Name of SConscript file to build. If this is a directory,
this method will look for sconscript_file+'/build.scons', and if that
is not found, sconscript_file+'/SConscript'.
Returns:
The return value from the call to SConscript().
BuildSConscript() should be used when an existing SConscript which builds a
project gets too large, or when a group of SConscripts are logically related
but should not directly affect each others' environments (for example, a
library might want to build a number of unit tests which exist in
subdirectories, but not allow those tests' SConscripts to affect/pollute the
  library's environment).
BuildSConscript() is NOT intended to replace the Tool() method. If you need
to add methods or builders to one or more environments, do that as a tool
(and write unit tests for them).
BuildSConscript() is equivalent to the following SCons call:
SConscript(sconscript_file, exports={'env':self.Clone()})
or if sconscript_file is a directory:
SConscript(sconscript_file+'/build.scons', exports={'env':self.Clone()})
The called SConscript should import the 'env' variable to get access to the
calling environment:
Import('env')
Changes made to env in the called SConscript will NOT be applied to the
environment calling BuildSConscript() - that is, env in the called SConscript
is a clone/copy of the calling environment, not a reference to that
environment.
If you need to export multiple variables to the called SConscript, or return
variables from it, use the existing SConscript() function.
"""
# Need to look for the source node, since by default SCons will look for the
# entry in the variant_dir, which won't exist (and thus won't be a directory
# or a file). This isn't a problem in BuildComponents(), since the variant
# dir is only set inside its call to SConscript().
if self.Entry(sconscript_file).srcnode().isdir():
# Building a subdirectory, so look for build.scons or SConscript
script_file = sconscript_file + '/build.scons'
if not self.File(script_file).srcnode().exists():
script_file = sconscript_file + '/SConscript'
else:
script_file = sconscript_file
self.SConscript(script_file, exports={'env': self.Clone()})
#------------------------------------------------------------------------------
def SubstList2(self, *args):
"""Replacement subst_list designed for flags/parameters, not command lines.
Args:
self: Environment context.
args: One or more strings or lists of strings.
Returns:
A flattened, substituted list of strings.
SCons's built-in subst_list evaluates (substitutes) variables in its
arguments, and returns a list of lists (one per positional argument). Since
it is designed for use in command line expansion, the list items are
SCons.Subst.CmdStringHolder instances. These instances can't be passed into
env.File() (or subsequent calls to env.subst(), either). The returned
nested lists also need to be flattened via env.Flatten() before the caller
can iterate over the contents.
SubstList2() does a subst_list, flattens the result, then maps the flattened
list to strings.
It is better to do:
for x in env.SubstList2('$MYPARAMS'):
than to do:
for x in env.get('MYPARAMS', []):
and definitely better than:
for x in env['MYPARAMS']:
which will throw an exception if MYPARAMS isn't defined.
"""
return map(str, self.Flatten(self.subst_list(args)))
#------------------------------------------------------------------------------
def RelativePath(self, source, target, sep=os.sep, source_is_file=False):
"""Calculates the relative path from source to target.
Args:
self: Environment context.
source: Source path or node.
target: Target path or node.
sep: Path separator to use in returned relative path.
source_is_file: If true, calculates the relative path from the directory
containing the source, rather than the source itself. Note that if
source is a node, you can pass in source.dir instead, which is shorter.
Returns:
The relative path from source to target.
"""
# Split source and target into list of directories
source = self.Entry(str(source))
if source_is_file:
source = source.dir
source = source.abspath.split(os.sep)
target = self.Entry(str(target)).abspath.split(os.sep)
# Handle source and target identical
if source == target:
if source_is_file:
return source[-1] # Bare filename
else:
return '.' # Directory pointing to itself
# TODO: Handle UNC paths and drive letters (fine if they're the same, but if
# they're different, there IS no relative path)
# Remove common elements
while source and target and source[0] == target[0]:
source.pop(0)
target.pop(0)
# Join the remaining elements
return sep.join(['..'] * len(source) + target)
#------------------------------------------------------------------------------
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Add methods to environment
env.AddMethod(ApplySConscript)
env.AddMethod(BuildSConscript)
env.AddMethod(FilterOut)
env.AddMethod(Overlap)
env.AddMethod(RelativePath)
env.AddMethod(SubstList2)
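# Illustrative sketch (an assumption, not part of the tool): once the tool is
# loaded, the methods added above are available on any environment, e.g.
#
#   env.Append(CPPDEFINES=['REMOVE_ME', 'KEEP_ME'])
#   env.FilterOut(CPPDEFINES=['REMOVE_ME'])
#   if env.Overlap('$CPPDEFINES', ['KEEP_ME']):
#     print env.RelativePath('src/a', 'src/b/c')   # -> '../b/c'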
| apache-2.0 | -9,214,205,022,342,615,000 | 36.767606 | 79 | 0.692616 | false |
smips/Temporary_Insanity | TI/src/TI.py | 1 | 4468 | import libtcodpy as libtcod
import sys
from time import sleep
import os, math, random
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","World"))
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","Engine"))
sys.path.insert(0, os.path.realpath(__file__).replace("TI.py","Scripts"))
import GameObject,Tile,DataGrinder,Actor,Prop,Camera,ScriptHandler
import Map
DEBUG = 1
game_iteration = 0
objects = []
#actual size of the window
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
DISPLAY_WIDTH = 60
DISPLAY_HEIGHT = 50
FOV_RECOMPUTE = True
LIMIT_FPS = 60 #60 frames-per-second maximum
FPS_CONSOLE = libtcod.console_new(10,1)
def dprint(arg):
global DEBUG
if DEBUG:
print(arg)
def handle_keys():
global player, map, FOV_RECOMPUTE, enemy
key = libtcod.console_check_for_keypress(True)
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
return True #exit game
#Call a test script
elif key.vk == libtcod.KEY_KP5:
enemy.tick(map)
FOV_RECOMPUTE = True
#movement keys
if key.vk == (libtcod.KEY_KP8):
player.move(0,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP2):
player.move(0,1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP4):
player.move(-1,0, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP6):
player.move(1,0, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP7):
player.move(-1,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP9):
player.move(1,-1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP1):
player.move(-1,1, map)
FOV_RECOMPUTE = True
elif key.vk == (libtcod.KEY_KP3):
player.move(1,1, map)
FOV_RECOMPUTE = True
if key.vk != libtcod.KEY_NONE:
return False
def update():
pass
def render():
global map, camera, player, SCREEN_WIDTH, SCREEN_HEIGHT, FOV_RECOMPUTE
camera.move_camera(player.x, player.y, map.width, map.height)
libtcod.console_set_default_foreground(0, libtcod.white)
temp_player_x, temp_player_y = camera.to_camera_coordinates(player.x, player.y)
if FOV_RECOMPUTE:
libtcod.console_clear(0)
libtcod.map_compute_fov(map.fov_map, player.x, player.y, 7, True, 1)
FOV_RECOMPUTE = False
for x in range(DISPLAY_WIDTH):
for y in range(DISPLAY_HEIGHT):
(map_x, map_y) = (camera.x + x, camera.y + y)
distance = get_distance(player.x, map_x, player.y, map_y)
map.map[map_x][map_y].draw(camera, map, distance)
libtcod.console_print(FPS_CONSOLE, 0, 0, 'FPS: ' + str(libtcod.sys_get_fps()))
libtcod.console_blit(FPS_CONSOLE, 0, 0, 10, 1, 0, 0, 0)
libtcod.console_flush()
def get_distance(x1, x2, y1, y2):
dx = x2 - x1
dy = y2 - y1
return int(math.sqrt(dx ** 2 + dy ** 2))
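# Illustrative example (not part of the original script):
#   get_distance(0, 3, 0, 4) -> 5   (truncated Euclidean distance on the grid)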
def tick():
global map, FOV_RECOMPUTE
for object in map.objects:
if object.name == 'Player':
FOV_RECOMPUTE = object.tick(map)
else:
object.tick(map)
#############################################
# Initialization & Main Loop
#############################################
dprint('Initialization started')
libtcod.console_set_custom_font('assets/arial10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'python/libtcod tutorial', False)
libtcod.sys_set_fps(LIMIT_FPS)
map = Map.Map(1)
player = Actor.Actor(map.rooms[0].center.x, map.rooms[0].center.y, 1, map)
enemy = Actor.Actor(map.rooms[0].center.x + 2, map.rooms[0].center.y + 2, 2, map)
map.objects.append(player)
map.objects.append(enemy)
camera = Camera.Camera(player.x, player.y, DISPLAY_WIDTH, DISPLAY_HEIGHT)
dprint('Initialization complete')
while not libtcod.console_is_window_closed():
#Tick is currently causing the game to run in real time......... FIX ASAP!!!
update()
render()
tick()
exit = False
#exit game if needed
if exit:
break
| mit | 85,711,425,074,374,270 | 26.628205 | 112 | 0.592476 | false |
HubLot/PBxplore | pbxplore/tests/test_regression.py | 2 | 21493 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Regression tests for PBxplore.
This test suite run the various PBxplore programs with various argument, and
makes sure the output is the expected one. The aim is to check that the
programs are not broken during development.
Be careful this test suite does not test that the output is right. It just
test that the output is the expected one based on a previous version.
"""
# Use print as a function like in python 3
from os import path
from uuid import uuid1
from functools import wraps
import os
import subprocess
import shutil
import sys
import pytest
import MDAnalysis
import matplotlib
try:
import weblogo
IS_WEBLOGO = True
except ImportError:
IS_WEBLOGO = False
here = os.path.abspath(os.path.dirname(__file__))
# Resources for the tests are stored in the following directory
REFDIR = os.path.join(here, "test_data/")
class TemplateTestCase(object):
"""
Template TestCase class for the other TestCase class to inherit from.
Children class must overload the `_build_command_line` and the
`_validate_output` methods.
"""
def _run_program_and_validate(self, out_run_dir, reference, **kwargs):
"""
Run the program to test and validate its outputs.
"""
# Build the command line to run. This relies on the _build_command_line
# method that is a virtual method, which must be overloaded by the
# child class.
command = self._build_command_line(str(out_run_dir), **kwargs)
print(command)
# Run the command.
exe = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# The return code should be 0.
assert exe.returncode == 0, 'Program exited with a {} code.'.format(
exe.returncode)
# Validate the output files. This relies on the _validate_output
# virtual method.
self._validate_output(str(out_run_dir), reference, **kwargs)
def _build_command_line(self, **kwargs):
"""
Build the command line to run.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
def _validate_output(self, reference, **kwargs):
"""
Validate the output files.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
class TestPBAssign(TemplateTestCase):
"""
Regression tests for PBAssign.py
"""
references = ["1BTA", "1AY7", "2LFU", "3ICH"]
extensions = [".pdb", ".cif.gz"]
def _run_PBassign(self, out_run_dir, pdbid, extension,
multiple=None, indir=REFDIR):
"""
Run a PBxplore program on a PDBID with the given options.
`options` is expected to be a list that will be directly passed to
subprocess, it must not contain the input or output options.
"""
if multiple is None:
test_input = path.join(REFDIR, pdbid + extension)
out_basename = path.join(out_run_dir, pdbid)
input_args = ['-p', test_input]
else:
input_args = []
for basename in pdbid:
input_args += ['-p', path.join(REFDIR, basename + extension)]
out_basename = path.join(out_run_dir, multiple)
run_list = (['PBassign'] + input_args + ['-o', out_basename + extension])
exe = subprocess.Popen(run_list,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
return exe.returncode, out_run_dir
def _test_PBassign_options(self, out_run_dir, basename, extension, outfiles,
multiple=None, expected_exit=0):
out_run_dir = str(out_run_dir)
if multiple is not None:
out_name = multiple
status, out_run_dir = self._run_PBassign(out_run_dir, basename, extension, multiple)
assert status == expected_exit, \
'PBassign stoped with a {0} exit code'.format(status)
assert len(os.listdir(out_run_dir)) == len(outfiles),\
('PBassign did not produced the right number of files: '
'{0} files produced instead of {1}').format(
len(os.listdir(out_run_dir)), len(outfiles))
out_name = basename if multiple is None else multiple
for outfile in (template.format(out_name + extension)
for template in outfiles):
test_file = path.join(out_run_dir, outfile)
ref_file = path.join(REFDIR, outfile)
_assert_identical_files(test_file, ref_file)
@pytest.mark.parametrize('reference', references)
@pytest.mark.parametrize('extension', extensions)
def test_fasta(self, tmpdir, reference, extension):
"""
Run PBAssign on PDB files, and check the fasta output.
"""
self._test_PBassign_options(tmpdir, reference, extension,
['{0}.PB.fasta'])
@pytest.mark.parametrize('extension', extensions)
def test_multiple_inputs(self, tmpdir, extension):
"""
Run PBassign with multiple inputs.
"""
self._test_PBassign_options(tmpdir, self.references, extension,
['{0}.PB.fasta'], multiple='all')
def test_xtc_input(self, tmpdir):
"""
Run PBassign on a trajectory in the XTC format.
        This test should produce the right output with python 2. With python 3,
        PBassign should fail as MDAnalysis is not available.
"""
name = 'barstar_md_traj'
out_run_dir = str(tmpdir)
output_fname = name + '.PB.fasta'
call_list = ['PBassign',
'-x', os.path.join(REFDIR, name + '.xtc'),
'-g', os.path.join(REFDIR, name + '.gro'),
'-o', os.path.join(out_run_dir, name)]
exe = subprocess.Popen(call_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = exe.communicate()
status = exe.wait()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
        # MDAnalysis is available, PBassign should run and produce the
# correct output
assert status == 0, 'PBassign exited with an error'
_assert_identical_files(os.path.join(REFDIR, output_fname),
os.path.join(out_run_dir, output_fname))
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_different_outputs(self, tmpdir):
"""
        Test that the tests properly fail when the content of an output file
        differs from the expected reference.
"""
reference = "test_fail"
extension = ".pdb"
self._test_PBassign_options(tmpdir, reference, extension, ['{0}.PB.fasta'])
class TestPBcount(TemplateTestCase):
"""
Test running PBcount.
"""
def _build_command_line(self, out_run_dir, input_files, output, first_residue=None):
output_full_path = os.path.join(out_run_dir, output)
command = ['PBcount', '-o', output_full_path]
for input_file in input_files:
command += ['-f', os.path.join(REFDIR, input_file)]
if first_residue is not None:
command += ['--first-residue', str(first_residue)]
return command
def _validate_output(self, out_run_dir, reference, output, **kwargs):
reference_full_path = os.path.join(REFDIR, reference)
output_full_path = os.path.join(out_run_dir,
output + '.PB.count')
_assert_identical_files(output_full_path, reference_full_path)
def test_single_file_single_model(self, tmpdir):
"""
Run PBcount with a single input file that contains a single model.
"""
input_files = ['count_single1.PB.fasta', ]
output = 'output'
reference = 'count_single1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_single_file_multiple_models(self, tmpdir):
"""
Run PBcount with a single input file that contains multiple models.
"""
input_files = ['count_multi1.PB.fasta', ]
output = 'output'
reference = 'count_multi1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_single_model(self, tmpdir):
"""
Run PBcount with multiple input files that contain a single model.
"""
input_files = ['count_single1.PB.fasta',
'count_single2.PB.fasta',
'count_single3.PB.fasta']
output = 'output'
reference = 'count_single123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_multiple_models(self, tmpdir):
"""
Run PBcount with multiple input files that contain multiple models each.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_first_residue_positive(self, tmpdir):
"""
        Test PBcount with the --first-residue option and a positive value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=20)
def test_first_residue_negative(self, tmpdir):
"""
        Test PBcount with the --first-residue option and a negative value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first-20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=-20)
class TestPBstat(TemplateTestCase):
def _build_command_line(self, out_run_dir, input_file, output,
mapdist=False, neq=False,
logo=False, image_format=None,
residue_min=None, residue_max=None):
input_full_path = os.path.join(REFDIR, input_file)
output_full_path = os.path.join(str(out_run_dir), output)
command = ['PBstat', '-f', input_full_path, '-o', output_full_path]
if mapdist:
command += ['--map']
if neq:
command += ['--neq']
if logo:
command += ['--logo']
if image_format is not None:
command += ['--image-format', image_format]
if residue_min is not None:
command += ['--residue-min', str(residue_min)]
if residue_max is not None:
command += ['--residue-max', str(residue_max)]
return command
def _validate_output(self, out_run_dir, reference, input_file, output,
mapdist=False, neq=False, logo=False, image_format=None,
residue_min=None, residue_max=None, **kwargs):
suffix_residue = ''
if residue_min or residue_max:
suffix_residue = ".{}-{}".format(residue_min, residue_max)
suffix_args = ''
extension = '.png'
if neq:
suffix_args = '.Neq'
if mapdist:
suffix_args = '.map'
if logo:
suffix_args = '.logo'
if image_format is None:
extension = '.png'
else:
extension = '.' + image_format
reference_full_path = os.path.join(REFDIR, reference + '.PB'
+ suffix_args + suffix_residue)
output = os.path.join(str(out_run_dir), output)
output_full_path = output + '.PB' + suffix_args + suffix_residue
if neq:
# Assess the validity of the Neq file
_assert_identical_files(output_full_path, reference_full_path)
# Assess the creation of the graph file (png or pdf)
value, msg = _file_validity(output_full_path + extension)
assert value, msg
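    # For instance (illustrative values only, not one of the parametrised
    # cases below), _build_command_line(out_run_dir,
    # 'count_multi123.PB.count', 'output', neq=True, image_format='pdf',
    # residue_min=10, residue_max=30) yields roughly
    #     ['PBstat', '-f', <REFDIR>/count_multi123.PB.count,
    #      '-o', <out_run_dir>/output, '--neq', '--image-format', 'pdf',
    #      '--residue-min', '10', '--residue-max', '30']
    # and _validate_output then expects 'output.PB.Neq.10-30' (the Neq data)
    # plus 'output.PB.Neq.10-30.pdf' (the graph) in the run directory.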
def test_neq(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True)
self._run_program_and_validate(tmpdir,
reference='count_single123',
input_file='count_single123.PB.count',
output='output',
neq=True)
def test_neq_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True,
residue_min=10, residue_max=30)
def test_neq_with_first_residue(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True)
def test_neq_with_first_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True,
residue_min=25, residue_max=35)
def test_neq_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True, image_format='pdf')
def test_mapdist(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True)
def test_mapdist_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True, image_format='pdf')
def test_mapdist_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True,
residue_min=10, residue_max=30)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='pdf')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_png(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='png')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_jpg(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='jpg')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_weblogo_logo_invalid_format(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='invalid')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True,
residue_min=10, residue_max=30)
def _file_validity(file_a):
"""
    Check whether file_a exists and is not empty.
Return a tuple containing:
- True if all went well, False otherwise
- the error message, empty if True is returned
"""
if os.path.isfile(file_a):
if os.path.getsize(file_a) > 0:
return True, ''
else:
return False, '{0} is empty'.format(file_a)
else:
return False, '{0} does not exist'.format(file_a)
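# Typical use, as in TestPBstat._validate_output above: unpack the returned
# tuple and feed it straight into an assert, e.g.
#     value, msg = _file_validity(output_full_path + extension)
#     assert value, msg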
def _same_file_content(file_a, file_b, comment_char=">"):
"""
    Return True if the two files are identical. Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
with open(file_a) as f1, open(file_b) as f2:
# Compare content line by line
for f1_line, f2_line in zip(f1, f2):
if (f1_line != f2_line):
                # If both lines are comment lines, the difference is ignored
                # no matter what the comments contain
f1_firstchar = f1_line.strip().startswith(comment_char)
f2_firstchar = f2_line.strip().startswith(comment_char)
if f1_firstchar != f2_firstchar:
print(file_a, file_b)
print(f1_line, f2_line, sep='//')
return False
        # Check if one file is longer than the other; that would leave one
        # file iterator not completely consumed
for infile in (f1, f2):
try:
next(infile)
except StopIteration:
pass
else:
# The iterator is not consumed, it means that this file is
# longer than the other
print('File too long')
return False
# If we reach this line, it means that we did not find any difference
return True
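# Illustrative behaviour (hypothetical contents): two PB.fasta files whose
# sequence lines match are reported identical even if their ">" header lines
# differ (e.g. ">test | model 1" vs ">ref | model 1"); a difference on any
# non-comment line, or a different number of lines, makes the comparison fail.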
def _assert_identical_files(file_a, file_b, comment_char=">"):
"""
    Raise an AssertionError if the two files are not identical.
    Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
assert _same_file_content(file_a, file_b), '{0} and {1} are not identical'\
.format(file_a, file_b)
| mit | -6,514,076,797,168,018,000 | 39.939048 | 92 | 0.528172 | false |
rjschof/gem5 | src/dev/BadDevice.py | 69 | 1789 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from Device import BasicPioDevice
class BadDevice(BasicPioDevice):
type = 'BadDevice'
cxx_header = "dev/baddev.hh"
devicename = Param.String("Name of device to error on")
| bsd-3-clause | -460,308,259,115,228,300 | 50.114286 | 72 | 0.787032 | false |
haya14busa/alc-etm-searcher | nltk-3.0a3/build/lib/nltk/classify/tadm.py | 2 | 3527 | # Natural Language Toolkit: Interface to TADM Classifier
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Joseph Frazee <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import sys
import subprocess
from nltk import compat
from nltk.internals import find_binary
try:
import numpy
except ImportError:
numpy = None
_tadm_bin = None
def config_tadm(bin=None):
global _tadm_bin
_tadm_bin = find_binary(
'tadm', bin,
env_vars=['TADM_DIR'],
binary_names=['tadm'],
url='http://tadm.sf.net')
def write_tadm_file(train_toks, encoding, stream):
"""
Generate an input file for ``tadm`` based on the given corpus of
classified tokens.
:type train_toks: list(tuple(dict, str))
:param train_toks: Training data, represented as a list of
pairs, the first member of which is a feature dictionary,
and the second of which is a classification label.
:type encoding: TadmEventMaxentFeatureEncoding
:param encoding: A feature encoding, used to convert featuresets
into feature vectors.
:type stream: stream
:param stream: The stream to which the ``tadm`` input file should be
written.
"""
# See the following for a file format description:
#
# http://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
# http://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
labels = encoding.labels()
for featureset, label in train_toks:
length_line = '%d\n' % len(labels)
stream.write(length_line)
for known_label in labels:
v = encoding.encode(featureset, known_label)
line = '%d %d %s\n' % (
int(label == known_label),
len(v),
' '.join('%d %d' % u for u in v)
)
stream.write(line)
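# A small sketch of the event file written above (hypothetical featuresets;
# the ``tadm`` binary itself is not needed for this part):
#
#     toks = [({'f0': 1, 'f1': 1}, 'A'), ({'f0': 1, 'f2': 1}, 'B')]
#     enc = TadmEventMaxentFeatureEncoding.train(toks)
#     write_tadm_file(toks, enc, sys.stdout)
#
# For each training token this emits one line holding the number of known
# labels, followed by one line per label of the form
# "<1 if it is the true label else 0> <number of features> <id value ...>".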
def parse_tadm_weights(paramfile):
"""
Given the stdout output generated by ``tadm`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector.
"""
weights = []
for line in paramfile:
weights.append(float(line.strip()))
return numpy.array(weights, 'd')
def call_tadm(args):
"""
Call the ``tadm`` binary with the given arguments.
"""
if isinstance(args, compat.string_types):
raise TypeError('args should be a list of strings')
if _tadm_bin is None:
config_tadm()
    # Call tadm via a subprocess, capturing stderr so it can be reported
    # if the command fails
    cmd = [_tadm_bin] + args
    p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print()
        print(stderr.decode('utf-8', 'replace'))
raise OSError('tadm command failed!')
def names_demo():
from nltk.classify.util import names_demo
from nltk.classify.maxent import TadmMaxentClassifier
classifier = names_demo(TadmMaxentClassifier.train)
def encoding_demo():
import sys
from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
tokens = [({'f0':1, 'f1':1, 'f3':1}, 'A'),
({'f0':1, 'f2':1, 'f4':1}, 'B'),
({'f0':2, 'f2':1, 'f3':1, 'f4':1}, 'A')]
encoding = TadmEventMaxentFeatureEncoding.train(tokens)
write_tadm_file(tokens, encoding, sys.stdout)
print()
for i in range(encoding.length()):
print('%s --> %d' % (encoding.describe(i), i))
print()
if __name__ == '__main__':
encoding_demo()
names_demo()
| mit | 4,864,610,011,136,842,000 | 30.491071 | 72 | 0.625461 | false |
gavin-feng/odoo | addons/document/odt2txt.py | 435 | 2110 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys, zipfile, xml.dom.minidom
import StringIO
class OpenDocumentTextFile :
def __init__ (self, filepath):
zip = zipfile.ZipFile(filepath)
self.content = xml.dom.minidom.parseString(zip.read("content.xml"))
def toString (self):
""" Converts the document to a string. """
buffer = u""
for val in ["text:p", "text:h", "text:list"]:
for paragraph in self.content.getElementsByTagName(val) :
buffer += self.textToString(paragraph) + "\n"
return buffer
def textToString(self, element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += self.textToString(node)
return buffer
if __name__ == "__main__" :
s =StringIO.StringIO(file(sys.argv[1]).read())
odt = OpenDocumentTextFile(s)
print odt.toString().encode('ascii','replace')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,180,575,962,094,107,000 | 38.811321 | 79 | 0.601422 | false |
Venturi/oldcms | env/lib/python2.7/site-packages/django/db/models/fields/related.py | 10 | 114783 | from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.lookups import IsNull
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_registered_model raises a LookupError, it means
# that the related model isn't loaded yet, so we need to pend the relation
# until the class is prepared.
try:
model = cls._meta.apps.get_registered_model(app_label, model_name)
except LookupError:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
else:
operation(field, model, cls)
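# A condensed sketch of how the ``operation`` callback is used further down in
# this module (see RelatedField.contribute_to_class): it receives the field,
# the resolved model class and the owning class once the lazy string
# reference can be resolved.
#
#     def resolve_related_class(field, model, cls):
#         field.rel.to = model
#         field.do_related_class(model, cls)
#     add_lazy_relation(cls, self, "someapp.AnotherModel", resolve_related_class)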
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.rel.to
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_related_name_is_valid())
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_related_name_is_valid(self):
import re
import keyword
related_name = self.rel.related_name
if not related_name:
return []
is_valid_id = True
if keyword.iskeyword(related_name):
is_valid_id = False
if six.PY3:
if not related_name.isidentifier():
is_valid_id = False
else:
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
is_valid_id = False
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.rel.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_relation_model_exists(self):
rel_is_missing = self.rel.to not in apps.get_models()
rel_is_string = isinstance(self.rel.to, six.string_types)
model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.rel.to not in apps.get_models() and
not isinstance(self.rel.to, six.string_types) and
self.rel.to._meta.swapped):
model = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
""" Check accessor and reverse query name clashes. """
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.rel.to` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.rel.to, ModelBase):
return []
# If the field doesn't install backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.rel.is_hidden():
return []
try:
self.rel
except AttributeError:
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.rel.to._meta
# rel_opts.object_name == "Target"
rel_name = self.rel.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name,
clash_field.name) # i. e. "Target.model_set"
if clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i. e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
clash_name = "%s.%s" % ( # i. e. "Model.m2m"
clash_field.related_model._meta.object_name,
clash_field.field.name)
if clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'."
% (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
'''By default related field will not have a column
as it relates columns to another table'''
return None
def contribute_to_class(self, cls, name, virtual_only=False):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name, virtual_only=virtual_only)
if not cls._meta.abstract and self.rel.related_name:
related_name = force_text(self.rel.related_name) % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.rel.related_name = related_name
other = self.rel.to
if isinstance(other, six.string_types) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
@property
def swappable_setting(self):
"""
Gets the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
to_string = self.rel.to
else:
to_string = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name,
)
# See if anything swapped/swappable matches
for model in apps.get_models(include_swapped=True):
if model._meta.swapped:
if model._meta.swapped == to_string:
return model._meta.swappable
if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable:
return model._meta.swappable
return None
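    # For example, a relation declared against settings.AUTH_USER_MODEL (or
    # against the model currently swapped in for it) resolves here to the
    # string 'AUTH_USER_MODEL', which migrations can record instead of a hard
    # reference to one concrete user model.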
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.set_field_name()
@property
def related(self):
warnings.warn(
"Usage of field.related has been deprecated. Use field.rel instead.",
RemovedInDjango110Warning, 2)
return self.rel
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.rel)
def get_limit_choices_to(self):
"""Returns 'limit_choices_to' for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.rel.limit_choices_to):
return self.rel.limit_choices_to()
return self.rel.limit_choices_to
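    # Illustrative sketch (hypothetical models): ``limit_choices_to`` may be a
    # plain filter dict or a callable returning one, which is why it is
    # evaluated lazily here:
    #     models.ForeignKey(Staff, limit_choices_to={'is_active': True})
    #     models.ForeignKey(Event,
    #                       limit_choices_to=lambda: {'date__gte': datetime.date.today()})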
def formfield(self, **kwargs):
"""Passes ``limit_choices_to`` to field being constructed.
Only passes it if there is a type that supports related fields.
This is a similar strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.rel, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.rel.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.related.related_model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.related.related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
params = {}
for lh_field, rh_field in self.related.field.related_fields:
params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
try:
rel_obj = self.get_queryset(instance=instance).get(**params)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.related_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.to` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.rel.to.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.field.rel.to._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.field.rel.to._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.rel.multiple:
rel_obj_cache_name = self.field.rel.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
params = {
rh_field.attname: getattr(instance, lh_field.attname)
for lh_field, rh_field in self.field.related_fields}
qs = self.get_queryset(instance=instance)
extra_filter = self.field.get_extra_descriptor_filter(instance)
if isinstance(extra_filter, dict):
params.update(extra_filter)
qs = qs.filter(**params)
else:
qs = qs.filter(extra_filter, **params)
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.rel.multiple:
setattr(rel_obj, self.field.rel.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.rel.to._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.rel.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.rel.multiple:
setattr(value, self.field.rel.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel_field, rel_model):
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.core_filters = {rel_field.name: instance}
self.model = rel_model
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in rel_field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % rel_field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs):
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" %
(self.model._meta.object_name, obj))
setattr(obj, rel_field.name, self.instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = rel_field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if rel_field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{rel_field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, rel_field.name, None)
obj.save(update_fields=[rel_field.name])
_clear.alters_data = True
return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.model, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's default
# manager.
return create_foreign_related_manager(
self.related.related_model._default_manager.__class__,
self.related.field,
self.related.related_model,
)
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
source_field = through._meta.get_field(source_field_name)
source_related_fields = source_field.related_fields
self.core_filters = {}
for lh_field, rh_field in source_related_fields:
self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.instance = instance
self.symmetrical = symmetrical
self.source_field = source_field
self.target_field = through._meta.get_field(target_field_name)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self.related_val = source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel)
return manager_class(
model=self.model,
query_field_name=self.query_field_name,
instance=self.instance,
symmetrical=self.symmetrical,
source_field_name=self.source_field_name,
target_field_name=self.target_field_name,
reverse=self.reverse,
through=self.through,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.related_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
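    # A minimal usage sketch (Article and Publication are hypothetical models,
    # not part of this module):
    #
    #     class Publication(models.Model):
    #         title = models.CharField(max_length=30)
    #
    #     class Article(models.Model):
    #         publications = models.ManyToManyField(Publication)
    #
    #     pub = Publication.objects.create(title='Science News')
    #     pub.article_set.add(article)    # resolved through __get__ below
    #     pub.article_set.count()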
def __init__(self, related):
self.related = related # RelatedObject instance
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related
# model's default manager.
return create_many_related_manager(
self.related.related_model._default_manager.__class__,
self.related.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
rel_model = self.related.related_model
manager = self.related_manager_cls(
model=rel_model,
query_field_name=self.related.field.name,
prefetch_cache_name=self.related.field.related_query_name(),
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True,
through=self.related.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
)
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
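    # A sketch of the forward side, reusing the hypothetical Article and
    # Publication models from the ManyRelatedObjectsDescriptor comment above:
    #
    #     article.publications.add(pub)   # goes through __get__ below
    #     article.publications.all()
    #     article.publications = [pub]    # goes through __set__ (clear + add)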
def __init__(self, m2m_field):
self.field = m2m_field
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's
# default manager.
return create_many_related_manager(
self.field.rel.to._default_manager.__class__,
self.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
manager = self.related_manager_cls(
model=self.field.rel.to,
query_field_name=self.field.related_query_name(),
prefetch_cache_name=self.field.name,
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False,
through=self.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
)
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
class ForeignObjectRel(object):
# Field flags
auto_created = True
concrete = False
editable = False
is_relation = True
def __init__(self, field, to, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
self.field = field
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
self.symmetrical = False
# Some of the following cached_properties can't be initialized in
# __init__ as the field doesn't have its model yet. Calling these methods
    # before field.contribute_to_class() has been called will result in an
    # AttributeError.
@cached_property
def model(self):
return self.to
@cached_property
def hidden(self):
return self.is_hidden()
@cached_property
def name(self):
return self.field.related_query_name()
@cached_property
def related_model(self):
if not self.field.model:
raise AttributeError(
"This property can't be accessed before self.field.contribute_to_class has been called.")
return self.field.model
@cached_property
def many_to_many(self):
return self.field.many_to_many
@cached_property
def many_to_one(self):
return self.field.one_to_many
@cached_property
def one_to_many(self):
return self.field.many_to_one
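    # Note: many_to_one and one_to_many are intentionally mirrored relative to
    # the underlying field. A ForeignObjectRel describes the reverse side of
    # the relation, so the reverse of a many-to-one field is a one-to-many
    # relation and vice versa.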
@cached_property
def one_to_one(self):
return self.field.one_to_one
def __repr__(self):
return '<%s: %s.%s>' % (
type(self).__name__,
self.related_model._meta.app_label,
self.related_model._meta.model_name,
)
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""
        Returns choices with a default blank choice included, for use as
SelectField choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
"""
first_choice = blank_choice if include_blank else []
queryset = self.related_model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.related_model._meta.model_name: False}
)
lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name is not None and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
        Sets the related field's name; this is not available until later stages
of app loading, so set_field_name is called from
set_attributes_from_rel()
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_accessor_name(self, model=None):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
# Due to backwards compatibility ModelForms need to be able to provide
# an alternate model. See BaseInlineFormSet.get_default_prefix().
opts = model._meta if model else self.related_model._meta
model = model or self.related_model
if self.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if self.symmetrical and model == self.to:
return None
if self.related_name:
return self.related_name
if opts.default_related_name:
return opts.default_related_name % {
'model_name': opts.model_name.lower(),
'app_label': opts.app_label.lower(),
}
return opts.model_name + ('_set' if self.multiple else '')
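    # Example (hypothetical models): a ForeignKey from Article to Reporter
    # with no related_name yields the accessor reporter.article_set; with
    # related_name='articles' it yields reporter.articles; a related_name
    # ending in '+' marks the relation as hidden, so no accessor is attached
    # at all (see is_hidden() and contribute_to_related_class()).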
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
def get_path_info(self):
return self.field.get_reverse_path_info()
class ManyToOneRel(ForeignObjectRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(ManyToOneRel, self).__init__(
field, to, related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.field_name = field_name
def __getstate__(self):
state = self.__dict__.copy()
state.pop('related_model', None)
return state
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
field = self.to._meta.get_field(self.field_name)
if not field.concrete:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return field
def set_field_name(self):
self.field_name = self.field_name or self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(OneToOneRel, self).__init__(field, to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.multiple = False
class ManyToManyRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None, through_fields=None,
db_constraint=True, related_query_name=None):
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
super(ManyToManyRel, self).__init__(
field, to, related_name=related_name,
limit_choices_to=limit_choices_to, related_query_name=related_query_name)
self.symmetrical = symmetrical
self.multiple = True
self.through = through
self.through_fields = through_fields
self.db_constraint = db_constraint
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name is not None and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, 'rel', None)
if rel and rel.to == self.to:
break
return field.foreign_related_fields[0]
class ForeignObject(RelatedField):
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
# For backwards compatibility; ignored as of Django 1.8.4.
allow_unsaved_instance_assignment = False
requires_unique_target = True
related_accessor_class = ForeignRelatedObjectsDescriptor
def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.rel.to, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
        # Skip if the related fields cannot be resolved yet.
try:
self.foreign_related_fields
except FieldDoesNotExist:
return []
try:
self.rel
except AttributeError:
return []
if not self.foreign_related_fields:
return []
has_unique_field = any(rel_field.unique
for rel_field in self.foreign_related_fields)
if not has_unique_field and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.rel.to.__name__
return [
checks.Error(
"None of the fields %s on model '%s' have a unique=True constraint."
% (field_combination, model_name),
hint=None,
obj=self,
id='fields.E310',
)
]
elif not has_unique_field:
field_name = self.foreign_related_fields[0].name
model_name = self.rel.to.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
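    # Sketch of what these checks catch (hypothetical models): a ForeignObject
    # whose to_fields point at two target fields, neither of which is
    # unique=True, raises fields.E310, while one pointing at a single
    # non-unique field raises fields.E311.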
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
if self.rel.on_delete != CASCADE:
kwargs['on_delete'] = self.rel.on_delete
if self.rel.parent_link:
kwargs['parent_link'] = self.rel.parent_link
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
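    # Sketch of the gotcha handled above (hypothetical multi-table
    # inheritance): for Restaurant(Place), restaurant.id and
    # restaurant.place_ptr_id can disagree during fixture loading, so the
    # primary-key value is answered from instance.pk rather than the raw
    # attname.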
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
        Returns an extra filter condition for related object fetching when a
        user accesses 'instance.fieldname'; that is, the extra filter is used
        in the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
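    # A subclass sketch (hypothetical, not part of this module): a field that
    # should only fetch "active" rows through its descriptor could override
    # this as
    #
    #     def get_extra_descriptor_filter(self, instance):
    #         return {'is_active': True}
    #
    # and the dict is ANDed with the join columns when doing
    # instance.fieldname.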
def get_extra_restriction(self, where_class, alias, related_alias):
"""
        Returns a pair condition used for joining and subquery pushdown. The
        condition is something that responds to the as_sql(compiler, connection)
        method.
        Note that currently referring to both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
if len(lookups) > 1:
raise exceptions.FieldError('Relation fields do not support nested lookups')
lookup_type = lookups[0]
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(IsNull(targets[0].get_col(alias, sources[0]), raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for target, source, val in zip(targets, sources, value):
lookup_class = target.get_lookup(lookup_type)
root_constraint.add(
lookup_class(target.get_col(alias, source), val), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
lookup_class = targets[0].get_lookup(lookup_type)
root_constraint.add(lookup_class(targets[0].get_col(alias, sources[0]), value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for source, target, val in zip(sources, targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(target.get_col(alias, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r" % (
self.__class__.__name__, to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.db_constraint = db_constraint
kwargs['rel'] = rel_class(
self, to, to_field,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
def check(self, **kwargs):
errors = super(ForeignKey, self).check(**kwargs)
errors.extend(self._check_on_delete())
errors.extend(self._check_unique())
return errors
def _check_on_delete(self):
on_delete = getattr(self.rel, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def _check_unique(self, **kwargs):
return [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=self,
id='fields.W342',
)
] if self.unique else []
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.rel.to, "_meta", None)
if self.rel.field_name and (not to_meta or (to_meta.pk and self.rel.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.rel.field_name
return name, path, args, kwargs
@property
def related_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.rel.to._meta.verbose_name, 'pk': value,
'field': self.rel.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.related_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (value == '' and
(not self.related_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.related_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.related_field.get_db_prep_value(value, connection, prepared)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.rel.to, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.rel.to))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
        # thing we can do is make AutoField an IntegerField.
rel_field = self.related_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
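    # For example, a ForeignKey pointing at a default AutoField primary key is
    # stored as a plain integer column, so on PostgreSQL this returns
    # "integer" rather than the "serial" type the AutoField itself reports.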
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
def convert_empty_strings(self, value, expression, connection, context):
if (not value) and isinstance(value, six.string_types):
return None
return value
def get_db_converters(self, connection):
converters = super(ForeignKey, self).get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
return super(ForeignKey, self).get_col(alias, output_field or self.related_field)
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = SingleRelatedObjectDescriptor
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def _check_unique(self, **kwargs):
# override ForeignKey since check isn't applicable here
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, six.string_types):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.model_name
to = to.lower()
meta = type(str('Meta'), (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(
klass,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.rel.db_constraint,
),
to: models.ForeignKey(
to_model,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.rel.db_constraint,
)
})
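# Roughly, for a hypothetical Article.publications = ManyToManyField(Publication)
# the class built above is equivalent to:
#
#     class Article_publications(models.Model):
#         article = models.ForeignKey(Article, related_name='Article_publications+')
#         publication = models.ForeignKey(Publication, related_name='Article_publications+')
#
#         class Meta:
#             auto_created = Article
#             unique_together = ('article', 'publication')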
class ManyToManyField(RelatedField):
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
description = _("Many-to-many relationship")
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ManyToManyField must be "
"either a model, a model name, or the string %r" %
(self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it
# here to break early if there's a problem.
to = str(to)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(ManyToManyField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
errors.extend(self._check_ignored_options(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.null:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
hint=None,
obj=self,
id='fields.W340',
)
)
if len(self._validators) > 0:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
hint=None,
obj=self,
id='fields.W341',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.rel.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.rel.through._meta.app_label, self.rel.through.__name__)
else:
qualified_model_name = self.rel.through
errors = []
if self.rel.through not in apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
# Set some useful local variables
to_model = self.rel.to
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.rel.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.rel.symmetrical and
not self.rel.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_self > 2 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.rel.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
seen_to = sum(to_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_from > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=('If you want to create a recursive relationship, '
'use ForeignKey("self", symmetrical=False, '
'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
hint=('If you want to create a recursive '
'relationship, use ForeignKey("self", '
'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.rel.through,
id='fields.E336',
)
)
# Validate `through_fields`
if self.rel.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy"
if not (len(self.rel.through_fields) >= 2 and
self.rel.through_fields[0] and self.rel.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
source, through, target = from_model, self.rel.through, self.rel.to
source_field_name, target_field_name = self.rel.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'rel') and getattr(f.rel, 'to', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'rel') and
getattr(field.rel, 'to', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.rel.db_constraint is not True:
kwargs['db_constraint'] = self.rel.db_constraint
if self.rel.related_name is not None:
kwargs['related_name'] = self.rel.related_name
if self.rel.related_query_name is not None:
kwargs['related_query_name'] = self.rel.related_query_name
# Rel needs more work.
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
if getattr(self.rel, 'through', None) is not None:
if isinstance(self.rel.through, six.string_types):
kwargs['through'] = self.rel.through
elif not self.rel.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.rel.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[0]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
if (f.is_relation and f.rel.to == related.related_model and
(link_field_name is None or link_field_name == f.name)):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[1]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
# NOTE f.rel.to != f.related_model
if f.is_relation and f.rel.to == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
elif self.rel.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.rel.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)
super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
        # 2) The class owning the m2m field is abstract, or
# 3) The class owning the m2m field has been swapped out.
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, six.string_types):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db),
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
| apache-2.0 | -1,119,114,136,893,352,100 | 42.860527 | 119 | 0.568621 | false |
rallylee/gem5 | configs/ruby/GPU_VIPER.py | 12 | 25726 | #
# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
# Use SeqCount not class since we need global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
# Use CntlCount not class since we need global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
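    # Note on the counting scheme: seqCount() and cntrlCount() hand out
    # globally unique ids across all controller types, while versionCount()
    # is tracked per subclass, e.g. two TCPCntrl objects get versions 0 and 1
    # even if CPCntrl objects were created in between.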
class L1Cache(RubyCache):
resourceStalls = False
dataArrayBanks = 2
tagArrayBanks = 2
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
assoc = 16
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
size = "16kB"
assoc = 16
dataArrayBanks = 16 #number of data banks
tagArrayBanks = 16 #number of tag banks
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.assoc = options.tcp_assoc
self.resourceStalls = options.no_tcc_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency,
dataAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.issue_latency = 1
self.coalescer = VIPERCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = True
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
dataArrayBanks = 8
tagArrayBanks = 8
dataAccessLatency = 1
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.sqc_size)
self.assoc = options.sqc_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
size = MemorySize("256kB")
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.assoc = options.tcc_assoc
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
s = options.num_compute_units
tcc_size = s * 128
tcc_size = str(tcc_size)+'kB'
self.size = MemorySize(tcc_size)
self.dataArrayBanks = 64
self.tagArrayBanks = 64
else:
self.size = MemorySize(options.tcc_size)
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
self.size.value = self.size.value / options.num_tccs
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
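        # Worked example (illustrative option values): with --bw-scalor > 0
        # and 64 compute units the TCC becomes 64 * 128kB = 8192kB with 64
        # banks; on the default path a 256kB TCC split across 4 TCC banks
        # gives 64kB and 64 data/tag banks each, and
        # start_index_bit = log2(64B line) + log2(4 TCCs) = 8.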
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.L2cache.resourceStalls = options.no_tcc_resource_stalls
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class L3Cache(RubyCache):
dataArrayBanks = 16
tagArrayBanks = 16
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.assoc = options.l3_assoc
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = False
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = max(self.L3cache.dataAccessLatency, self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.number_of_TBEs = options.num_tbes
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type = "int", default = 4)
parser.add_option("--l3-data-latency", type = "int", default = 20)
parser.add_option("--l3-tag-latency", type = "int", default = 15)
parser.add_option("--cpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--gpu-to-dir-latency", type = "int", default = 120)
parser.add_option("--no-resource-stalls", action = "store_false",
default = True)
parser.add_option("--no-tcc-resource-stalls", action = "store_false",
default = True)
parser.add_option("--use-L3-on-WT", action = "store_true", default = False)
parser.add_option("--num-tbes", type = "int", default = 256)
parser.add_option("--l2-latency", type = "int", default = 50) # load to use
parser.add_option("--num-tccs", type = "int", default = 1,
help = "number of TCC banks in the GPU")
parser.add_option("--sqc-size", type = 'string', default = '32kB',
help = "SQC cache size")
parser.add_option("--sqc-assoc", type = 'int', default = 8,
help = "SQC cache assoc")
parser.add_option("--WB_L1", action = "store_true", default = False,
help = "writeback L1")
parser.add_option("--WB_L2", action = "store_true", default = False,
help = "writeback L2")
parser.add_option("--TCP_latency", type = "int", default = 4,
help = "TCP latency")
parser.add_option("--TCC_latency", type = "int", default = 16,
help = "TCC latency")
parser.add_option("--tcc-size", type = 'string', default = '256kB',
help = "agregate tcc size")
parser.add_option("--tcc-assoc", type = 'int', default = 16,
help = "tcc assoc")
parser.add_option("--tcp-size", type = 'string', default = '16kB',
help = "tcp size")
parser.add_option("--tcp-assoc", type = 'int', default = 16,
help = "tcp assoc")
parser.add_option("--noL1", action = "store_true", default = False,
help = "bypassL1")
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_VIPER':
panic("This script requires the GPU_VIPER protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
# For an odd number of CPUs, still create the right number of controllers
TCC_bits = int(math.log(options.num_tccs, 2))
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
crossbar_bw = None
mainCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
#Assuming a 2GHz clock
crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
mainCluster = Cluster(intBW=crossbar_bw)
else:
mainCluster = Cluster(intBW=8) # 16 GB/s
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = options.num_tbes
dir_cntrl.useL3OnWT = options.use_L3_on_WT
# the number_of_TBEs is inclusive of TBEs below
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
cpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer()
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp):
tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
number_of_TBEs = 2560)
# TBEs set to max outstanding requests
tcp_cntrl.createCP(options, ruby_system, system)
tcp_cntrl.WB = options.WB_L1
tcp_cntrl.disableL1 = options.noL1
tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency
exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.sequencer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the CP (TCP) controllers to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
tcc_cntrl.l2_response_latency = options.TCC_latency
tcc_cntrl_nodes.append(tcc_cntrl)
tcc_cntrl.WB = options.WB_L2
tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
# the number_of_TBEs is inclusive of TBEs below
# Connect the TCC controllers to the ruby network
tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcc_cntrl.requestFromTCP.slave = ruby_system.network.master
tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
tcc_cntrl.responseToCore.master = ruby_system.network.slave
tcc_cntrl.probeFromNB = MessageBuffer()
tcc_cntrl.probeFromNB.slave = ruby_system.network.master
tcc_cntrl.responseFromNB = MessageBuffer()
tcc_cntrl.responseFromNB.slave = ruby_system.network.master
tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
tcc_cntrl.requestToNB.master = ruby_system.network.slave
tcc_cntrl.responseToNB = MessageBuffer()
tcc_cntrl.responseToNB.master = ruby_system.network.slave
tcc_cntrl.unblockToNB = MessageBuffer()
tcc_cntrl.unblockToNB.master = ruby_system.network.slave
tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)
exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)
# connect all of the wire buffers between L3 and dirs up
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
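# Sketch of how a top-level gem5 config is expected to consume this module
# (illustrative only; the real wiring lives in the Ruby/example config
# scripts, not in this file):
#
#     (cpu_sequencers, dir_cntrl_nodes, root_cluster) = create_system(
#         options, full_system, system, dma_devices, ruby_system)
#     # the returned sequencers are attached to the CPU/GPU ports and the
#     # root cluster is handed to the interconnect/topology creation code.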
| bsd-3-clause | -8,977,783,867,979,160,000 | 37.169139 | 101 | 0.654241 | false |
Suwings/Yeinw | src/Crypto/Random/OSRNG/posix.py | 125 | 2835 | #
# Random/OSRNG/posix.py : OS entropy source for POSIX systems
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
__all__ = ['DevURandomRNG']
import errno
import os
import stat
from rng_base import BaseRNG
from Crypto.Util.py3compat import b
class DevURandomRNG(BaseRNG):
def __init__(self, devname=None):
if devname is None:
self.name = "/dev/urandom"
else:
self.name = devname
# Test that /dev/urandom is a character special device
f = open(self.name, "rb", 0)
fmode = os.fstat(f.fileno())[stat.ST_MODE]
if not stat.S_ISCHR(fmode):
f.close()
raise TypeError("%r is not a character special device" % (self.name,))
self.__file = f
BaseRNG.__init__(self)
def _close(self):
self.__file.close()
def _read(self, N):
# Starting with Python 3 open with buffering=0 returns a FileIO object.
# FileIO.read behaves like read(2) and not like fread(3) and thus we
# have to handle the case that read returns less data as requested here
# more carefully.
data = b("")
while len(data) < N:
try:
d = self.__file.read(N - len(data))
except IOError, e:
# read(2) has been interrupted by a signal; redo the read
if e.errno == errno.EINTR:
continue
raise
if d is None:
# __file is in non-blocking mode and no data is available
return data
if len(d) == 0:
# __file is in blocking mode and arrived at EOF
return data
data += d
return data
def new(*args, **kwargs):
return DevURandomRNG(*args, **kwargs)
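# Minimal usage sketch (not part of the original module). It assumes BaseRNG
# exposes read()/close() on top of the _read()/_close() hooks above, so on a
# POSIX system with /dev/urandom this prints a few random bytes.
if __name__ == "__main__":
    _rng = new()
    print repr(_rng.read(8))
    _rng.close()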
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 | -2,062,888,866,573,278,700 | 31.965116 | 82 | 0.591182 | false |
WoLpH/EventGhost | lib27/site-packages/curl/__init__.py | 8 | 7023 | '''A high-level interface to the pycurl extension'''
# ** mfx NOTE: the CGI class uses "black magic" using COOKIEFILE in
# combination with a non-existant file name. See the libcurl docs
# for more info.
import sys, pycurl
py3 = sys.version_info[0] == 3
# python 2/3 compatibility
if py3:
import urllib.parse as urllib_parse
from urllib.parse import urljoin
from io import BytesIO
else:
import urllib as urllib_parse
from urlparse import urljoin
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(SIGPIPE, SIG_IGN)
except ImportError:
pass
class Curl:
"High-level interface to pycurl functions."
def __init__(self, base_url="", fakeheaders=[]):
self.handle = pycurl.Curl()
# These members might be set.
self.set_url(base_url)
self.verbosity = 0
self.fakeheaders = fakeheaders
# Nothing past here should be modified by the caller.
self.payload = None
self.payload_io = BytesIO()
        self.hdr = ""
# Verify that we've got the right site; harmless on a non-SSL connect.
self.set_option(pycurl.SSL_VERIFYHOST, 2)
# Follow redirects in case it wants to take us to a CGI...
self.set_option(pycurl.FOLLOWLOCATION, 1)
self.set_option(pycurl.MAXREDIRS, 5)
self.set_option(pycurl.NOSIGNAL, 1)
# Setting this option with even a nonexistent file makes libcurl
# handle cookie capture and playback automatically.
self.set_option(pycurl.COOKIEFILE, "/dev/null")
# Set timeouts to avoid hanging too long
self.set_timeout(30)
# Use password identification from .netrc automatically
self.set_option(pycurl.NETRC, 1)
self.set_option(pycurl.WRITEFUNCTION, self.payload_io.write)
def header_callback(x):
self.hdr += x.decode('ascii')
self.set_option(pycurl.HEADERFUNCTION, header_callback)
def set_timeout(self, timeout):
"Set timeout for a retrieving an object"
self.set_option(pycurl.TIMEOUT, timeout)
def set_url(self, url):
"Set the base URL to be retrieved."
self.base_url = url
self.set_option(pycurl.URL, self.base_url)
def set_option(self, *args):
"Set an option on the retrieval."
self.handle.setopt(*args)
def set_verbosity(self, level):
"Set verbosity to 1 to see transactions."
self.set_option(pycurl.VERBOSE, level)
def __request(self, relative_url=None):
"Perform the pending request."
if self.fakeheaders:
self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
if relative_url:
self.set_option(pycurl.URL, urljoin(self.base_url, relative_url))
self.payload = None
self.hdr = ""
self.handle.perform()
self.payload = self.payload_io.getvalue()
return self.payload
def get(self, url="", params=None):
"Ship a GET request for a specified URL, capture the response."
if params:
url += "?" + urllib_parse.urlencode(params)
self.set_option(pycurl.HTTPGET, 1)
return self.__request(url)
def post(self, cgi, params):
"Ship a POST request to a specified CGI, capture the response."
self.set_option(pycurl.POST, 1)
self.set_option(pycurl.POSTFIELDS, urllib_parse.urlencode(params))
return self.__request(cgi)
def body(self):
"Return the body from the last response."
return self.payload
def header(self):
"Return the header from the last response."
return self.hdr
def get_info(self, *args):
"Get information about retrieval."
return self.handle.getinfo(*args)
def info(self):
"Return a dictionary with all info on the last response."
m = {}
m['effective-url'] = self.handle.getinfo(pycurl.EFFECTIVE_URL)
m['http-code'] = self.handle.getinfo(pycurl.HTTP_CODE)
m['total-time'] = self.handle.getinfo(pycurl.TOTAL_TIME)
m['namelookup-time'] = self.handle.getinfo(pycurl.NAMELOOKUP_TIME)
m['connect-time'] = self.handle.getinfo(pycurl.CONNECT_TIME)
m['pretransfer-time'] = self.handle.getinfo(pycurl.PRETRANSFER_TIME)
m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
m['size-upload'] = self.handle.getinfo(pycurl.SIZE_UPLOAD)
m['size-download'] = self.handle.getinfo(pycurl.SIZE_DOWNLOAD)
m['speed-upload'] = self.handle.getinfo(pycurl.SPEED_UPLOAD)
m['header-size'] = self.handle.getinfo(pycurl.HEADER_SIZE)
m['request-size'] = self.handle.getinfo(pycurl.REQUEST_SIZE)
m['content-length-download'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
m['content-length-upload'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
m['content-type'] = self.handle.getinfo(pycurl.CONTENT_TYPE)
m['response-code'] = self.handle.getinfo(pycurl.RESPONSE_CODE)
m['speed-download'] = self.handle.getinfo(pycurl.SPEED_DOWNLOAD)
m['ssl-verifyresult'] = self.handle.getinfo(pycurl.SSL_VERIFYRESULT)
m['filetime'] = self.handle.getinfo(pycurl.INFO_FILETIME)
m['starttransfer-time'] = self.handle.getinfo(pycurl.STARTTRANSFER_TIME)
m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
m['http-connectcode'] = self.handle.getinfo(pycurl.HTTP_CONNECTCODE)
m['httpauth-avail'] = self.handle.getinfo(pycurl.HTTPAUTH_AVAIL)
m['proxyauth-avail'] = self.handle.getinfo(pycurl.PROXYAUTH_AVAIL)
m['os-errno'] = self.handle.getinfo(pycurl.OS_ERRNO)
m['num-connects'] = self.handle.getinfo(pycurl.NUM_CONNECTS)
m['ssl-engines'] = self.handle.getinfo(pycurl.SSL_ENGINES)
m['cookielist'] = self.handle.getinfo(pycurl.INFO_COOKIELIST)
m['lastsocket'] = self.handle.getinfo(pycurl.LASTSOCKET)
m['ftp-entry-path'] = self.handle.getinfo(pycurl.FTP_ENTRY_PATH)
return m
def answered(self, check):
"Did a given check string occur in the last payload?"
return self.payload.find(check) >= 0
def close(self):
"Close a session, freeing resources."
if self.handle:
self.handle.close()
self.handle = None
self.hdr = ""
self.payload = ""
def __del__(self):
self.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
url = 'http://curl.haxx.se'
else:
url = sys.argv[1]
c = Curl()
c.get(url)
print(c.body())
print('='*74 + '\n')
import pprint
pprint.pprint(c.info())
print(c.get_info(pycurl.OS_ERRNO))
print(c.info()['os-errno'])
c.close()
| gpl-2.0 | -6,667,848,693,521,195,000 | 37.587912 | 90 | 0.639186 | false |
h3biomed/ansible | lib/ansible/modules/network/cnos/cnos_reload.py | 52 | 3430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup
configuration. The module is usually invoked after the running
configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are
written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Device is Reloading. Please wait..."
'''
import sys
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except Exception:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
command = 'reload'
outputfile = module.params['outputfile']
output = ''
cmd = [{'command': command, 'prompt': 'reboot system? (y/n): ',
'answer': 'y'}]
output = output + str(cnos.run_cnos_commands(module, cmd))
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg in "Device Response Timed out"):
module.exit_json(changed=True,
msg="Device is Reloading. Please wait...")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,173,355,663,336,433,000 | 29.353982 | 77 | 0.682799 | false |
AbsentMoniker/ECE463Honors | web2py/gluon/contrib/fpdf/template.py | 40 | 11941 | # -*- coding: iso-8859-1 -*-
"PDF Template Helper for FPDF.py"
__author__ = "Mariano Reingart <[email protected]>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
import sys,os,csv
from fpdf import FPDF
def rgb(col):
return (col // 65536), (col // 256 % 256), (col% 256)
class Template:
def __init__(self, infile=None, elements=None, format='A4', orientation='portrait',
title='', author='', subject='', creator='', keywords=''):
if elements:
self.elements = elements
self.keys = [v['name'].lower() for v in self.elements]
self.handlers = {'T': self.text, 'L': self.line, 'I': self.image,
'B': self.rect, 'BC': self.barcode, }
self.pg_no = 0
self.texts = {}
pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
pdf.set_title(title)
pdf.set_author(author)
pdf.set_creator(creator)
pdf.set_subject(subject)
pdf.set_keywords(keywords)
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
"Parse template format csv file and create elements dict"
keys = ('name','type','x1','y1','x2','y2','font','size',
'bold','italic','underline','foreground','background',
'align','text','priority', 'multiline')
self.elements = []
for row in csv.reader(open(infile, 'rb'), delimiter=delimiter):
kargs = {}
for i,v in enumerate(row):
if not v.startswith("'") and decimal_sep!=".":
v = v.replace(decimal_sep,".")
else:
v = v
if v=='':
v = None
else:
v = eval(v.strip())
kargs[keys[i]] = v
self.elements.append(kargs)
self.keys = [v['name'].lower() for v in self.elements]
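    # Illustrative row for the csv layout consumed by parse_csv above
    # (hypothetical values, not from the original file); the columns follow
    # the 'keys' tuple order, e.g. with delimiter=";":
    #
    #   'greeting';'T';20.0;13.0;190.0;13.0;'times';10.5;0;0;0;0;16777215;'L';'Hello';2;0
    #
    # i.e. name, type, x1, y1, x2, y2, font, size, bold, italic, underline,
    # foreground, background, align, text, priority, multiline.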
def add_page(self):
self.pg_no += 1
self.texts[self.pg_no] = {}
def __setitem__(self, name, value):
if self.has_key(name):
if isinstance(value,unicode):
value = value.encode("latin1","ignore")
elif value is None:
value = ""
else:
value = str(value)
self.texts[self.pg_no][name.lower()] = value
# setitem shortcut (may be further extended)
set = __setitem__
def has_key(self, name):
return name.lower() in self.keys
def __getitem__(self, name):
if self.has_key(name):
key = name.lower()
if key in self.texts:
# text for this page:
return self.texts[self.pg_no][key]
else:
# find first element for default text:
elements = [element for element in self.elements
if element['name'].lower() == key]
if elements:
return elements[0]['text']
def split_multicell(self, text, element_name):
"Divide (\n) a string using a given element width"
pdf = self.pdf
element = [element for element in self.elements
if element['name'].lower() == element_name.lower()][0]
style = ""
if element['bold']: style += "B"
if element['italic']: style += "I"
if element['underline']: style += "U"
pdf.set_font(element['font'],style,element['size'])
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish
if isinstance(text, unicode):
text = text.encode("latin1","ignore")
else:
text = str(text)
return pdf.multi_cell(w=element['x2']-element['x1'],
h=element['y2']-element['y1'],
txt=text,align=align,split_only=True)
def render(self, outfile, dest="F"):
pdf = self.pdf
for pg in range(1, self.pg_no+1):
pdf.add_page()
pdf.set_font('Arial','B',16)
pdf.set_auto_page_break(False,margin=0)
for element in sorted(self.elements,key=lambda x: x['priority']):
# make a copy of the element:
element = dict(element)
element['text'] = self.texts[pg].get(element['name'].lower(), element['text'])
if 'rotate' in element:
pdf.rotate(element['rotate'], element['x1'], element['y1'])
self.handlers[element['type'].upper()](pdf, **element)
if 'rotate' in element:
pdf.rotate(0)
return pdf.output(outfile, dest)
def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10,
bold=False, italic=False, underline=False, align="",
             foreground=0, background=65535, multiline=None,
*args, **kwargs):
if text:
if pdf.text_color!=rgb(foreground):
pdf.set_text_color(*rgb(foreground))
            if pdf.fill_color!=rgb(background):
                pdf.set_fill_color(*rgb(background))
font = font.strip().lower()
if font == 'arial black':
font = 'arial'
style = ""
for tag in 'B', 'I', 'U':
if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
text = text[3:-4]
style += tag
if bold: style += "B"
if italic: style += "I"
if underline: style += "U"
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
pdf.set_font(font,style,size)
##m_k = 72 / 2.54
##h = (size/m_k)
pdf.set_xy(x1,y1)
if multiline is None:
# multiline==None: write without wrapping/trimming (default)
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
elif multiline:
                # multiline==True: automatic word-wrap
pdf.multi_cell(w=x2-x1,h=y2-y1,txt=text,border=0,align=align)
else:
# multiline==False: trim to fit exactly the space defined
text = pdf.multi_cell(w=x2-x1, h=y2-y1,
txt=text, align=align, split_only=True)[0]
print "trimming: *%s*" % text
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
#pdf.Text(x=x1,y=y1,txt=text)
def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
#print "SetDrawColor", hex(foreground)
pdf.set_draw_color(*rgb(foreground))
#print "SetLineWidth", size
pdf.set_line_width(size)
pdf.line(x1, y1, x2, y2)
    def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, background=65535, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
        if pdf.fill_color!=rgb(background):
            pdf.set_fill_color(*rgb(background))
pdf.set_line_width(size)
pdf.rect(x1, y1, x2-x1, y2-y1)
def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')
def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
font = font.lower().strip()
if font == 'interleaved 2of5 nt':
pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)
if __name__ == "__main__":
# generate sample invoice (according Argentina's regulations)
import random
from decimal import Decimal
f = Template(format="A4",
title="Sample Invoice", author="Sample Company",
subject="Sample Customer", keywords="Electronic TAX Invoice")
f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",")
detail = "Lorem ipsum dolor sit amet, consectetur. " * 30
items = []
for i in range(1, 30):
ds = "Sample product %s" % i
qty = random.randint(1,10)
price = round(random.random()*100,3)
code = "%s%s%02d" % (chr(random.randint(65,90)), chr(random.randint(65,90)),i)
items.append(dict(code=code, unit='u',
qty=qty, price=price,
amount=qty*price,
ds="%s: %s" % (i,ds)))
# divide and count lines
lines = 0
li_items = []
for it in items:
qty = it['qty']
code = it['code']
unit = it['unit']
for ds in f.split_multicell(it['ds'], 'item_description01'):
# add item description line (without price nor amount)
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# clean qty and code (show only at first)
unit = qty = code = None
# set last item line price and amount
li_items[-1].update(amount = it['amount'],
price = it['price'])
obs="\n<U>Detail:</U>\n\n" + detail
for ds in f.split_multicell(obs, 'item_description01'):
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# calculate pages:
lines = len(li_items)
max_lines_per_page = 24
pages = lines / (max_lines_per_page - 1)
if lines % (max_lines_per_page - 1): pages = pages + 1
    # fill in the fields and render each page
for page in range(1, pages+1):
f.add_page()
f['page'] = 'Page %s of %s' % (page, pages)
if pages>1 and page<pages:
s = 'Continues on page %s' % (page+1)
else:
s = ''
f['item_description%02d' % (max_lines_per_page+1)] = s
f["company_name"] = "Sample Company"
f["company_logo"] = "tutorial/logo.png"
f["company_header1"] = "Some Address - somewhere -"
f["company_header2"] = "http://www.example.com"
f["company_footer1"] = "Tax Code ..."
f["company_footer2"] = "Tax/VAT ID ..."
f['number'] = '0001-00001234'
f['issue_date'] = '2010-09-10'
f['due_date'] = '2099-09-10'
f['customer_name'] = "Sample Client"
f['customer_address'] = "Siempreviva 1234"
# print line item...
li = 0
k = 0
total = Decimal("0.00")
for it in li_items:
k = k + 1
if k > page * (max_lines_per_page - 1):
break
if it['amount']:
total += Decimal("%.6f" % it['amount'])
if k > (page - 1) * (max_lines_per_page - 1):
li += 1
if it['qty'] is not None:
f['item_quantity%02d' % li] = it['qty']
if it['code'] is not None:
f['item_code%02d' % li] = it['code']
if it['unit'] is not None:
f['item_unit%02d' % li] = it['unit']
f['item_description%02d' % li] = it['ds']
if it['price'] is not None:
f['item_price%02d' % li] = "%0.3f" % it['price']
if it['amount'] is not None:
f['item_amount%02d' % li] = "%0.2f" % it['amount']
if pages == page:
f['net'] = "%0.2f" % (total/Decimal("1.21"))
f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21")))
f['total_label'] = 'Total:'
else:
f['total_label'] = 'SubTotal:'
f['total'] = "%0.2f" % total
f.render("./invoice.pdf")
if sys.platform.startswith("linux"):
os.system("evince ./invoice.pdf")
else:
os.system("./invoice.pdf")
| gpl-2.0 | -1,002,259,036,913,638,900 | 38.671096 | 104 | 0.499874 | false |
sadanandb/pmt | src/pyasm/application/common/dependency.py | 6 | 2711 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Dependency']
import sys, types
from base_app_info import BaseAppInfo
class Dependency(object):
    '''class which handles the texture dependencies in a file or session'''
def __init__(my, node_name, file_type, path=""):
my.file_type = file_type
my.path = path
my.info = BaseAppInfo.get()
my.impl = my.info.get_app_implementation()
my.app = my.info.get_app()
my.node_name = node_name
my.texture_paths = []
my.texture_nodes = []
my.texture_attrs = []
my.dependent_paths = []
def get_texture_info(my):
return my.texture_paths, my.texture_nodes, my.texture_attrs
def execute(my):
assert my.file_type
my.app.message("path [%s] [%s]" % (my.app.APPNAME, my.file_type) )
# find all of the textures in the extracted file
if my.app.APPNAME == "maya":
if my.file_type == "mayaAscii":
# handle the textures
my.texture_nodes, my.texture_paths, my.texture_attrs = \
                my.impl.get_textures_from_path(my.path)
# remember all of the geo paths
my.geo_paths = my.impl.get_geo_paths()
for geo_path in my.geo_paths:
my.dependent_paths.append(geo_path)
else:
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_session(my.node_name)
print my.texture_nodes, my.texture_paths, my.texture_attrs
elif my.app.APPNAME == "houdini":
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.app.get_file_references(my.node_name)
elif my.app.APPNAME == "xsi":
if my.file_type == "dotXSI":
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_path(my.path)
else:
my.texture_nodes, my.texture_paths, my.texture_attrs = \
my.impl.get_textures_from_session(my.node_name)
# add all of the texture paths
for texture_path in my.texture_paths:
# FIXME: all of the texture paths are uploaded!!!, even if
# they are identical
my.dependent_paths.append(texture_path)
return my.dependent_paths
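# Illustrative usage sketch (not part of the original file); it assumes an
# application session has already been registered with BaseAppInfo, and the
# node and file names below are hypothetical:
#
#     dep = Dependency("shot01_model", "mayaAscii", path="/tmp/shot01.ma")
#     dependent_paths = dep.execute()
#     texture_paths, texture_nodes, texture_attrs = dep.get_texture_info()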
| epl-1.0 | -3,597,987,080,122,693,600 | 28.791209 | 74 | 0.559941 | false |
switchboardOp/ansible | lib/ansible/modules/packaging/os/pkg5.py | 29 | 5253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
      - An FMRI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
),
supports_check_mode=True,
)
params = module.params
packages = []
    # pkg(5) FMRIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
re.search('^\d+(?:\.\d+)*', fragment)
and packages and re.search('@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
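    # Example of the re-join above (illustrative values): an FMRI such as
    # '[email protected],5.11-0.175.1.0.0.24.2' arrives from AnsibleModule
    # split at the comma into ['[email protected]', '5.11-0.175.1.0.0.24.2']
    # and is stitched back into a single package name here.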
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: (
not is_installed(module, p) or not is_latest(module, p)
),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if module.check_mode:
dry_run = ['-n']
else:
dry_run = []
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
    to_modify = list(filter(behaviour[state]['filter'], packages))
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ dry_run
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 7,785,762,709,391,093,000 | 27.090909 | 151 | 0.560632 | false |
40123148/w17b | static/Brython3.1.1-20150328-091302/Lib/opcode.py | 714 | 5442 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
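# Example of the lookup tables built above (illustrative, not part of the
# original module): mnemonic <-> numeric opcode, plus the HAVE_ARGUMENT test.
#
#     >>> opmap['LOAD_CONST']
#     100
#     >>> opname[100]
#     'LOAD_CONST'
#     >>> opmap['LOAD_CONST'] >= HAVE_ARGUMENT
#     True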
| agpl-3.0 | -2,839,968,717,421,256,700 | 26.21 | 80 | 0.635061 | false |
mars-knowsnothing/amos-bot | src/Lib/site-packages/pip/download.py | 334 | 32171 | from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.glibc import libc_ver
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
# Python 2.6 doesn't have ssl.OPENSSL_VERSION.
if HAS_TLS and sys.version_info[:2] > (2, 6):
data["openssl_version"] = ssl.OPENSSL_VERSION
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
# Get creds from netrc if we still don't have them
if username is None and password is None:
netrc_auth = get_netrc_auth(req.url)
username, password = netrc_auth if netrc_auth else (None, None)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
# is owned by the user current executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
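# Construction sketch (all values are illustrative): retries, cache and
# insecure_hosts are the keyword arguments popped in __init__ above, and the
# class-level timeout attribute supplies the default used by request().
#
#   session = PipSession(retries=3, cache="/tmp/pip-http-cache",
#                        insecure_hosts=["internal.example.com"])
#   session.timeout = 30
#   resp = session.get("https://pypi.org/simple/pip/", stream=True)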
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
return resp.url, resp.text
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
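# Round-trip sketch on a POSIX system (the path is hypothetical):
#
#   url = path_to_url("/tmp/example-1.0.tar.gz")  # 'file:///tmp/example-1.0.tar.gz'
#   path = url_to_path(url)                       # '/tmp/example-1.0.tar.gz'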
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def is_dir_url(link):
"""Return whether a file:// Link points to a directory.
``link`` must not have any other scheme but file://. Call is_file_url()
first.
"""
link_path = url_to_path(link.url_without_fragment)
return os.path.isdir(link_path)
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file, hashes):
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
def written_chunks(chunks):
for chunk in chunks:
content_file.write(chunk)
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info("Downloading %s (%s)", url, format_size(total_length))
progress_indicator = DownloadProgressBar(max=total_length).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
downloaded_chunks = written_chunks(
progress_indicator(
resp_read(CONTENT_CHUNK_SIZE),
CONTENT_CHUNK_SIZE
)
)
if hashes:
hashes.check_against_chunks(downloaded_chunks)
else:
consume(downloaded_chunks)
def _copy_file(filename, location, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
display_path(download_location), ('i', 'w', 'b', 'a'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
elif response == 'a':
sys.exit(-1)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
session=None, hashes=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link,
session,
temp_dir,
hashes)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir.
"""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if is_dir_url(link):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(link_path)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link,
download_dir,
hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(SETUPTOOLS_SHIM % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
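# Usage sketch (index URL is illustrative): pairing the transport with a plain
# ServerProxy routes XML-RPC traffic through the given PipSession, so proxies,
# retries and authentication behave the same as for regular index requests.
#
#   transport = PipXmlrpcTransport("https://pypi.org/pypi", session)
#   pypi = xmlrpc_client.ServerProxy("https://pypi.org/pypi", transport=transport)
#   hits = pypi.search({"name": "example-package"})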
def unpack_url(link, location, download_dir=None,
only_download=False, session=None, hashes=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir, hashes=hashes)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
hashes=hashes
)
if only_download:
write_delete_marker_file(location)
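# Minimal call sketch (location is hypothetical): VCS and file: links are
# dispatched to their helpers above, anything else goes through unpack_http_url
# with a fresh PipSession when none is supplied.
#
#   unpack_url(link, "/tmp/pip-build/example", download_dir=None,
#              only_download=False, session=None, hashes=None)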
def _download_http_url(link, session, temp_dir, hashes):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file, hashes)
return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
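# Sketch (the directory is hypothetical): the helper returns the existing path
# only when the file is present and passes the supplied hash check; on a hash
# mismatch the stale file is removed and None is returned.
#
#   cached = _check_download_dir(link, "/tmp/pip-downloads", hashes=None)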
| gpl-3.0 | 1,788,143,703,886,796,300 | 34.50883 | 79 | 0.600385 | false |
mrunge/horizon | openstack_dashboard/dashboards/admin/routers/panel.py | 43 | 1067 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class Routers(horizon.Panel):
name = _("Routers")
slug = 'routers'
permissions = ('openstack.services.network',)
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
if network_config.get('enable_router', True):
dashboard.Admin.register(Routers)
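# Settings sketch: because registration is conditional on 'enable_router', a
# deployment can hide this panel from its local_settings.py with, for example:
#
#   OPENSTACK_NEUTRON_NETWORK = {'enable_router': False}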
| apache-2.0 | 2,891,312,357,989,939,000 | 34.566667 | 78 | 0.731959 | false |
proggy/uic | doc/conf.py | 1 | 7924 | # -*- coding: utf-8 -*-
#
# uic documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 23:00:14 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'uic'
copyright = u'2014, Daniel Jung'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'uicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'uic.tex', u'uic Documentation',
u'Daniel Jung', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'uic', u'uic Documentation',
[u'Daniel Jung'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'uic', u'uic Documentation',
u'Daniel Jung', 'uic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 | -5,592,889,928,623,766,000 | 31.211382 | 124 | 0.703306 | false |
LordSputnik/mbws3 | ws/schema/artist.py | 1 | 1672 | import datetime
from sqlalchemy.dialects.postgresql import UUID
from ws import db
class Artist(db.Model):
id = db.Column(db.Integer, primary_key=True)
gid = db.Column(UUID, unique=True, nullable=False)
name = db.Column(db.UnicodeText, nullable=False)
sort_name = db.Column(db.UnicodeText, nullable=False)
begin_date_year = db.Column(db.SmallInteger)
begin_date_month = db.Column(db.SmallInteger)
begin_date_day = db.Column(db.SmallInteger)
end_date_year = db.Column(db.SmallInteger)
end_date_month = db.Column(db.SmallInteger)
end_date_day = db.Column(db.SmallInteger)
type_id = db.Column('type', db.Integer, db.ForeignKey('artist_type.id'))
area_id = db.Column('area', db.Integer, db.ForeignKey('area.id'))
gender_id = db.Column('gender',db.Integer, db.ForeignKey('gender.id'))
comment = db.Column(db.Unicode(255), default=u'', nullable=False)
edits_pending = db.Column(db.Integer, default=0, nullable=False)
last_updated = db.Column(db.DateTime(timezone=True), default=datetime.datetime.utcnow)
ended = db.Column(db.Boolean, default=False, nullable=False)
begin_area_id = db.Column('begin_area',db.Integer, db.ForeignKey('area.id'))
end_area_id = db.Column('end_area',db.Integer, db.ForeignKey('area.id'))
type = db.relationship('ArtistType')
gender = db.relationship('Gender')
area = db.relationship('Area', foreign_keys=area_id)
begin_area = db.relationship('Area', foreign_keys=begin_area_id)
end_area = db.relationship('Area', foreign_keys=end_area_id)
@property
def begin_date(self):
pass
@property
def end_date(self):
pass
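    # begin_date/end_date are intentionally left as stubs above. One possible
    # implementation (an assumption, not part of this model) would simply bundle
    # the stored parts:
    #
    #   @property
    #   def begin_date(self):
    #       if self.begin_date_year is None:
    #           return None
    #       return (self.begin_date_year, self.begin_date_month, self.begin_date_day)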
| gpl-3.0 | 2,722,513,207,775,684,600 | 34.574468 | 90 | 0.688995 | false |
Xilinx/hopper | hopper/commands/CommandHopperBase.py | 1 | 3733 | # Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys
import urlparse
from hopper.utils.logger import *
import hopper.utils.args
import hopper.utils.Proxy
import hopper.utils.tasks
class CommandHopperBase(hopper.utils.args.CommandBase):
threadLimit = hopper.utils.args.ValueOption(
None, "threads",
default = None,
description = "The maximum number of concurrent threads avaliable.\n" +
"(Default is to automatically detect)")
mirror = hopper.utils.args.ValueOption(
None, "mirror",
default = None,
description = "The location of a git repository mirror. These repositories will be used to seed the clones.\n" +
"(This can be defined via the environment variable HOPPER_MIRROR.)")
locallayers = hopper.utils.args.ValueOption(
None, "local-layers",
default = None,
description = "The location of layers for which are local and can be symlinked to. This is useful for development.\n" +
"(This can be defined via the environment variable HOPPER_LOCAL.)")
def __init__(self):
hopper.utils.args.CommandBase.__init__(self)
self.environment = None
def execute(self, handler = None):
hopper.utils.args.CommandBase.execute(self)
if self.threadLimit:
threads = self.threadLimit
else:
threads = CommandHopperBase.getDefaultThreads()
self.environment = hopper.utils.tasks.Environment(
basepath = os.getcwd(),
mirrorpath = CommandHopperBase.valueOrEnvironment(self.mirror, "HOPPER_MIRROR"),
proxy = CommandHopperBase.getProxy(),
threads = threads,
locallayers = CommandHopperBase.valueOrEnvironment(self.locallayers, "HOPPER_LOCAL"))
return True
@staticmethod
def valueOrEnvironment(value, env):
if value:
return value
elif env in os.environ:
return os.environ[env]
return None
@staticmethod
def getDefaultThreads():
import multiprocessing
systemthreads = multiprocessing.cpu_count()
activecpus = systemthreads / 2
debug("Detected %s threads avaliable to system (using half, %s threads)" % (systemthreads, activecpus))
# Check if using LSF and account for it
if "LSB_DJOB_NUMPROC" in os.environ:
try:
activecpus = int(os.environ["LSB_DJOB_NUMPROC"])
warning("Forced default threads by LSF environment to %s threads" % activecpus)
except:
pass
return activecpus
@staticmethod
def getHttpProxyUri():
if "http_proxy" in os.environ:
return urlparse.urlparse(os.environ["http_proxy"])
elif "HTTP_PROXY" in os.environ:
return urlparse.urlparse(os.environ["HTTP_PROXY"])
return None
@staticmethod
def getProxy():
uri = CommandHopperBase.getHttpProxyUri()
if uri:
return hopper.utils.Proxy.Proxy(uri.hostname, uri.port)
return None
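# Environment sketch (proxy host/port are hypothetical): getProxy() returns None
# unless an http proxy variable is present in the environment.
#
#   os.environ["HTTP_PROXY"] = "http://proxy.example.com:8080"
#   proxy = CommandHopperBase.getProxy()  # -> Proxy("proxy.example.com", 8080)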
| mit | 1,581,784,744,065,346,800 | 34.552381 | 122 | 0.746049 | false |
swenson/sagewiki | unidecode/unidecode/x058.py | 252 | 4678 | data = (
'Ku ', # 0x00
'Ke ', # 0x01
'Tang ', # 0x02
'Kun ', # 0x03
'Ni ', # 0x04
'Jian ', # 0x05
'Dui ', # 0x06
'Jin ', # 0x07
'Gang ', # 0x08
'Yu ', # 0x09
'E ', # 0x0a
'Peng ', # 0x0b
'Gu ', # 0x0c
'Tu ', # 0x0d
'Leng ', # 0x0e
'[?] ', # 0x0f
'Ya ', # 0x10
'Qian ', # 0x11
'[?] ', # 0x12
'An ', # 0x13
'[?] ', # 0x14
'Duo ', # 0x15
'Nao ', # 0x16
'Tu ', # 0x17
'Cheng ', # 0x18
'Yin ', # 0x19
'Hun ', # 0x1a
'Bi ', # 0x1b
'Lian ', # 0x1c
'Guo ', # 0x1d
'Die ', # 0x1e
'Zhuan ', # 0x1f
'Hou ', # 0x20
'Bao ', # 0x21
'Bao ', # 0x22
'Yu ', # 0x23
'Di ', # 0x24
'Mao ', # 0x25
'Jie ', # 0x26
'Ruan ', # 0x27
'E ', # 0x28
'Geng ', # 0x29
'Kan ', # 0x2a
'Zong ', # 0x2b
'Yu ', # 0x2c
'Huang ', # 0x2d
'E ', # 0x2e
'Yao ', # 0x2f
'Yan ', # 0x30
'Bao ', # 0x31
'Ji ', # 0x32
'Mei ', # 0x33
'Chang ', # 0x34
'Du ', # 0x35
'Tuo ', # 0x36
'Yin ', # 0x37
'Feng ', # 0x38
'Zhong ', # 0x39
'Jie ', # 0x3a
'Zhen ', # 0x3b
'Feng ', # 0x3c
'Gang ', # 0x3d
'Chuan ', # 0x3e
'Jian ', # 0x3f
'Pyeng ', # 0x40
'Toride ', # 0x41
'Xiang ', # 0x42
'Huang ', # 0x43
'Leng ', # 0x44
'Duan ', # 0x45
'[?] ', # 0x46
'Xuan ', # 0x47
'Ji ', # 0x48
'Ji ', # 0x49
'Kuai ', # 0x4a
'Ying ', # 0x4b
'Ta ', # 0x4c
'Cheng ', # 0x4d
'Yong ', # 0x4e
'Kai ', # 0x4f
'Su ', # 0x50
'Su ', # 0x51
'Shi ', # 0x52
'Mi ', # 0x53
'Ta ', # 0x54
'Weng ', # 0x55
'Cheng ', # 0x56
'Tu ', # 0x57
'Tang ', # 0x58
'Que ', # 0x59
'Zhong ', # 0x5a
'Li ', # 0x5b
'Peng ', # 0x5c
'Bang ', # 0x5d
'Sai ', # 0x5e
'Zang ', # 0x5f
'Dui ', # 0x60
'Tian ', # 0x61
'Wu ', # 0x62
'Cheng ', # 0x63
'Xun ', # 0x64
'Ge ', # 0x65
'Zhen ', # 0x66
'Ai ', # 0x67
'Gong ', # 0x68
'Yan ', # 0x69
'Kan ', # 0x6a
'Tian ', # 0x6b
'Yuan ', # 0x6c
'Wen ', # 0x6d
'Xie ', # 0x6e
'Liu ', # 0x6f
'Ama ', # 0x70
'Lang ', # 0x71
'Chang ', # 0x72
'Peng ', # 0x73
'Beng ', # 0x74
'Chen ', # 0x75
'Cu ', # 0x76
'Lu ', # 0x77
'Ou ', # 0x78
'Qian ', # 0x79
'Mei ', # 0x7a
'Mo ', # 0x7b
'Zhuan ', # 0x7c
'Shuang ', # 0x7d
'Shu ', # 0x7e
'Lou ', # 0x7f
'Chi ', # 0x80
'Man ', # 0x81
'Biao ', # 0x82
'Jing ', # 0x83
'Qi ', # 0x84
'Shu ', # 0x85
'Di ', # 0x86
'Zhang ', # 0x87
'Kan ', # 0x88
'Yong ', # 0x89
'Dian ', # 0x8a
'Chen ', # 0x8b
'Zhi ', # 0x8c
'Xi ', # 0x8d
'Guo ', # 0x8e
'Qiang ', # 0x8f
'Jin ', # 0x90
'Di ', # 0x91
'Shang ', # 0x92
'Mu ', # 0x93
'Cui ', # 0x94
'Yan ', # 0x95
'Ta ', # 0x96
'Zeng ', # 0x97
'Qi ', # 0x98
'Qiang ', # 0x99
'Liang ', # 0x9a
'[?] ', # 0x9b
'Zhui ', # 0x9c
'Qiao ', # 0x9d
'Zeng ', # 0x9e
'Xu ', # 0x9f
'Shan ', # 0xa0
'Shan ', # 0xa1
'Ba ', # 0xa2
'Pu ', # 0xa3
'Kuai ', # 0xa4
'Dong ', # 0xa5
'Fan ', # 0xa6
'Que ', # 0xa7
'Mo ', # 0xa8
'Dun ', # 0xa9
'Dun ', # 0xaa
'Dun ', # 0xab
'Di ', # 0xac
'Sheng ', # 0xad
'Duo ', # 0xae
'Duo ', # 0xaf
'Tan ', # 0xb0
'Deng ', # 0xb1
'Wu ', # 0xb2
'Fen ', # 0xb3
'Huang ', # 0xb4
'Tan ', # 0xb5
'Da ', # 0xb6
'Ye ', # 0xb7
'Sho ', # 0xb8
'Mama ', # 0xb9
'Yu ', # 0xba
'Qiang ', # 0xbb
'Ji ', # 0xbc
'Qiao ', # 0xbd
'Ken ', # 0xbe
'Yi ', # 0xbf
'Pi ', # 0xc0
'Bi ', # 0xc1
'Dian ', # 0xc2
'Jiang ', # 0xc3
'Ye ', # 0xc4
'Yong ', # 0xc5
'Bo ', # 0xc6
'Tan ', # 0xc7
'Lan ', # 0xc8
'Ju ', # 0xc9
'Huai ', # 0xca
'Dang ', # 0xcb
'Rang ', # 0xcc
'Qian ', # 0xcd
'Xun ', # 0xce
'Lan ', # 0xcf
'Xi ', # 0xd0
'He ', # 0xd1
'Ai ', # 0xd2
'Ya ', # 0xd3
'Dao ', # 0xd4
'Hao ', # 0xd5
'Ruan ', # 0xd6
'Mama ', # 0xd7
'Lei ', # 0xd8
'Kuang ', # 0xd9
'Lu ', # 0xda
'Yan ', # 0xdb
'Tan ', # 0xdc
'Wei ', # 0xdd
'Huai ', # 0xde
'Long ', # 0xdf
'Long ', # 0xe0
'Rui ', # 0xe1
'Li ', # 0xe2
'Lin ', # 0xe3
'Rang ', # 0xe4
'Ten ', # 0xe5
'Xun ', # 0xe6
'Yan ', # 0xe7
'Lei ', # 0xe8
'Ba ', # 0xe9
'[?] ', # 0xea
'Shi ', # 0xeb
'Ren ', # 0xec
'[?] ', # 0xed
'Zhuang ', # 0xee
'Zhuang ', # 0xef
'Sheng ', # 0xf0
'Yi ', # 0xf1
'Mai ', # 0xf2
'Ke ', # 0xf3
'Zhu ', # 0xf4
'Zhuang ', # 0xf5
'Hu ', # 0xf6
'Hu ', # 0xf7
'Kun ', # 0xf8
'Yi ', # 0xf9
'Hu ', # 0xfa
'Xu ', # 0xfb
'Kun ', # 0xfc
'Shou ', # 0xfd
'Mang ', # 0xfe
'Zun ', # 0xff
)
| gpl-2.0 | 7,406,078,624,884,944,000 | 17.131783 | 20 | 0.392048 | false |
twitchyliquid64/misc-scripts | s3tool/boto-develop/boto/services/sonofmmm.py | 170 | 3498 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.service import Service
from boto.services.message import ServiceMessage
import os
import mimetypes
class SonOfMMM(Service):
def __init__(self, config_file=None):
super(SonOfMMM, self).__init__(config_file)
self.log_file = '%s.log' % self.instance_id
self.log_path = os.path.join(self.working_dir, self.log_file)
boto.set_file_logger(self.name, self.log_path)
if self.sd.has_option('ffmpeg_args'):
self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
else:
self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
self.output_mimetype = self.sd.get('output_mimetype')
if self.sd.has_option('output_ext'):
self.output_ext = self.sd.get('output_ext')
else:
self.output_ext = mimetypes.guess_extension(self.output_mimetype)
self.output_bucket = self.sd.get_obj('output_bucket')
self.input_bucket = self.sd.get_obj('input_bucket')
        # check to see if there are any messages queued
# if not, create messages for all files in input_bucket
m = self.input_queue.read(1)
if not m:
self.queue_files()
def queue_files(self):
boto.log.info('Queueing files from %s' % self.input_bucket.name)
for key in self.input_bucket:
boto.log.info('Queueing %s' % key.name)
m = ServiceMessage()
if self.output_bucket:
d = {'OutputBucket' : self.output_bucket.name}
else:
d = None
m.for_key(key, d)
self.input_queue.write(m)
def process_file(self, in_file_name, msg):
base, ext = os.path.splitext(in_file_name)
out_file_name = os.path.join(self.working_dir,
base+self.output_ext)
command = self.command % (in_file_name, out_file_name)
boto.log.info('running:\n%s' % command)
status = self.run(command)
if status == 0:
return [(out_file_name, self.output_mimetype)]
else:
return []
def shutdown(self):
if os.path.isfile(self.log_path):
if self.output_bucket:
key = self.output_bucket.new_key(self.log_file)
key.set_contents_from_filename(self.log_path)
super(SonOfMMM, self).shutdown()
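# Config sketch (values are illustrative; only the option names read in
# __init__ above are taken from the code): ffmpeg_args is appended to the
# ffmpeg binary path and must contain the two %s placeholders used later.
#
#   ffmpeg_args = -y -i %s -ar 44100 %s
#   output_mimetype = audio/mp3
#   output_ext = .mp3
#   input_bucket = my-input-bucket
#   output_bucket = my-output-bucket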
| mit | 9,052,836,612,045,018,000 | 42.185185 | 80 | 0.641509 | false |
v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/fabric_error.py | 2 | 1515 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class FabricError(Model):
"""The REST API operations for Service Fabric return standard HTTP status
codes. This type defines the additional information returned from the
Service Fabric API operations that are not successful.
:param error:
:type error: :class:`FabricErrorError
<azure.servicefabric.models.FabricErrorError>`
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'Error', 'type': 'FabricErrorError'},
}
def __init__(self, error):
self.error = error
class FabricErrorException(HttpOperationError):
"""Server responsed with exception of type: 'FabricError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args)
| mit | 660,511,733,324,183,300 | 30.5625 | 95 | 0.629043 | false |
coderfi/ansible-modules-extras | net_infrastructure/bigip_monitor_tcp.py | 33 | 16829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author: Serge van Ginderachter
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
type:
description:
- The template type of this monitor template
required: false
default: 'tcp'
        choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent:
description:
- The parent template of this monitor template
required: false
default: 'tcp'
choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
        required: false
default: none
receive:
description:
- The receive string for the monitor call
        required: false
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
            - port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create TCP Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-tcp
- name: BIGIP F5 | Create TCP half open Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
    type: tcp_half_open
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-halftcp
- name: BIGIP F5 | Remove TCP Monitor
local_action:
module: bigip_monitor_tcp
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
with_flattened:
- f5monitors-tcp
- f5monitors-halftcp
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
# ===========================================
# bigip_monitor module generic methods.
# these should be re-useable for other monitor types
#
def bigip_api(bigip, user, password):
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
return api
def check_monitor_exists(module, api, monitor, parent):
# hack to determine if monitor exists
result = False
try:
ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
if ttype == TEMPLATE_TYPE and parent == parent2:
result = True
else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent (%s)' % (ttype, parent))
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_monitor(api, monitor, template_attributes):
try:
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
return False
else:
# genuine exception
raise
return True
def delete_monitor(api, monitor):
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
except bigsuds.OperationFailed, e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
else:
# genuine exception
raise
return True
def check_string_property(api, monitor, str_property):
try:
return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
return True
def set_string_property(api, monitor, str_property):
api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
try:
return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
return True
def set_integer_property(api, monitor, int_property):
api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
changed = False
for str_property in template_string_properties:
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
if not module.check_mode:
set_string_property(api, monitor, str_property)
changed = True
for int_property in template_integer_properties:
if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
if not module.check_mode:
set_integer_property(api, monitor, int_property)
changed = True
return changed
def get_ipport(api, monitor):
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
try:
api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
return True, ""
except bigsuds.OperationFailed, e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
# begin monitor specific stuff
module = AnsibleModule(
argument_spec = dict(
server = dict(required=True),
user = dict(required=True),
password = dict(required=True),
partition = dict(default='Common'),
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
parent = dict(default=DEFAULT_PARENT),
parent_partition = dict(default='Common'),
send = dict(required=False),
receive = dict(required=False),
ip = dict(required=False),
port = dict(required=False, type='int'),
interval = dict(required=False, type='int'),
timeout = dict(required=False, type='int'),
time_until_up = dict(required=False, type='int', default=0)
),
supports_check_mode=True
)
server = module.params['server']
user = module.params['user']
password = module.params['password']
partition = module.params['partition']
parent_partition = module.params['parent_partition']
state = module.params['state']
name = module.params['name']
type = 'TTYPE_' + module.params['type'].upper()
parent = "/%s/%s" % (parent_partition, module.params['parent'])
monitor = "/%s/%s" % (partition, name)
send = module.params['send']
receive = module.params['receive']
ip = module.params['ip']
port = module.params['port']
interval = module.params['interval']
timeout = module.params['timeout']
time_until_up = module.params['time_until_up']
# tcp monitor has multiple types, so overrule
global TEMPLATE_TYPE
TEMPLATE_TYPE = type
# end monitor specific stuff
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
api = bigip_api(server, user, password)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
# ipport is a special setting
if monitor_exists: # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
else: # use API defaults if not defined to create it
if interval is None:
interval = 5
if timeout is None:
timeout = 16
if ip is None:
ip = '0.0.0.0'
if port is None:
port = 0
if send is None:
send = ''
if receive is None:
receive = ''
# define and set address type
if ip == '0.0.0.0' and port == 0:
address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
elif ip == '0.0.0.0' and port != 0:
address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
elif ip != '0.0.0.0' and port != 0:
address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
else:
address_type = 'ATYPE_UNSET'
ipport = {'address_type': address_type,
'ipport': {'address': ip,
'port': port}}
template_attributes = {'parent_template': parent,
'interval': interval,
'timeout': timeout,
'dest_ipport': ipport,
'is_read_only': False,
'is_directly_usable': True}
# monitor specific stuff
if type == 'TTYPE_TCP':
template_string_properties = [{'type': 'STYPE_SEND',
'value': send},
{'type': 'STYPE_RECEIVE',
'value': receive}]
else:
template_string_properties = []
template_integer_properties = [{'type': 'ITYPE_INTERVAL',
'value': interval},
{'type': 'ITYPE_TIMEOUT',
'value': timeout},
{'type': 'ITYPE_TIME_UNTIL_UP',
'value': interval}]
# main logic, monitor generic
try:
result = {'changed': False} # default
if state == 'absent':
if monitor_exists:
if not module.check_mode:
# possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
else: # state present
## check for monitor itself
if not monitor_exists: # create it
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
else:
result['changed'] |= True
## check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
if monitor_exists and not module.check_mode:
result['changed'] |= update_monitor_properties(api, module, monitor,
template_string_properties,
template_integer_properties)
# else assume nothing changed
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
#else: monitor doesn't exist (check mode) or ipport is already ok
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -6,946,240,441,992,531,000 | 33.415133 | 158 | 0.57294 | false |
s0undt3ch/sorbic | sorbic/db.py | 2 | 7598 | '''
Interface to interact on a database level
'''
# Import python libs
import os
import io
import shutil
# Import sorbic libs
import sorbic.ind.hdht
import sorbic.stor.files
import sorbic.utils.traverse
# Import third party libs
import msgpack
DB_OPTS = (
'key_delim',
'hash_limit',
'key_hash',
'fmt',
'fmt_map',
'header_len',
'serial')
class DB(object):
'''
Databaseing
'''
def __init__(
self,
root,
key_delim='/',
hash_limit=0xfffff,
key_hash='sha1',
fmt='>KsQH',
fmt_map=None,
header_len=1024,
serial='msgpack'):
self.root = root
self.key_delim = key_delim
self.hash_limit = hash_limit
self.key_hash = key_hash
self.fmt = fmt
self.fmt_map = fmt_map
self.header_len = header_len
self.serial = serial
self._get_db_meta()
self.index = sorbic.ind.hdht.HDHT(
self.root,
self.key_delim,
self.hash_limit,
self.key_hash,
self.fmt,
self.fmt_map,
self.header_len)
self.write_stor_funcs = self.__gen_write_stor_funcs()
self.read_stor_funcs = self.__gen_read_stor_funcs()
def __gen_write_stor_funcs(self):
'''
Return the storage write functions dict mapping to types
'''
return {'doc': self.index.write_doc_stor,
'file': sorbic.stor.files.write}
def __gen_read_stor_funcs(self):
'''
Return the storage read functions dict mapping to types
'''
return {'doc': self.index.read_doc_stor,
'file': sorbic.stor.files.read}
def _get_db_meta(self):
'''
Read in the database metadata to preserve the original behavior
        from when the database was created
'''
db_meta = os.path.join(self.root, 'sorbic_db_meta.mp')
meta = {}
if os.path.isfile(db_meta):
with io.open(db_meta, 'rb') as fp_:
meta = msgpack.loads(fp_.read())
for entry in DB_OPTS:
meta[entry] = meta.get(entry, getattr(self, entry))
setattr(self, entry, meta[entry])
if not os.path.isdir(self.root):
os.makedirs(self.root)
with io.open(db_meta, 'w+b') as fp_:
fp_.write(msgpack.dumps(meta))
def _get_storage(self, entries, **kwargs):
stor = self.read_stor_funcs[entries['data']['t']](entries, self.serial, **kwargs)
return stor
def write_stor(self, table_entry, data, serial, type_):
'''
Write the applicable storage type subsytem
'''
return self.write_stor_funcs[type_](
table_entry,
data,
serial)
def insert(self, key, data, id_=None, type_='doc', serial=None, **kwargs):
'''
Insert a key into the database
'''
c_key = self.index.raw_crypt_key(key)
table_entry = self.index.get_table_entry(key, c_key)
serial = serial if serial else self.serial
kwargs.update(self.write_stor(
table_entry,
data,
serial,
type_))
return self.index.commit(
table_entry,
key,
c_key,
id_,
type_,
**kwargs)
def get_meta(self, key, id_=None, count=None):
'''
        Retrieve a meta entry
'''
return self.index.get_index_entry(key, id_, count)
def get(self, key, id_=None, meta=False, count=None, **kwargs):
'''
        Retrieve a data entry
'''
entries = self.get_meta(key, id_, count)
if not entries:
return None
if count:
ret = []
for index_entry in entries['data']:
meta_entries = {'table': entries['table'], 'data': index_entry}
stor_ret = self._get_storage(meta_entries, **kwargs)
if meta:
ret.append({'data': stor_ret, 'meta': index_entry})
else:
ret.append(self._get_storage(meta_entries, **kwargs))
return ret
if not meta:
return self._get_storage(entries, **kwargs)
else:
ret = {}
ret['data'] = self._get_storage(entries, **kwargs)
ret['meta'] = entries
return ret
def compress(self, d_key=None, num=None):
'''
Compress a single given index, remove any associated data
'''
fn_root = self.root
if not d_key or d_key == self.key_delim:
pass
else:
fn_root = self.index.entry_root('{0}/blank'.format(d_key))
fn_ = os.path.join(fn_root, 'sorbic_table_{0}'.format(num))
trans_fn = os.path.join(fn_root, 'trans_table_{0}'.format(num))
if os.path.exists(trans_fn):
os.remove(trans_fn)
table = self.index.get_hash_table(fn_)
trans_table = self.index.get_hash_table(trans_fn)
table_entries = []
for entry in self.index._get_table_entries(fn_):
table_entries.append(entry)
for entry in table_entries:
self._compress_entry(entry, table, trans_table)
table['fp'].close()
trans_table['fp'].close()
self.index.tables.pop(fn_)
self.index.tables.pop(trans_fn)
shutil.move(trans_fn, fn_)
def _compress_entry(self, entry, table, trans_table):
'''
        Read the table entries that should be kept from the given entry and
        write them fresh to the trans table
'''
c_key = self.index.raw_crypt_key(entry['key'])
i_entries = self.index.get_index_entry(entry['key'], count=0xffffffff)
tte = {}
tte['tfn'] = trans_table['fp'].name
tte['key'] = i_entries['table']['key']
tte['prev'] = i_entries['table']['prev']
tte['pos'] = i_entries['table']['pos']
tte['rev'] = 0
keeps = []
for ind in reversed(range(len(i_entries['data']))):
i_entry = i_entries['data'][ind]
if i_entry['_status'] != 'k':
continue
keeps.append(i_entry)
for i_entry in keeps:
serial = i_entry.get('serial', self.serial)
get_entries = {'table': i_entries['table'], 'data': i_entry}
stor = self._get_storage(get_entries)
i_entry.update(self.write_stor(
tte,
stor,
serial,
i_entry.get('type', 'doc')))
kwargs = i_entry
key = kwargs.pop('key')
id_ = kwargs.pop('id')
if 'type' in kwargs:
type_ = kwargs.pop('type')
else:
type_ = 'doc'
self.index.commit(tte, key, c_key, id_, type_, **kwargs)
tte['rev'] += 1
def listdir(self, d_key):
'''
List the contents of a directory
'''
return self.index.listdir(d_key)
def rmdir(self, d_key):
'''
Recursively remove a key directory and all subdirs and subkeys.
THIS OPERATION IS IRREVERSIBLE!!
'''
return self.index.rmdir(d_key)
def rm(self, key, id_=None):
'''
        Mark a key for deletion; if the id is omitted then the key itself
        and all revs will be removed. THIS OPERATION IS IRREVERSIBLE!!
'''
return self.index.rm_key(key, id_)
| apache-2.0 | 9,061,666,680,189,123,000 | 31.059072 | 89 | 0.51553 | false |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/twisted/test/test_rebuild.py | 10 | 7749 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os
import types
from twisted.trial import unittest
from twisted.python import rebuild
import crash_test_dummy
f = crash_test_dummy.foo
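# Simple dummy class hierarchy used as fixtures by the rebuild tests below;
# Buz mixes an old-style base (Bar) with a new-style one (Baz).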
class Foo: pass
class Bar(Foo): pass
class Baz(object): pass
class Buz(Bar, Baz): pass
class HashRaisesRuntimeError:
"""
Things that don't hash (raise an Exception) should be ignored by the
rebuilder.
@ivar hashCalled: C{bool} set to True when __hash__ is called.
"""
def __init__(self):
self.hashCalled = False
def __hash__(self):
self.hashCalled = True
raise RuntimeError('not a TypeError!')
unhashableObject = None # set in test_hashException
class RebuildTests(unittest.TestCase):
"""
Simple testcase for rebuilding, to at least exercise the code.
"""
def setUp(self):
self.libPath = self.mktemp()
os.mkdir(self.libPath)
self.fakelibPath = os.path.join(self.libPath, 'twisted_rebuild_fakelib')
os.mkdir(self.fakelibPath)
file(os.path.join(self.fakelibPath, '__init__.py'), 'w').close()
sys.path.insert(0, self.libPath)
def tearDown(self):
sys.path.remove(self.libPath)
def testFileRebuild(self):
from twisted.python.util import sibpath
import shutil, time
shutil.copyfile(sibpath(__file__, "myrebuilder1.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
from twisted_rebuild_fakelib import myrebuilder
a = myrebuilder.A()
try:
object
except NameError:
pass
else:
from twisted.test import test_rebuild
b = myrebuilder.B()
class C(myrebuilder.B):
pass
test_rebuild.C = C
C()
i = myrebuilder.Inherit()
self.assertEqual(a.a(), 'a')
# necessary because the file has not "changed" if a second has not gone
# by in unix. This sucks, but it's not often that you'll be doing more
# than one reload per second.
time.sleep(1.1)
shutil.copyfile(sibpath(__file__, "myrebuilder2.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
rebuild.rebuild(myrebuilder)
try:
object
except NameError:
pass
else:
b2 = myrebuilder.B()
self.assertEqual(b2.b(), 'c')
self.assertEqual(b.b(), 'c')
self.assertEqual(i.a(), 'd')
self.assertEqual(a.a(), 'b')
# more work to be done on new-style classes
# self.assertEqual(c.b(), 'c')
def testRebuild(self):
"""
Rebuilding an unchanged module.
"""
# This test would actually pass if rebuild was a no-op, but it
# ensures rebuild doesn't break stuff while being a less
# complex test than testFileRebuild.
x = crash_test_dummy.X('a')
rebuild.rebuild(crash_test_dummy, doLog=False)
# Instance rebuilding is triggered by attribute access.
x.do()
self.failUnlessIdentical(x.__class__, crash_test_dummy.X)
self.failUnlessIdentical(f, crash_test_dummy.foo)
def testComponentInteraction(self):
x = crash_test_dummy.XComponent()
x.setAdapter(crash_test_dummy.IX, crash_test_dummy.XA)
x.getComponent(crash_test_dummy.IX)
rebuild.rebuild(crash_test_dummy, 0)
newComponent = x.getComponent(crash_test_dummy.IX)
newComponent.method()
self.assertEqual(newComponent.__class__, crash_test_dummy.XA)
# Test that a duplicate registerAdapter is not allowed
from twisted.python import components
self.failUnlessRaises(ValueError, components.registerAdapter,
crash_test_dummy.XA, crash_test_dummy.X,
crash_test_dummy.IX)
def testUpdateInstance(self):
global Foo, Buz
b = Buz()
class Foo:
def foo(self):
pass
class Buz(Bar, Baz):
x = 10
rebuild.updateInstance(b)
assert hasattr(b, 'foo'), "Missing method on rebuilt instance"
assert hasattr(b, 'x'), "Missing class attribute on rebuilt instance"
def testBananaInteraction(self):
from twisted.python import rebuild
from twisted.spread import banana
rebuild.latestClass(banana.Banana)
def test_hashException(self):
"""
Rebuilding something that has a __hash__ that raises a non-TypeError
shouldn't cause rebuild to die.
"""
global unhashableObject
unhashableObject = HashRaisesRuntimeError()
def _cleanup():
global unhashableObject
unhashableObject = None
self.addCleanup(_cleanup)
rebuild.rebuild(rebuild)
self.assertEqual(unhashableObject.hashCalled, True)
class NewStyleTests(unittest.TestCase):
"""
Tests for rebuilding new-style classes of various sorts.
"""
def setUp(self):
self.m = types.ModuleType('whipping')
sys.modules['whipping'] = self.m
def tearDown(self):
del sys.modules['whipping']
del self.m
def test_slots(self):
"""
Try to rebuild a new style class with slots defined.
"""
classDefinition = (
"class SlottedClass(object):\n"
" __slots__ = ['a']\n")
exec classDefinition in self.m.__dict__
inst = self.m.SlottedClass()
inst.a = 7
exec classDefinition in self.m.__dict__
rebuild.updateInstance(inst)
self.assertEqual(inst.a, 7)
self.assertIdentical(type(inst), self.m.SlottedClass)
if sys.version_info < (2, 6):
test_slots.skip = "__class__ assignment for class with slots is only available starting Python 2.6"
def test_errorSlots(self):
"""
Try to rebuild a new style class with slots defined: this should fail.
"""
classDefinition = (
"class SlottedClass(object):\n"
" __slots__ = ['a']\n")
exec classDefinition in self.m.__dict__
inst = self.m.SlottedClass()
inst.a = 7
exec classDefinition in self.m.__dict__
self.assertRaises(rebuild.RebuildError, rebuild.updateInstance, inst)
if sys.version_info >= (2, 6):
test_errorSlots.skip = "__class__ assignment for class with slots should work starting Python 2.6"
def test_typeSubclass(self):
"""
Try to rebuild a base type subclass.
"""
classDefinition = (
"class ListSubclass(list):\n"
" pass\n")
exec classDefinition in self.m.__dict__
inst = self.m.ListSubclass()
inst.append(2)
exec classDefinition in self.m.__dict__
rebuild.updateInstance(inst)
self.assertEqual(inst[0], 2)
self.assertIdentical(type(inst), self.m.ListSubclass)
def test_instanceSlots(self):
"""
Test that when rebuilding an instance with a __slots__ attribute, it
fails accurately instead of giving a L{rebuild.RebuildError}.
"""
classDefinition = (
"class NotSlottedClass(object):\n"
" pass\n")
exec classDefinition in self.m.__dict__
inst = self.m.NotSlottedClass()
inst.__slots__ = ['a']
classDefinition = (
"class NotSlottedClass:\n"
" pass\n")
exec classDefinition in self.m.__dict__
# Moving from new-style class to old-style should fail.
self.assertRaises(TypeError, rebuild.updateInstance, inst)
| mit | -4,069,525,868,831,994,400 | 29.75 | 107 | 0.595432 | false |
BryanQuigley/sos | sos/report/plugins/buildah.py | 5 | 1873 | # Copyright (C) 2018 Red Hat, Inc., Jake Hunsaker <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
class Buildah(Plugin, RedHatPlugin):
short_desc = 'Buildah container and image builder'
plugin_name = 'buildah'
packages = ('buildah',)
profiles = ('container',)
def setup(self):
subcmds = [
'containers',
'containers --all',
'images',
'images --all',
'version'
]
self.add_cmd_output(["buildah %s" % sub for sub in subcmds])
def make_chowdah(aurdah):
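            # run a buildah command and normalise the result dict: 'auutput'
            # carries stdout and 'is_wicked_pissah' flags a zero exit status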
chowdah = self.exec_cmd(aurdah)
chowdah['auutput'] = chowdah.pop('output')
chowdah['is_wicked_pissah'] = chowdah.pop('status') == 0
return chowdah
containahs = make_chowdah('buildah containers -n')
if containahs['is_wicked_pissah']:
for containah in containahs['auutput'].splitlines():
# obligatory Tom Brady
goat = containah.split()[-1]
self.add_cmd_output('buildah inspect -t container %s' % goat,
subdir='containers')
pitchez = make_chowdah('buildah images -n')
if pitchez['is_wicked_pissah']:
for pitchah in pitchez['auutput'].splitlines():
brady = pitchah.split()[1]
self.add_cmd_output('buildah inspect -t image %s' % brady,
subdir='images')
# vim: set et ts=4 sw=4 :
| gpl-2.0 | -1,552,753,387,998,770,400 | 33.685185 | 77 | 0.584624 | false |
lebabouin/CouchPotatoServer-develop | libs/tmdb3/cache_file.py | 10 | 13285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache_file.py
# Python Library
# Author: Raymond Wagner
# Purpose: Persistant file-backed cache using /tmp/ to share data
# using flock or msvcrt.locking to allow safe concurrent
# access.
#-----------------------
import struct
import errno
import json
import os
import io
from cStringIO import StringIO
from tmdb_exceptions import *
from cache_engine import CacheEngine, CacheObject
####################
# Cache File Format
#------------------
# cache version (2) unsigned short
# slot count (2) unsigned short
# slot 0: timestamp (8) double
# slot 0: lifetime (4) unsigned int
# slot 0: seek point (4) unsigned int
# slot 1: timestamp
# slot 1: lifetime index slots are IDd by their query date and
# slot 1: seek point are filled incrementally forwards. lifetime
# .... is how long after query date before the item
# .... expires, and seek point is the location of the
# slot N-2: timestamp start of data for that entry. 256 empty slots
# slot N-2: lifetime are pre-allocated, allowing fast updates.
# slot N-2: seek point when all slots are filled, the cache file is
# slot N-1: timestamp rewritten from scratch to add more slots.
# slot N-1: lifetime
# slot N-1: seek point
# block 1 (?) ASCII
# block 2
# .... blocks are just simple ASCII text, generated
# .... as independent objects by the JSON encoder
# block N-2
# block N-1
#
####################
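# The header and slot records described above correspond to the struct formats
# 'HH' (version, slot count) and 'dII' (timestamp, lifetime, seek point) used
# by FileEngine._struct and FileCacheObject._struct below.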
def _donothing(*args, **kwargs):
pass
try:
import fcntl
class Flock( object ):
"""
Context manager to flock file for the duration the object exists.
Referenced file will be automatically unflocked as the interpreter
exits the context.
Supports an optional callback to process the error and optionally
suppress it.
"""
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
fcntl.flock(self.fileobj, self.operation)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
fcntl.flock(self.fileobj, fcntl.LOCK_UN)
return suppress
def parse_filename(filename):
if '$' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif filename.startswith('/'):
# check for absolute path
return filename
# return path with temp directory prepended
return '/tmp/' + filename
except ImportError:
import msvcrt
class Flock( object ):
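        """
        msvcrt-based fallback for Windows, mirroring the fcntl Flock
        context manager above.
        """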
LOCK_EX = msvcrt.LK_LOCK
LOCK_SH = msvcrt.LK_LOCK
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
self.size = os.path.getsize(self.fileobj.name)
msvcrt.locking(self.fileobj.fileno(), self.operation, self.size)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
msvcrt.locking(self.fileobj.fileno(), msvcrt.LK_UNLCK, self.size)
return suppress
def parse_filename(filename):
if '%' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif (ord(filename[0]) in (range(65,91)+range(99,123))) \
and (filename[1:3] == ':\\'):
# check for absolute drive path (e.g. C:\...)
return filename
elif (filename.count('\\') >= 3) and (filename.startswith('\\\\')):
# check for absolute UNC path (e.g. \\server\...)
return filename
# return path with temp directory prepended
return os.path.expandvars(os.path.join('%TEMP%',filename))
class FileCacheObject( CacheObject ):
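    """
    A single cached (key, data) pair, serialized as JSON and tracked by its
    query timestamp, lifetime and seek position within the cache file.
    """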
_struct = struct.Struct('dII') # double and two ints
# timestamp, lifetime, position
@classmethod
def fromFile(cls, fd):
dat = cls._struct.unpack(fd.read(cls._struct.size))
obj = cls(None, None, dat[1], dat[0])
obj.position = dat[2]
return obj
def __init__(self, *args, **kwargs):
self._key = None
self._data = None
self._size = None
self._buff = StringIO()
super(FileCacheObject, self).__init__(*args, **kwargs)
@property
def size(self):
if self._size is None:
self._buff.seek(0,2)
size = self._buff.tell()
if size == 0:
if (self._key is None) or (self._data is None):
raise RuntimeError
json.dump([self.key, self.data], self._buff)
self._size = self._buff.tell()
            else:
                self._size = size
return self._size
@size.setter
def size(self, value): self._size = value
@property
def key(self):
if self._key is None:
try:
self._key, self._data = json.loads(self._buff.getvalue())
except:
pass
return self._key
@key.setter
def key(self, value): self._key = value
@property
def data(self):
if self._data is None:
self._key, self._data = json.loads(self._buff.getvalue())
return self._data
@data.setter
def data(self, value): self._data = value
def load(self, fd):
fd.seek(self.position)
self._buff.seek(0)
self._buff.write(fd.read(self.size))
def dumpslot(self, fd):
pos = fd.tell()
fd.write(self._struct.pack(self.creation, self.lifetime, self.position))
def dumpdata(self, fd):
self.size
fd.seek(self.position)
fd.write(self._buff.getvalue())
class FileEngine( CacheEngine ):
"""Simple file-backed engine."""
name = 'file'
_struct = struct.Struct('HH') # two shorts for version and count
_version = 2
def __init__(self, parent):
super(FileEngine, self).__init__(parent)
self.configure(None)
def configure(self, filename, preallocate=256):
self.preallocate = preallocate
self.cachefile = filename
self.size = 0
self.free = 0
self.age = 0
def _init_cache(self):
# only run this once
self._init_cache = _donothing
if self.cachefile is None:
raise TMDBCacheError("No cache filename given.")
self.cachefile = parse_filename(self.cachefile)
try:
# attempt to read existing cache at filename
# handle any errors that occur
self._open('r+b')
# seems to have read fine, make sure we have write access
if not os.access(self.cachefile, os.W_OK):
raise TMDBCacheWriteError(self.cachefile)
except IOError as e:
if e.errno == errno.ENOENT:
# file does not exist, create a new one
try:
self._open('w+b')
self._write([])
except IOError as e:
if e.errno == errno.ENOENT:
# directory does not exist
raise TMDBCacheDirectoryError(self.cachefile)
elif e.errno == errno.EACCES:
# user does not have rights to create new file
raise TMDBCacheWriteError(self.cachefile)
else:
# let the unhandled error continue through
raise
            elif e.errno == errno.EACCES:
# file exists, but we do not have permission to access it
raise TMDBCacheReadError(self.cachefile)
else:
# let the unhandled error continue through
raise
def get(self, date):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_SH): # lock for shared access
# return any new objects in the cache
return self._read(date)
def put(self, key, value, lifetime):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_EX): # lock for exclusive access
newobjs = self._read(self.age)
newobjs.append(FileCacheObject(key, value, lifetime))
# this will cause a new file object to be opened with the proper
# access mode, however the Flock should keep the old object open
# and properly locked
self._open('r+b')
self._write(newobjs)
return newobjs
def _open(self, mode='r+b'):
# enforce binary operation
try:
if self.cachefd.mode == mode:
# already opened in requested mode, nothing to do
self.cachefd.seek(0)
return
except: pass # catch issue of no cachefile yet opened
self.cachefd = io.open(self.cachefile, mode)
def _read(self, date):
try:
self.cachefd.seek(0)
version, count = self._struct.unpack(\
self.cachefd.read(self._struct.size))
if version != self._version:
# old version, break out and well rewrite when finished
raise Exception
self.size = count
cache = []
while count:
# loop through storage definitions
obj = FileCacheObject.fromFile(self.cachefd)
cache.append(obj)
count -= 1
except:
# failed to read information, so just discard it and return empty
self.size = 0
self.free = 0
return []
# get end of file
self.cachefd.seek(0,2)
position = self.cachefd.tell()
newobjs = []
emptycount = 0
# walk backward through all, collecting new content and populating size
while len(cache):
obj = cache.pop()
if obj.creation == 0:
# unused slot, skip
emptycount += 1
elif obj.expired:
# object has passed expiration date, no sense processing
continue
elif obj.creation > date:
# used slot with new data, process
obj.size, position = position - obj.position, obj.position
newobjs.append(obj)
# update age
self.age = max(self.age, obj.creation)
elif len(newobjs):
# end of new data, break
break
# walk forward and load new content
for obj in newobjs:
obj.load(self.cachefd)
self.free = emptycount
return newobjs
def _write(self, data):
if self.free and (self.size != self.free):
# we only care about the last data point, since the rest are
# already stored in the file
data = data[-1]
# determine write position of data in cache
self.cachefd.seek(0,2)
end = self.cachefd.tell()
data.position = end
# write incremental update to free slot
self.cachefd.seek(4 + 16*(self.size-self.free))
data.dumpslot(self.cachefd)
data.dumpdata(self.cachefd)
else:
# rewrite cache file from scratch
# pull data from parent cache
data.extend(self.parent()._data.values())
data.sort(key=lambda x: x.creation)
# write header
size = len(data) + self.preallocate
self.cachefd.seek(0)
self.cachefd.truncate()
self.cachefd.write(self._struct.pack(self._version, size))
# write storage slot definitions
prev = None
for d in data:
if prev == None:
d.position = 4 + 16*size
else:
d.position = prev.position + prev.size
d.dumpslot(self.cachefd)
prev = d
# fill in allocated slots
for i in range(2**8):
self.cachefd.write(FileCacheObject._struct.pack(0, 0, 0))
# write stored data
for d in data:
d.dumpdata(self.cachefd)
self.cachefd.flush()
def expire(self, key):
pass
| gpl-3.0 | 894,537,346,810,222,000 | 32.976982 | 80 | 0.54332 | false |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/organizations.py | 1 | 1807 | from awxkit.api.mixins import HasCreate, HasInstanceGroups, HasNotifications, DSAdapter
from awxkit.utils import random_title, suppress, PseudoNamespace
from awxkit.api.resources import resources
import awxkit.exceptions as exc
from . import base
from . import page
class Organization(HasCreate, HasInstanceGroups, HasNotifications, base.Base):
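    """
    Page object for an AWX organization, with helpers to grant admin and
    member roles and to build creation payloads.
    """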
NATURAL_KEY = ('name',)
def add_admin(self, user):
if isinstance(user, page.Page):
user = user.json
with suppress(exc.NoContent):
self.related.admins.post(user)
def add_user(self, user):
if isinstance(user, page.Page):
user = user.json
with suppress(exc.NoContent):
self.related.users.post(user)
def payload(self, **kwargs):
payload = PseudoNamespace(name=kwargs.get('name') or 'Organization - {}'.format(random_title()),
description=kwargs.get('description') or random_title(10))
return payload
def create_payload(self, name='', description='', **kwargs):
payload = self.payload(name=name, description=description, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, name='', description='', **kwargs):
payload = self.create_payload(name=name, description=description, **kwargs)
return self.update_identity(Organizations(self.connection).post(payload))
page.register_page([resources.organization,
(resources.organizations, 'post')], Organization)
class Organizations(page.PageList, Organization):
pass
page.register_page([resources.organizations,
resources.user_organizations,
resources.project_organizations], Organizations)
| apache-2.0 | -7,692,307,553,024,060,000 | 34.431373 | 104 | 0.664084 | false |
canaltinova/servo | etc/ci/performance/set_s3_policy.py | 22 | 1108 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import boto3
def main():
parser = argparse.ArgumentParser(
description=("Set the policy of the servo-perf bucket. "
"Remember to set your S3 credentials "
"https://github.com/boto/boto3"))
parser.parse_args()
s3 = boto3.resource('s3')
BUCKET = 'servo-perf'
POLICY = """{
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Principal":"*",
"Action":[
"s3:ListBucket",
"s3:GetBucketLocation"
],
"Resource":"arn:aws:s3:::servo-perf"
},
{
"Effect":"Allow",
"Principal":"*",
"Action":[
"s3:GetObject",
"s3:GetObjectAcl"
],
"Resource":"arn:aws:s3:::servo-perf/*"
}
]
}"""
s3.BucketPolicy(BUCKET).put(Policy=POLICY)
print("Done!")
if __name__ == "__main__":
main()
| mpl-2.0 | -7,621,816,651,125,992,000 | 21.16 | 69 | 0.550542 | false |
xasopheno/audio_visual | audio/venv/lib/python2.7/site-packages/wheel/archive.py | 62 | 2376 | """
Archive tools for wheel.
"""
import os
import os.path
import time
import zipfile
from distutils import log
def archive_wheelfile(base_name, base_dir):
"""Archive all files under `base_dir` in a whl file and name it like
`base_name`.
"""
olddir = os.path.abspath(os.curdir)
base_name = os.path.abspath(base_name)
try:
os.chdir(base_dir)
return make_wheelfile_inner(base_name)
finally:
os.chdir(olddir)
def make_wheelfile_inner(base_name, base_dir='.'):
"""Create a whl file from all the files under 'base_dir'.
Places .dist-info at the end of the archive."""
zip_filename = base_name + ".whl"
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
# Some applications need reproducible .whl files, but they can't do this
# without forcing the timestamp of the individual ZipInfo objects. See
# issue #143.
timestamp = os.environ.get('SOURCE_DATE_EPOCH')
if timestamp is None:
date_time = None
else:
date_time = time.gmtime(int(timestamp))[0:6]
# XXX support bz2, xz when available
zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED)
score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
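    # .dist-info members are deferred and written last, ranked so that RECORD
    # (which lists every archive member) ends up as the final entry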
deferred = []
def writefile(path, date_time):
st = os.stat(path)
if date_time is None:
mtime = time.gmtime(st.st_mtime)
date_time = mtime[0:6]
zinfo = zipfile.ZipInfo(path, date_time)
zinfo.external_attr = st.st_mode << 16
zinfo.compress_type = zipfile.ZIP_DEFLATED
with open(path, 'rb') as fp:
zip.writestr(zinfo, fp.read())
log.info("adding '%s'" % path)
for dirpath, dirnames, filenames in os.walk(base_dir):
# Sort the directory names so that `os.walk` will walk them in a
# defined order on the next iteration.
dirnames.sort()
for name in sorted(filenames):
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
if dirpath.endswith('.dist-info'):
deferred.append((score.get(name, 0), path))
else:
writefile(path, date_time)
deferred.sort()
for score, path in deferred:
writefile(path, date_time)
zip.close()
return zip_filename
| mit | 100,369,475,405,908,210 | 28.7 | 78 | 0.607744 | false |
vmturbo/nova | nova/tests/unit/virt/libvirt/storage/test_lvm.py | 8 | 8203 | # Copyright 2012 NTT Data. All Rights Reserved.
# Copyright 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from nova import exception
from nova import test
from nova import utils
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
class LvmTestCase(test.NoDBTestCase):
def test_get_volume_size(self):
executes = []
def fake_execute(*cmd, **kwargs):
executes.append(cmd)
return 123456789, None
expected_commands = [('blockdev', '--getsize64', '/dev/foo')]
self.stub_out('nova.utils.execute', fake_execute)
size = lvm.get_volume_size('/dev/foo')
self.assertEqual(expected_commands, executes)
self.assertEqual(123456789, size)
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr=('blockdev: cannot open /dev/foo: '
'No such device or address')))
def test_get_volume_size_not_found(self, mock_execute):
self.assertRaises(exception.VolumeBDMPathNotFound,
lvm.get_volume_size, '/dev/foo')
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr=('blockdev: cannot open /dev/foo: '
'No such file or directory')))
def test_get_volume_size_not_found_file(self, mock_execute):
self.assertRaises(exception.VolumeBDMPathNotFound,
lvm.get_volume_size, '/dev/foo')
@mock.patch.object(libvirt_utils, 'path_exists', return_value=True)
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr='blockdev: i am sad in other ways'))
def test_get_volume_size_unexpectd_error(self, mock_execute,
mock_path_exists):
self.assertRaises(processutils.ProcessExecutionError,
lvm.get_volume_size, '/dev/foo')
def test_lvm_clear(self):
def fake_lvm_size(path):
return lvm_size
def fake_execute(*cmd, **kwargs):
executes.append(cmd)
self.stub_out('nova.virt.libvirt.storage.lvm.get_volume_size',
fake_lvm_size)
self.stub_out('nova.utils.execute', fake_execute)
# Test the correct dd commands are run for various sizes
lvm_size = 1
executes = []
expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v1',
'seek=0', 'count=1', 'conv=fdatasync')]
lvm.clear_volume('/dev/v1')
self.assertEqual(expected_commands, executes)
lvm_size = 1024
executes = []
expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v2',
'seek=0', 'count=1', 'conv=fdatasync')]
lvm.clear_volume('/dev/v2')
self.assertEqual(expected_commands, executes)
lvm_size = 1025
executes = []
expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v3',
'seek=0', 'count=1', 'conv=fdatasync')]
expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v3',
'seek=1024', 'count=1', 'conv=fdatasync')]
lvm.clear_volume('/dev/v3')
self.assertEqual(expected_commands, executes)
lvm_size = 1048576
executes = []
expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v4',
'seek=0', 'count=1', 'oflag=direct')]
lvm.clear_volume('/dev/v4')
self.assertEqual(expected_commands, executes)
lvm_size = 1048577
executes = []
expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v5',
'seek=0', 'count=1', 'oflag=direct')]
expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v5',
'seek=1048576', 'count=1', 'conv=fdatasync')]
lvm.clear_volume('/dev/v5')
self.assertEqual(expected_commands, executes)
lvm_size = 1234567
executes = []
expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v6',
'seek=0', 'count=1', 'oflag=direct')]
expected_commands += [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v6',
'seek=1024', 'count=181', 'conv=fdatasync')]
expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v6',
'seek=1233920', 'count=647', 'conv=fdatasync')]
lvm.clear_volume('/dev/v6')
self.assertEqual(expected_commands, executes)
# Test volume_clear_size limits the size
lvm_size = 10485761
CONF.set_override('volume_clear_size', '1', 'libvirt')
executes = []
expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v7',
'seek=0', 'count=1', 'oflag=direct')]
lvm.clear_volume('/dev/v7')
self.assertEqual(expected_commands, executes)
CONF.set_override('volume_clear_size', '2', 'libvirt')
lvm_size = 1048576
executes = []
expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v9',
'seek=0', 'count=1', 'oflag=direct')]
lvm.clear_volume('/dev/v9')
self.assertEqual(expected_commands, executes)
# Test volume_clear=shred
CONF.set_override('volume_clear', 'shred', 'libvirt')
CONF.set_override('volume_clear_size', '0', 'libvirt')
lvm_size = 1048576
executes = []
expected_commands = [('shred', '-n3', '-s1048576', '/dev/va')]
lvm.clear_volume('/dev/va')
self.assertEqual(expected_commands, executes)
CONF.set_override('volume_clear', 'shred', 'libvirt')
CONF.set_override('volume_clear_size', '1', 'libvirt')
lvm_size = 10485761
executes = []
expected_commands = [('shred', '-n3', '-s1048576', '/dev/vb')]
lvm.clear_volume('/dev/vb')
self.assertEqual(expected_commands, executes)
# Test volume_clear=none does nothing
CONF.set_override('volume_clear', 'none', 'libvirt')
executes = []
expected_commands = []
lvm.clear_volume('/dev/vc')
self.assertEqual(expected_commands, executes)
@mock.patch.object(utils, 'execute',
side_effect=processutils.ProcessExecutionError(
stderr=('blockdev: cannot open /dev/foo: '
'No such file or directory')))
def test_lvm_clear_ignore_lvm_not_found(self, mock_execute):
lvm.clear_volume('/dev/foo')
def test_fail_remove_all_logical_volumes(self):
def fake_execute(*args, **kwargs):
if 'vol2' in args:
raise processutils.ProcessExecutionError('Error')
with test.nested(
mock.patch.object(lvm, 'clear_volume'),
mock.patch.object(libvirt_utils, 'execute',
side_effect=fake_execute)) as (mock_clear, mock_execute):
self.assertRaises(exception.VolumesNotRemoved,
lvm.remove_volumes,
['vol1', 'vol2', 'vol3'])
self.assertEqual(3, mock_execute.call_count)
| apache-2.0 | 8,972,324,500,791,158,000 | 42.173684 | 79 | 0.560405 | false |
ain7/www.ain7.org | ain7/annuaire/migrations/0002_auto_20160331_0126.py | 1 | 8973 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-30 23:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('annuaire', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('organizations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='position',
name='office',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='positions', to='organizations.Office', verbose_name='\xe9tablissement'),
),
migrations.AddField(
model_name='phonenumber',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_phonenumber', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='phonenumber',
name='person',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='annuaire.Person'),
),
migrations.AddField(
model_name='personprivate',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_personprivate', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='personprivate',
name='member_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annuaire.MemberType', verbose_name='membre'),
),
migrations.AddField(
model_name='personprivate',
name='person',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='annuaire.Person', verbose_name='personne'),
),
migrations.AddField(
model_name='personprivate',
name='person_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annuaire.PersonType', verbose_name='type'),
),
migrations.AddField(
model_name='person',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='annuaire.Country', verbose_name='nationalit\xe9'),
),
migrations.AddField(
model_name='person',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_person', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='person',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='utilisateur'),
),
migrations.AddField(
model_name='leisureitem',
name='ain7member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='leisure', to='annuaire.AIn7Member'),
),
migrations.AddField(
model_name='leisureitem',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_leisureitem', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='instantmessaging',
name='person',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='instant_messagings', to='annuaire.Person'),
),
migrations.AddField(
model_name='email',
name='person',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='emails', to='annuaire.Person'),
),
migrations.AddField(
model_name='email',
name='position',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mail', to='annuaire.Position'),
),
migrations.AddField(
model_name='educationitem',
name='ain7member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='education', to='annuaire.AIn7Member'),
),
migrations.AddField(
model_name='educationitem',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_educationitem', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='clubmembership',
name='club',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='annuaire.Club', verbose_name='club'),
),
migrations.AddField(
model_name='clubmembership',
name='member',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='club_memberships', to='annuaire.AIn7Member', verbose_name='membre'),
),
migrations.AddField(
model_name='club',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_club', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='club',
name='school',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clubs', to='annuaire.School', verbose_name='\xe9cole'),
),
migrations.AddField(
model_name='ain7member',
name='ceremonial_duties',
field=models.ManyToManyField(blank=True, to='annuaire.CeremonialDuty', verbose_name='fonctions honorifiques'),
),
migrations.AddField(
model_name='ain7member',
name='decorations',
field=models.ManyToManyField(blank=True, to='annuaire.Decoration', verbose_name='d\xe9corations'),
),
migrations.AddField(
model_name='ain7member',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_ain7member', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='ain7member',
name='marital_status',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='annuaire.MaritalStatus', verbose_name='statut marital'),
),
migrations.AddField(
model_name='ain7member',
name='person',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='annuaire.Person', verbose_name='personne'),
),
migrations.AddField(
model_name='ain7member',
name='promos',
field=models.ManyToManyField(blank=True, related_name='students', to='annuaire.Promo', verbose_name='Promotions'),
),
migrations.AddField(
model_name='address',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annuaire.Country', verbose_name='pays'),
),
migrations.AddField(
model_name='address',
name='last_change_by',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_changed_address', to='annuaire.Person', verbose_name='Auteur de la derni\xe8re modification'),
),
migrations.AddField(
model_name='address',
name='person',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='annuaire.Person'),
),
migrations.AddField(
model_name='address',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annuaire.AddressType', verbose_name='type'),
),
]
| lgpl-2.1 | -175,399,658,491,438,530 | 49.982955 | 239 | 0.632453 | false |
PSUdaemon/trafficserver | tests/tools/traffic-replay/h2Replay.py | 2 | 13166 | #!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from threading import Thread
import sys
from multiprocessing import current_process
import sessionvalidation.sessionvalidation as sv
import lib.result as result
import extractHeader
import mainProcess
import json
from hyper import HTTP20Connection
from hyper.tls import wrap_socket, H2_NPN_PROTOCOLS, H2C_PROTOCOL
from hyper.common.bufsocket import BufferedSocket
import hyper
import socket
import logging
import h2
from h2.connection import H2Configuration
import threading
import Config
log = logging.getLogger(__name__)
bSTOP = False
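# The replay target is a test proxy that may use self-signed certificates, so
# hostname checking and certificate verification are disabled on hyper's
# global TLS context.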
hyper.tls._context = hyper.tls.init_context()
hyper.tls._context.check_hostname = False
hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE
class _LockedObject(object):
"""
A wrapper class that hides a specific object behind a lock.
The goal here is to provide a simple way to protect access to an object
that cannot safely be simultaneously accessed from multiple threads. The
intended use of this class is simple: take hold of it with a context
manager, which returns the protected object.
"""
def __init__(self, obj):
self.lock = threading.RLock()
self._obj = obj
def __enter__(self):
self.lock.acquire()
return self._obj
def __exit__(self, _exc_type, _exc_val, _exc_tb):
self.lock.release()
class h2ATS(HTTP20Connection):
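    """
    HTTP20Connection subclass whose h2 state is built with header validation
    disabled, so captured request headers can be replayed to the proxy
    largely as captured.
    """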
def __init_state(self):
"""
Initializes the 'mutable state' portions of the HTTP/2 connection
object.
This method exists to enable HTTP20Connection objects to be reused if
they're closed, by resetting the connection object to its basic state
whenever it ends up closed. Any situation that needs to recreate the
connection can call this method and it will be done.
This is one of the only methods in hyper that is truly private, as
users should be strongly discouraged from messing about with connection
objects themselves.
"""
config1 = H2Configuration(
client_side=True,
header_encoding='utf-8',
validate_outbound_headers=False,
validate_inbound_headers=False,
)
self._conn = _LockedObject(h2.connection.H2Connection(config=config1))
# Streams are stored in a dictionary keyed off their stream IDs. We
# also save the most recent one for easy access without having to walk
# the dictionary.
#
# We add a set of all streams that we or the remote party forcefully
# closed with RST_STREAM, to avoid encountering issues where frames
# were already in flight before the RST was processed.
#
# Finally, we add a set of streams that recently received data. When
# using multiple threads, this avoids reading on threads that have just
# acquired the I/O lock whose streams have already had their data read
# for them by prior threads.
self.streams = {}
self.recent_stream = None
self.next_stream_id = 1
self.reset_streams = set()
self.recent_recv_streams = set()
# The socket used to send data.
self._sock = None
# Instantiate a window manager.
#self.window_manager = self.__wm_class(65535)
return
def __init__(self, host, **kwargs):
HTTP20Connection.__init__(self, host, **kwargs)
self.__init_state()
def connect(self):
"""
Connect to the server specified when the object was created. This is a
no-op if we're already connected.
Concurrency
-----------
This method is thread-safe. It may be called from multiple threads, and
is a noop for all threads apart from the first.
:returns: Nothing.
"""
#print("connecting to ATS")
with self._lock:
if self._sock is not None:
return
sni = self.host
if not self.proxy_host:
host = self.host
port = self.port
else:
host = self.proxy_host
port = self.proxy_port
sock = socket.create_connection((host, port))
if self.secure:
#assert not self.proxy_host, "Proxy with HTTPS not supported."
sock, proto = wrap_socket(sock, sni, self.ssl_context,
force_proto=self.force_proto)
else:
proto = H2C_PROTOCOL
log.debug("Selected NPN protocol: %s", proto)
assert proto in H2_NPN_PROTOCOLS or proto == H2C_PROTOCOL
self._sock = BufferedSocket(sock, self.network_buffer_size)
self._send_preamble()
def createDummyBodywithLength(numberOfbytes):
if numberOfbytes == 0:
return None
body = 'a'
while numberOfbytes != 1:
body += 'b'
numberOfbytes -= 1
return body
def handleResponse(response, *args, **kwargs):
print(response.status_code)
# resp=args[0]
#expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2)
#expected_output = (int(expected_output_split[1]), str( expected_output_split[2]))
#r = result.Result(session_filename, expected_output[0], response.status_code)
# print(r.getResultString(colorize=True))
# make sure len of the message body is greater than length
def gen():
yield 'pforpersia,champaignurbana'.encode('utf-8')
yield 'there'.encode('utf-8')
def txn_replay(session_filename, txn, proxy, result_queue, h2conn, request_IDs):
""" Replays a single transaction
:param request_session: has to be a valid requests session"""
req = txn.getRequest()
resp = txn.getResponse()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier
if 'body' in txn_req_headers_dict:
del txn_req_headers_dict['body']
responseID = -1
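    # the request is fired on its own h2 stream; the stream id is returned so
    # the caller can fetch the matching response after the session is sent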
#print("Replaying session")
try:
# response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers),
# 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
# headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me
method = extractHeader.extract_txn_req_method(txn_req_headers)
response = None
mbody = None
#txn_req_headers_dict['Host'] = "localhost"
if 'Transfer-Encoding' in txn_req_headers_dict:
# deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header
# This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this
# BUT, this is not a problem if the data is not chunked encoded.. Strange, huh?
#del txn_req_headers_dict['Host']
if 'Content-Length' in txn_req_headers_dict:
#print("ewww !")
del txn_req_headers_dict['Content-Length']
mbody = gen()
if 'Content-Length' in txn_req_headers_dict:
nBytes = int(txn_req_headers_dict['Content-Length'])
mbody = createDummyBodywithLength(nBytes)
if 'Connection' in txn_req_headers_dict:
del txn_req_headers_dict['Connection']
#str2 = extractHeader.extract_host(txn_req_headers)+ extractHeader.extract_GET_path(txn_req_headers)
# print(str2)
if method == 'GET':
responseID = h2conn.request('GET', url=extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, body=mbody)
# print("get response", responseID)
return responseID
# request_IDs.append(responseID)
#response = h2conn.get_response(id)
# print(response.headers)
# if 'Content-Length' in response.headers:
# content = response.read()
#print("len: {0} received {1}".format(response.headers['Content-Length'],content))
elif method == 'POST':
responseID = h2conn.request('POST', url=extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, body=mbody)
print("get response", responseID)
return responseID
elif method == 'HEAD':
responseID = h2conn.request('HEAD', url=extractHeader.extract_GET_path(txn_req_headers), headers=txn_req_headers_dict)
print("get response", responseID)
return responseID
except UnicodeEncodeError as e:
# these unicode errors are due to the interaction between Requests and our wiretrace data.
# TODO fix
print("UnicodeEncodeError exception")
except:
e = sys.exc_info()
print("ERROR in requests: ", e, response, session_filename)
def session_replay(input, proxy, result_queue):
global bSTOP
''' Replay all transactions in session
This entire session will be replayed in one requests.Session (so one socket / TCP connection)'''
# if timing_control:
# time.sleep(float(session._timestamp)) # allow other threads to run
while bSTOP == False:
for session in iter(input.get, 'STOP'):
print(bSTOP)
if session == 'STOP':
print("Queue is empty")
bSTOP = True
break
txn = session.returnFirstTransaction()
req = txn.getRequest()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
with h2ATS(txn_req_headers_dict['Host'], secure=True, proxy_host=Config.proxy_host, proxy_port=Config.proxy_ssl_port) as h2conn:
request_IDs = []
respList = []
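                # send every transaction of the session first, then read the
                # responses back by stream id and compare them with the
                # recorded ones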
for txn in session.getTransactionIter():
try:
ret = txn_replay(session._filename, txn, proxy, result_queue, h2conn, request_IDs)
respList.append(txn.getResponse())
request_IDs.append(ret)
#print("txn return value is ",ret)
except:
e = sys.exc_info()
print("ERROR in replaying: ", e, txn.getRequest().getHeaders())
for id in request_IDs:
expectedH = respList.pop(0)
# print("extracting",id)
response = h2conn.get_response(id)
#print("code {0}:{1}".format(response.status,response.headers))
response_dict = {}
if mainProcess.verbose:
for field, value in response.headers.items():
response_dict[field.decode('utf-8')] = value.decode('utf-8')
expected_output_split = expectedH.getHeaders().split('\r\n')[0].split(' ', 2)
expected_output = (int(expected_output_split[1]), str(expected_output_split[2]))
r = result.Result("", expected_output[0], response.status, response.read())
expected_Dict = extractHeader.responseHeader_to_dict(expectedH.getHeaders())
b_res, res = r.getResult(response_dict, expected_Dict, colorize=Config.colorize)
print(res)
if not b_res:
print("Received response")
print(response_dict)
print("Expected response")
print(expected_Dict)
bSTOP = True
#print("Queue is empty")
input.put('STOP')
break
def client_replay(input, proxy, result_queue, nThread):
Threads = []
for i in range(nThread):
t = Thread(target=session_replay, args=[input, proxy, result_queue])
t.start()
Threads.append(t)
for t1 in Threads:
t1.join()
| apache-2.0 | 5,364,924,808,349,940,000 | 38.776435 | 160 | 0.609904 | false |
pvagner/orca | src/orca/scripts/apps/gnome-mud/script.py | 3 | 6038 | # Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for gnome-mud."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.debug as debug
import orca.scripts.default as default
import orca.input_event as input_event
import orca.keybindings as keybindings
import orca.orca_state as orca_state
import orca.speech as speech
from orca.orca_i18n import _ # for gettext support
########################################################################
# #
# Ring List. A fixed size circular list by Flavio Catalani #
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/435902 #
# #
########################################################################
class RingList:
def __init__(self, length):
self.__data__ = []
self.__full__ = 0
self.__max__ = length
self.__cur__ = 0
def append(self, x):
if self.__full__ == 1:
for i in range (0, self.__cur__ - 1):
self.__data__[i] = self.__data__[i + 1]
self.__data__[self.__cur__ - 1] = x
else:
self.__data__.append(x)
self.__cur__ += 1
if self.__cur__ == self.__max__:
self.__full__ = 1
def get(self):
return self.__data__
def remove(self):
if (self.__cur__ > 0):
del self.__data__[self.__cur__ - 1]
self.__cur__ -= 1
def size(self):
return self.__cur__
def maxsize(self):
return self.__max__
def __str__(self):
return ''.join(self.__data__)
class Script(default.Script):
MESSAGE_LIST_LENGTH = 10
def __init__(self, app):
"""Creates a new script for the given application.
This script tries to fix some accessibility problems found in
the gnome-mud application, and also improves the user experience.
For more details see bug #
Arguments:
- app: the application to create a script for.
"""
# Set the debug level for all the methods in this script.
#
self.debugLevel = debug.LEVEL_FINEST
self.previousMessages = RingList(Script.MESSAGE_LIST_LENGTH)
# Initially populate the cyclic list with empty strings
i = 0
while i < self.previousMessages.maxsize():
self.previousMessages.append("")
i += 1
default.Script.__init__(self, app)
def setupInputEventHandlers(self):
debug.println(self.debugLevel, "gnome-mud.setupInputEventHandlers.")
default.Script.setupInputEventHandlers(self)
self.inputEventHandlers["readPreviousMessageHandler"] = \
input_event.InputEventHandler(
Script.readPreviousMessage,
_('Read the latest n messages in the incoming messages text '
'area.'))
def getAppKeyBindings(self):
"""Returns the application-specific keybindings for this script."""
keyBindings = keybindings.KeyBindings()
messageKeys = [ "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]
for messagekey in messageKeys:
keyBindings.add(
keybindings.KeyBinding(
messagekey,
keybindings.defaultModifierMask,
keybindings.ORCA_MODIFIER_MASK,
self.inputEventHandlers["readPreviousMessageHandler"]))
return keyBindings
def readPreviousMessage(self, inputEvent):
#This function speaks the latest n messages. Orca+F1 the latest one,
        #Orca+F2 the latest two and so on.
debug.println(self.debugLevel, "gnome-mud.readPreviousMessage.")
i = int(inputEvent.event_string[1:])
messageNo = Script.MESSAGE_LIST_LENGTH - i
text = ""
messages = self.previousMessages.get()
for i in range (messageNo, Script.MESSAGE_LIST_LENGTH):
message = messages[i]
text += message
speech.speak(text)
def onTextInserted(self, event):
#Whenever a new text is inserted in the incoming message text area,
#We want to speak and add it to the ringList structure only those lines
#that contain some text and if the application is the current
#locusOfFocus.
rolesList = [pyatspi.ROLE_TERMINAL,
pyatspi.ROLE_FILLER]
if self.utilities.hasMatchingHierarchy(event.source, rolesList):
if self.flatReviewContext:
self.toggleFlatReviewMode()
message = event.any_data
if message and (not message.isspace()) and message != "\n":
debug.println(debug.LEVEL_FINEST, \
message + " inserted in ringlist:")
self.previousMessages.append(message)
if event.source.getApplication() == \
orca_state.locusOfFocus.getApplication():
speech.speak(message)
else:
default.Script.onTextInserted(self, event)
| lgpl-2.1 | -5,981,183,722,259,072,000 | 33.502857 | 79 | 0.575356 | false |
jfterpstra/bluebottle | bluebottle/utils/staticfiles_finders.py | 2 | 1192 | from django.utils._os import safe_join
import os
from django.conf import settings
from django.contrib.staticfiles.finders import FileSystemFinder
from bluebottle.clients.models import Client
class TenantStaticFilesFinder(FileSystemFinder):
def find(self, path, all=False):
"""
Looks for files in the client static directories.
static/assets/greatbarier/images/logo.jpg
will translate to
MULTI_TENANT_DIR/greatbarier/static/images/logo.jpg
"""
tenants = Client.objects.all()
tenant_dir = getattr(settings, 'MULTI_TENANT_DIR', None)
if not tenant_dir:
return []
for tenant in tenants:
if "{0}/".format(tenant.client_name) in path:
tenant_path = path.replace('{0}/'.format(tenant.client_name),
'{0}/static/'.format(
tenant.client_name))
local_path = safe_join(tenant_dir, tenant_path)
if os.path.exists(local_path):
if all:
return [local_path]
return local_path
return []
| bsd-3-clause | 1,089,561,849,296,188,200 | 35.121212 | 77 | 0.564597 | false |
themarkypantz/kafka | tests/kafkatest/services/security/security_config.py | 8 | 14918 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from tempfile import mkdtemp
from shutil import rmtree
from ducktape.template import TemplateRenderer
from kafkatest.services.security.minikdc import MiniKdc
import itertools
class SslStores(object):
def __init__(self, local_scratch_dir):
self.ca_crt_path = os.path.join(local_scratch_dir, "test.ca.crt")
self.ca_jks_path = os.path.join(local_scratch_dir, "test.ca.jks")
self.ca_passwd = "test-ca-passwd"
self.truststore_path = os.path.join(local_scratch_dir, "test.truststore.jks")
self.truststore_passwd = "test-ts-passwd"
self.keystore_passwd = "test-ks-passwd"
self.key_passwd = "test-key-passwd"
        # Allow up to one hour of clock skew between host and VMs
self.startdate = "-1H"
for file in [self.ca_crt_path, self.ca_jks_path, self.truststore_path]:
if os.path.exists(file):
os.remove(file)
def generate_ca(self):
"""
Generate CA private key and certificate.
"""
self.runcmd("keytool -genkeypair -alias ca -keyalg RSA -keysize 2048 -keystore %s -storetype JKS -storepass %s -keypass %s -dname CN=SystemTestCA -startdate %s" % (self.ca_jks_path, self.ca_passwd, self.ca_passwd, self.startdate))
self.runcmd("keytool -export -alias ca -keystore %s -storepass %s -storetype JKS -rfc -file %s" % (self.ca_jks_path, self.ca_passwd, self.ca_crt_path))
def generate_truststore(self):
"""
Generate JKS truststore containing CA certificate.
"""
self.runcmd("keytool -importcert -alias ca -file %s -keystore %s -storepass %s -storetype JKS -noprompt" % (self.ca_crt_path, self.truststore_path, self.truststore_passwd))
def generate_and_copy_keystore(self, node):
"""
Generate JKS keystore with certificate signed by the test CA.
The generated certificate has the node's hostname as a DNS SubjectAlternativeName.
"""
ks_dir = mkdtemp(dir="/tmp")
ks_path = os.path.join(ks_dir, "test.keystore.jks")
csr_path = os.path.join(ks_dir, "test.kafka.csr")
crt_path = os.path.join(ks_dir, "test.kafka.crt")
self.runcmd("keytool -genkeypair -alias kafka -keyalg RSA -keysize 2048 -keystore %s -storepass %s -storetype JKS -keypass %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (ks_path, self.keystore_passwd, self.key_passwd, self.hostname(node), self.startdate))
self.runcmd("keytool -certreq -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s" % (ks_path, self.keystore_passwd, self.key_passwd, csr_path))
self.runcmd("keytool -gencert -keystore %s -storepass %s -storetype JKS -alias ca -infile %s -outfile %s -dname CN=systemtest -ext SAN=DNS:%s -startdate %s" % (self.ca_jks_path, self.ca_passwd, csr_path, crt_path, self.hostname(node), self.startdate))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -alias ca -file %s -noprompt" % (ks_path, self.keystore_passwd, self.ca_crt_path))
self.runcmd("keytool -importcert -keystore %s -storepass %s -storetype JKS -keypass %s -alias kafka -file %s -noprompt" % (ks_path, self.keystore_passwd, self.key_passwd, crt_path))
node.account.copy_to(ks_path, SecurityConfig.KEYSTORE_PATH)
rmtree(ks_dir)
def hostname(self, node):
""" Hostname which may be overridden for testing validation failures
"""
return node.account.hostname
def runcmd(self, cmd):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Command '%s' returned non-zero exit status %d: %s" % (cmd, proc.returncode, stdout))
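# Minimal usage sketch (hypothetical, not part of the original service code):
# how the helpers above fit together when preparing TLS material for a test
# cluster. It requires the JDK ``keytool`` binary on the PATH; ``node`` is
# assumed to be a ducktape cluster node and ``local_scratch_dir`` a writable
# local directory.
def _example_generate_ssl_stores(local_scratch_dir, node):
    stores = SslStores(local_scratch_dir)
    stores.generate_ca()                      # CA key pair plus exported certificate
    stores.generate_truststore()              # JKS truststore holding only the CA cert
    stores.generate_and_copy_keystore(node)   # per-node keystore signed by the CA
    return stores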
class SecurityConfig(TemplateRenderer):
PLAINTEXT = 'PLAINTEXT'
SSL = 'SSL'
SASL_PLAINTEXT = 'SASL_PLAINTEXT'
SASL_SSL = 'SASL_SSL'
SASL_MECHANISM_GSSAPI = 'GSSAPI'
SASL_MECHANISM_PLAIN = 'PLAIN'
SASL_MECHANISM_SCRAM_SHA_256 = 'SCRAM-SHA-256'
SASL_MECHANISM_SCRAM_SHA_512 = 'SCRAM-SHA-512'
SCRAM_CLIENT_USER = "kafka-client"
SCRAM_CLIENT_PASSWORD = "client-secret"
SCRAM_BROKER_USER = "kafka-broker"
SCRAM_BROKER_PASSWORD = "broker-secret"
CONFIG_DIR = "/mnt/security"
KEYSTORE_PATH = "/mnt/security/test.keystore.jks"
TRUSTSTORE_PATH = "/mnt/security/test.truststore.jks"
JAAS_CONF_PATH = "/mnt/security/jaas.conf"
KRB5CONF_PATH = "/mnt/security/krb5.conf"
KEYTAB_PATH = "/mnt/security/keytab"
# This is initialized only when the first instance of SecurityConfig is created
ssl_stores = None
def __init__(self, context, security_protocol=None, interbroker_security_protocol=None,
client_sasl_mechanism=SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SASL_MECHANISM_GSSAPI,
zk_sasl=False, template_props="", static_jaas_conf=True):
"""
Initialize the security properties for the node and copy
keystore and truststore to the remote node if the transport protocol
is SSL. If security_protocol is None, the protocol specified in the
template properties file is used. If no protocol is specified in the
template properties either, PLAINTEXT is used as default.
"""
self.context = context
if not SecurityConfig.ssl_stores:
            # This generates keystore/truststore files in a local scratch directory which gets
# automatically destroyed after the test is run
# Creating within the scratch directory allows us to run tests in parallel without fear of collision
SecurityConfig.ssl_stores = SslStores(context.local_scratch_dir)
SecurityConfig.ssl_stores.generate_ca()
SecurityConfig.ssl_stores.generate_truststore()
if security_protocol is None:
security_protocol = self.get_property('security.protocol', template_props)
if security_protocol is None:
security_protocol = SecurityConfig.PLAINTEXT
elif security_protocol not in [SecurityConfig.PLAINTEXT, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT, SecurityConfig.SASL_SSL]:
raise Exception("Invalid security.protocol in template properties: " + security_protocol)
if interbroker_security_protocol is None:
interbroker_security_protocol = security_protocol
self.interbroker_security_protocol = interbroker_security_protocol
self.has_sasl = self.is_sasl(security_protocol) or self.is_sasl(interbroker_security_protocol) or zk_sasl
self.has_ssl = self.is_ssl(security_protocol) or self.is_ssl(interbroker_security_protocol)
self.zk_sasl = zk_sasl
self.static_jaas_conf = static_jaas_conf
self.properties = {
'security.protocol' : security_protocol,
'ssl.keystore.location' : SecurityConfig.KEYSTORE_PATH,
'ssl.keystore.password' : SecurityConfig.ssl_stores.keystore_passwd,
'ssl.key.password' : SecurityConfig.ssl_stores.key_passwd,
'ssl.truststore.location' : SecurityConfig.TRUSTSTORE_PATH,
'ssl.truststore.password' : SecurityConfig.ssl_stores.truststore_passwd,
'ssl.endpoint.identification.algorithm' : 'HTTPS',
'sasl.mechanism' : client_sasl_mechanism,
'sasl.mechanism.inter.broker.protocol' : interbroker_sasl_mechanism,
'sasl.kerberos.service.name' : 'kafka'
}
def client_config(self, template_props="", node=None):
# If node is not specified, use static jaas config which will be created later.
# Otherwise use static JAAS configuration files with SASL_SSL and sasl.jaas.config
# property with SASL_PLAINTEXT so that both code paths are tested by existing tests.
        # Note that this is an arbitrary choice and it is possible to run all tests with
# either static or dynamic jaas config files if required.
static_jaas_conf = node is None or (self.has_sasl and self.has_ssl)
return SecurityConfig(self.context, self.security_protocol, client_sasl_mechanism=self.client_sasl_mechanism, template_props=template_props, static_jaas_conf=static_jaas_conf)
def enable_security_protocol(self, security_protocol):
self.has_sasl = self.has_sasl or self.is_sasl(security_protocol)
self.has_ssl = self.has_ssl or self.is_ssl(security_protocol)
def setup_ssl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
node.account.copy_to(SecurityConfig.ssl_stores.truststore_path, SecurityConfig.TRUSTSTORE_PATH)
SecurityConfig.ssl_stores.generate_and_copy_keystore(node)
def setup_sasl(self, node):
node.account.ssh("mkdir -p %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
jaas_conf_file = "jaas.conf"
java_version = node.account.ssh_capture("java -version")
if any('IBM' in line for line in java_version):
is_ibm_jdk = True
else:
is_ibm_jdk = False
jaas_conf = self.render(jaas_conf_file, node=node, is_ibm_jdk=is_ibm_jdk,
SecurityConfig=SecurityConfig,
client_sasl_mechanism=self.client_sasl_mechanism,
enabled_sasl_mechanisms=self.enabled_sasl_mechanisms,
static_jaas_conf=self.static_jaas_conf)
if self.static_jaas_conf:
node.account.create_file(SecurityConfig.JAAS_CONF_PATH, jaas_conf)
else:
self.properties['sasl.jaas.config'] = jaas_conf.replace("\n", " \\\n")
if self.has_sasl_kerberos:
node.account.copy_to(MiniKdc.LOCAL_KEYTAB_FILE, SecurityConfig.KEYTAB_PATH)
node.account.copy_to(MiniKdc.LOCAL_KRB5CONF_FILE, SecurityConfig.KRB5CONF_PATH)
def setup_node(self, node):
if self.has_ssl:
self.setup_ssl(node)
if self.has_sasl:
self.setup_sasl(node)
def setup_credentials(self, node, path, zk_connect, broker):
if broker:
self.maybe_create_scram_credentials(node, zk_connect, path, self.interbroker_sasl_mechanism,
SecurityConfig.SCRAM_BROKER_USER, SecurityConfig.SCRAM_BROKER_PASSWORD)
else:
self.maybe_create_scram_credentials(node, zk_connect, path, self.client_sasl_mechanism,
SecurityConfig.SCRAM_CLIENT_USER, SecurityConfig.SCRAM_CLIENT_PASSWORD)
def maybe_create_scram_credentials(self, node, zk_connect, path, mechanism, user_name, password):
if self.has_sasl and self.is_sasl_scram(mechanism):
cmd = "%s --zookeeper %s --entity-name %s --entity-type users --alter --add-config %s=[password=%s]" % \
(path.script("kafka-configs.sh", node), zk_connect,
user_name, mechanism, password)
node.account.ssh(cmd)
def clean_node(self, node):
if self.security_protocol != SecurityConfig.PLAINTEXT:
node.account.ssh("rm -rf %s" % SecurityConfig.CONFIG_DIR, allow_fail=False)
def get_property(self, prop_name, template_props=""):
"""
Get property value from the string representation of
a properties file.
"""
value = None
for line in template_props.split("\n"):
items = line.split("=")
if len(items) == 2 and items[0].strip() == prop_name:
value = str(items[1].strip())
return value
def is_ssl(self, security_protocol):
return security_protocol == SecurityConfig.SSL or security_protocol == SecurityConfig.SASL_SSL
def is_sasl(self, security_protocol):
return security_protocol == SecurityConfig.SASL_PLAINTEXT or security_protocol == SecurityConfig.SASL_SSL
def is_sasl_scram(self, sasl_mechanism):
return sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_256 or sasl_mechanism == SecurityConfig.SASL_MECHANISM_SCRAM_SHA_512
@property
def security_protocol(self):
return self.properties['security.protocol']
@property
def client_sasl_mechanism(self):
return self.properties['sasl.mechanism']
@property
def interbroker_sasl_mechanism(self):
return self.properties['sasl.mechanism.inter.broker.protocol']
@property
def enabled_sasl_mechanisms(self):
return set([self.client_sasl_mechanism, self.interbroker_sasl_mechanism])
@property
def has_sasl_kerberos(self):
return self.has_sasl and (SecurityConfig.SASL_MECHANISM_GSSAPI in self.enabled_sasl_mechanisms)
@property
def kafka_opts(self):
if self.has_sasl:
if self.static_jaas_conf:
return "\"-Djava.security.auth.login.config=%s -Djava.security.krb5.conf=%s\"" % (SecurityConfig.JAAS_CONF_PATH, SecurityConfig.KRB5CONF_PATH)
else:
return "\"-Djava.security.krb5.conf=%s\"" % SecurityConfig.KRB5CONF_PATH
else:
return ""
def props(self, prefix=''):
"""
Return properties as string with line separators, optionally with a prefix.
This is used to append security config properties to
a properties file.
:param prefix: prefix to add to each property
:return: a string containing line-separated properties
"""
if self.security_protocol == SecurityConfig.PLAINTEXT:
return ""
if self.has_sasl and not self.static_jaas_conf and 'sasl.jaas.config' not in self.properties:
raise Exception("JAAS configuration property has not yet been initialized")
config_lines = (prefix + key + "=" + value for key, value in self.properties.iteritems())
# Extra blank lines ensure this can be appended/prepended safely
return "\n".join(itertools.chain([""], config_lines, [""]))
def __str__(self):
"""
Return properties as a string with line separators.
"""
return self.props()
| apache-2.0 | 8,458,064,236,308,150,000 | 49.398649 | 274 | 0.666577 | false |
andyliuliming/WALinuxAgent | azurelinuxagent/common/protocol/imds.py | 2 | 10005 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
import json
import re
import azurelinuxagent.common.utils.restutil as restutil
from azurelinuxagent.common.exception import HttpError
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.protocol.restapi import DataContract, set_properties
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
IMDS_ENDPOINT = '169.254.169.254'
APIVERSION = '2018-02-01'
BASE_URI = "http://{0}/metadata/instance/{1}?api-version={2}"
IMDS_IMAGE_ORIGIN_UNKNOWN = 0
IMDS_IMAGE_ORIGIN_CUSTOM = 1
IMDS_IMAGE_ORIGIN_ENDORSED = 2
IMDS_IMAGE_ORIGIN_PLATFORM = 3
def get_imds_client():
return ImdsClient()
# A *slightly* future proof list of endorsed distros.
# -> e.g. I have predicted the future and said that 20.04-LTS will exist
# and is endorsed.
#
# See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros for
# more details.
#
# This is not an exhaustive list. This is a best attempt to mark images as
# endorsed or not. Image publishers do not encode all of the requisite information
# in their publisher, offer, sku, and version to definitively mark something as
# endorsed or not. This is not perfect, but it is approximately 98% perfect.
ENDORSED_IMAGE_INFO_MATCHER_JSON = """{
"CANONICAL": {
"UBUNTUSERVER": {
"List": [
"14.04.0-LTS",
"14.04.1-LTS",
"14.04.2-LTS",
"14.04.3-LTS",
"14.04.4-LTS",
"14.04.5-LTS",
"14.04.6-LTS",
"14.04.7-LTS",
"14.04.8-LTS",
"16.04-LTS",
"16.04.0-LTS",
"18.04-LTS",
"20.04-LTS",
"22.04-LTS"
]
}
},
"COREOS": {
"COREOS": {
"STABLE": { "Minimum": "494.4.0" }
}
},
"CREDATIV": {
"DEBIAN": { "Minimum": "7" }
},
"OPENLOGIC": {
"CENTOS": {
"Minimum": "6.3",
"List": [
"7-LVM",
"7-RAW"
]
},
"CENTOS-HPC": { "Minimum": "6.3" }
},
"REDHAT": {
"RHEL": {
"Minimum": "6.7",
"List": [
"7-LVM",
"7-RAW"
]
},
"RHEL-HANA": { "Minimum": "6.7" },
"RHEL-SAP": { "Minimum": "6.7" },
"RHEL-SAP-APPS": { "Minimum": "6.7" },
"RHEL-SAP-HANA": { "Minimum": "6.7" }
},
"SUSE": {
"SLES": {
"List": [
"11-SP4",
"11-SP5",
"11-SP6",
"12-SP1",
"12-SP2",
"12-SP3",
"12-SP4",
"12-SP5",
"12-SP6"
]
},
"SLES-BYOS": {
"List": [
"11-SP4",
"11-SP5",
"11-SP6",
"12-SP1",
"12-SP2",
"12-SP3",
"12-SP4",
"12-SP5",
"12-SP6"
]
},
"SLES-SAP": {
"List": [
"11-SP4",
"11-SP5",
"11-SP6",
"12-SP1",
"12-SP2",
"12-SP3",
"12-SP4",
"12-SP5",
"12-SP6"
]
}
}
}"""
class ImageInfoMatcher(object):
def __init__(self, doc):
self.doc = json.loads(doc)
def is_match(self, publisher, offer, sku, version):
def _is_match_walk(doci, keys):
key = keys.pop(0).upper()
if key is None:
return False
if key not in doci:
return False
if 'List' in doci[key] and keys[0] in doci[key]['List']:
return True
if 'Match' in doci[key] and re.match(doci[key]['Match'], keys[0]):
return True
if 'Minimum' in doci[key]:
try:
return FlexibleVersion(keys[0]) >= FlexibleVersion(doci[key]['Minimum'])
except ValueError:
pass
return _is_match_walk(doci[key], keys)
return _is_match_walk(self.doc, [ publisher, offer, sku, version ])
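# Minimal sketch (not part of the original agent code): the matcher walks
# publisher -> offer -> sku -> version through the endorsed-image JSON above,
# so a stock Ubuntu 16.04-LTS marketplace image is reported as endorsed. The
# version string below is an arbitrary illustrative value.
def _example_is_endorsed():
    matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON)
    # True: "16.04-LTS" appears in the List under CANONICAL/UBUNTUSERVER.
    return matcher.is_match("Canonical", "UbuntuServer", "16.04-LTS", "16.04.201812060")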
class ComputeInfo(DataContract):
__matcher = ImageInfoMatcher(ENDORSED_IMAGE_INFO_MATCHER_JSON)
def __init__(self,
location=None,
name=None,
offer=None,
osType=None,
placementGroupId=None,
platformFaultDomain=None,
                 platformUpdateDomain=None,
publisher=None,
resourceGroupName=None,
sku=None,
subscriptionId=None,
tags=None,
version=None,
vmId=None,
vmSize=None,
vmScaleSetName=None,
zone=None):
self.location = location
self.name = name
self.offer = offer
self.osType = osType
self.placementGroupId = placementGroupId
self.platformFaultDomain = platformFaultDomain
        self.platformUpdateDomain = platformUpdateDomain
self.publisher = publisher
self.resourceGroupName = resourceGroupName
self.sku = sku
self.subscriptionId = subscriptionId
self.tags = tags
self.version = version
self.vmId = vmId
self.vmSize = vmSize
self.vmScaleSetName = vmScaleSetName
self.zone = zone
@property
def image_info(self):
return "{0}:{1}:{2}:{3}".format(self.publisher, self.offer, self.sku, self.version)
@property
def image_origin(self):
"""
An integer value describing the origin of the image.
0 -> unknown
1 -> custom - user created image
2 -> endorsed - See https://docs.microsoft.com/en-us/azure/virtual-machines/linux/endorsed-distros
3 -> platform - non-endorsed image that is available in the Azure Marketplace.
"""
try:
if self.publisher == "":
return IMDS_IMAGE_ORIGIN_CUSTOM
if ComputeInfo.__matcher.is_match(self.publisher, self.offer, self.sku, self.version):
return IMDS_IMAGE_ORIGIN_ENDORSED
else:
return IMDS_IMAGE_ORIGIN_PLATFORM
except Exception as e:
logger.warn("Could not determine the image origin from IMDS: {0}", str(e))
return IMDS_IMAGE_ORIGIN_UNKNOWN
class ImdsClient(object):
def __init__(self, version=APIVERSION):
self._api_version = version
self._headers = {
'User-Agent': restutil.HTTP_USER_AGENT,
'Metadata': True,
}
self._health_headers = {
'User-Agent': restutil.HTTP_USER_AGENT_HEALTH,
'Metadata': True,
}
@property
def compute_url(self):
return BASE_URI.format(IMDS_ENDPOINT, 'compute', self._api_version)
@property
def instance_url(self):
return BASE_URI.format(IMDS_ENDPOINT, '', self._api_version)
def get_compute(self):
"""
Fetch compute information.
:return: instance of a ComputeInfo
:rtype: ComputeInfo
"""
resp = restutil.http_get(self.compute_url, headers=self._headers)
if restutil.request_failed(resp):
raise HttpError("{0} - GET: {1}".format(resp.status, self.compute_url))
data = resp.read()
data = json.loads(ustr(data, encoding="utf-8"))
compute_info = ComputeInfo()
set_properties('compute', compute_info, data)
return compute_info
def validate(self):
"""
Determines whether the metadata instance api returns 200, and the response
is valid: compute should contain location, name, subscription id, and vm size
and network should contain mac address and private ip address.
:return: Tuple<is_healthy:bool, error_response:str>
is_healthy: True when validation succeeds, False otherwise
error_response: validation failure details to assist with debugging
"""
# ensure we get a 200
resp = restutil.http_get(self.instance_url, headers=self._health_headers)
if restutil.request_failed(resp):
return False, "{0}".format(restutil.read_response_error(resp))
# ensure the response is valid json
data = resp.read()
try:
json_data = json.loads(ustr(data, encoding="utf-8"))
except Exception as e:
return False, "JSON parsing failed: {0}".format(ustr(e))
# ensure all expected fields are present and have a value
try:
# TODO: compute fields cannot be verified yet since we need to exclude rdfe vms (#1249)
self.check_field(json_data, 'network')
self.check_field(json_data['network'], 'interface')
self.check_field(json_data['network']['interface'][0], 'macAddress')
self.check_field(json_data['network']['interface'][0], 'ipv4')
self.check_field(json_data['network']['interface'][0]['ipv4'], 'ipAddress')
self.check_field(json_data['network']['interface'][0]['ipv4']['ipAddress'][0], 'privateIpAddress')
except ValueError as v:
return False, ustr(v)
return True, ''
@staticmethod
def check_field(dict_obj, field):
if field not in dict_obj or dict_obj[field] is None:
raise ValueError('Missing field: [{0}]'.format(field))
if len(dict_obj[field]) == 0:
raise ValueError('Empty field: [{0}]'.format(field))
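# Minimal usage sketch (hypothetical, not part of the original agent code):
# querying IMDS from inside an Azure VM. The 169.254.169.254 endpoint is
# link-local and only reachable from the VM itself, so this fails anywhere else.
def _example_query_imds():
    client = get_imds_client()
    compute = client.get_compute()
    return compute.image_info, compute.image_origin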
| apache-2.0 | 779,066,349,977,622,700 | 30.363636 | 110 | 0.526737 | false |
noamelf/Open-Knesset | persons/migrations/0005_auto__add_field_person_img_url__add_field_person_phone__add_field_pers.py | 14 | 19706 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.img_url'
db.add_column('persons_person', 'img_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=200, blank=True),
keep_default=False)
# Adding field 'Person.phone'
db.add_column('persons_person', 'phone',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Person.fax'
db.add_column('persons_person', 'fax',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'Person.email'
db.add_column('persons_person', 'email',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
# Adding field 'Person.family_status'
db.add_column('persons_person', 'family_status',
self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
keep_default=False)
# Adding field 'Person.number_of_children'
db.add_column('persons_person', 'number_of_children',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.date_of_birth'
db.add_column('persons_person', 'date_of_birth',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.place_of_birth'
db.add_column('persons_person', 'place_of_birth',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Person.date_of_death'
db.add_column('persons_person', 'date_of_death',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.year_of_aliyah'
db.add_column('persons_person', 'year_of_aliyah',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.place_of_residence'
db.add_column('persons_person', 'place_of_residence',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Person.area_of_residence'
db.add_column('persons_person', 'area_of_residence',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Person.place_of_residence_lat'
db.add_column('persons_person', 'place_of_residence_lat',
self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
keep_default=False)
# Adding field 'Person.place_of_residence_lon'
db.add_column('persons_person', 'place_of_residence_lon',
self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True),
keep_default=False)
# Adding field 'Person.residence_centrality'
db.add_column('persons_person', 'residence_centrality',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.residence_economy'
db.add_column('persons_person', 'residence_economy',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Person.gender'
db.add_column('persons_person', 'gender',
self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.img_url'
db.delete_column('persons_person', 'img_url')
# Deleting field 'Person.phone'
db.delete_column('persons_person', 'phone')
# Deleting field 'Person.fax'
db.delete_column('persons_person', 'fax')
# Deleting field 'Person.email'
db.delete_column('persons_person', 'email')
# Deleting field 'Person.family_status'
db.delete_column('persons_person', 'family_status')
# Deleting field 'Person.number_of_children'
db.delete_column('persons_person', 'number_of_children')
# Deleting field 'Person.date_of_birth'
db.delete_column('persons_person', 'date_of_birth')
# Deleting field 'Person.place_of_birth'
db.delete_column('persons_person', 'place_of_birth')
# Deleting field 'Person.date_of_death'
db.delete_column('persons_person', 'date_of_death')
# Deleting field 'Person.year_of_aliyah'
db.delete_column('persons_person', 'year_of_aliyah')
# Deleting field 'Person.place_of_residence'
db.delete_column('persons_person', 'place_of_residence')
# Deleting field 'Person.area_of_residence'
db.delete_column('persons_person', 'area_of_residence')
# Deleting field 'Person.place_of_residence_lat'
db.delete_column('persons_person', 'place_of_residence_lat')
# Deleting field 'Person.place_of_residence_lon'
db.delete_column('persons_person', 'place_of_residence_lon')
# Deleting field 'Person.residence_centrality'
db.delete_column('persons_person', 'residence_centrality')
# Deleting field 'Person.residence_economy'
db.delete_column('persons_person', 'residence_economy')
# Deleting field 'Person.gender'
db.delete_column('persons_person', 'gender')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'persons.personalias': {
'Meta': {'object_name': 'PersonAlias'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['persons.Person']"})
},
'persons.processedprotocolpart': {
'Meta': {'object_name': 'ProcessedProtocolPart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'protocol_part_id': ('django.db.models.fields.IntegerField', [], {})
},
'persons.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roles'", 'to': "orm['persons.Person']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
}
}
complete_apps = ['persons'] | bsd-3-clause | 8,754,403,834,531,386,000 | 65.130872 | 200 | 0.555973 | false |
JPFrancoia/scikit-learn | sklearn/preprocessing/data.py | 13 | 70436 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
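# Minimal sketch (not part of the original module): constant features end up
# with a scale of exactly zero, and this helper turns those entries into 1 so
# that the subsequent division leaves the feature unchanged.
def _example_handle_zeros():
    scale_ = np.array([2., 0., 5.])
    return _handle_zeros_in_scale(scale_)  # -> array([2., 1., 5.])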
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
StandardScaler: Performs scaling to unit variance using the``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
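# Minimal sketch (not part of the original module): ``scale`` centers each
# column to zero mean and unit variance, so this tiny example returns
# [[-1., -1.], [1., 1.]].
def _example_scale():
    X = np.array([[1., 2.], [3., 4.]])
    return scale(X)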
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
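# Minimal sketch (not part of the original module): each column is rescaled to
# the default (0, 1) feature range, giving [[0., 0.], [0.5, 0.5], [1., 1.]].
def _example_minmax_scaler():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    return MinMaxScaler().fit_transform(X)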
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MinMaxScaler: Performs scaling to a given range using the``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. "
"Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
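# Minimal sketch (not part of the original module): fitting incrementally with
# ``partial_fit`` over two mini-batches yields the same statistics as a single
# fit on the stacked data (mean_ == [1.5, 1.5], var_ == [1.25, 1.25]).
def _example_incremental_standard_scaler():
    X1 = np.array([[0., 0.], [1., 1.]])
    X2 = np.array([[2., 2.], [3., 3.]])
    scaler = StandardScaler()
    scaler.partial_fit(X1)
    scaler.partial_fit(X2)
    return scaler.transform(np.array([[1.5, 1.5]]))  # -> [[0., 0.]]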
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data.
    axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
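# Illustrative usage sketch (added for exposition; not part of the original module).
# The function API and the estimator API above are equivalent on dense input: each
# column is divided by its maximum absolute value, so results lie in [-1, 1].
def _example_maxabs_scaling():
    import numpy as np
    X = np.array([[1.0, -2.0], [2.0, 4.0], [-4.0, 1.0]])
    X_func = maxabs_scale(X)                 # function API
    X_est = MaxAbsScaler().fit_transform(X)  # estimator API
    assert np.allclose(X_func, X_est)
    assert np.abs(X_est).max() <= 1.0
    return X_est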
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
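# Illustrative usage sketch (added for exposition; not part of the original module).
# With one large outlier the median (center_) and the IQR (scale_) stay close to the
# bulk of the data, which is the robustness property described in the docstring above.
def _example_robust_scaling():
    import numpy as np
    X = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
    scaler = RobustScaler().fit(X)
    assert np.isclose(scaler.center_[0], 3.0)  # median, unaffected by the outlier
    assert np.isclose(scaler.scale_[0], 2.0)   # 75th - 25th percentile
    return scaler.transform(X)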
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
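# Illustrative usage sketch (added for exposition; not part of the original module).
# powers_ records the exponent of every input feature in every output column, and
# get_feature_names renders those exponents with the supplied names.
def _example_polynomial_feature_names():
    import numpy as np
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    names = poly.get_feature_names(['a', 'b'])
    # names == ['1', 'a', 'b', 'a^2', 'a b', 'b^2']
    return names, poly.powers_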
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
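# Illustrative usage sketch (added for exposition; not part of the original module).
# Each row is rescaled independently; 'l2' gives every non-zero row unit Euclidean
# length, 'l1' makes absolute row sums equal one, and 'max' divides each row by its
# largest entry.
def _example_normalize():
    import numpy as np
    X = np.array([[3.0, 4.0], [1.0, 0.0]])
    X_l2 = normalize(X, norm='l2')   # rows become [0.6, 0.8] and [1.0, 0.0]
    X_l1 = normalize(X, norm='l1')
    X_max = normalize(X, norm='max')
    assert np.allclose(np.linalg.norm(X_l2, axis=1), 1.0)
    return X_l1, X_max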
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
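# Illustrative usage sketch (added for exposition; not part of the original module).
# Values strictly greater than the threshold map to 1, everything else to 0.
def _example_binarize():
    import numpy as np
    X = np.array([[0.2, 0.7], [0.5, 0.5]])
    out = binarize(X, threshold=0.5)
    # out == [[0., 1.], [0., 0.]]  (0.5 is not strictly greater than the threshold)
    return out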
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
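# Illustrative usage sketch (added for exposition; not part of the original module).
# For a linear kernel K = X X^T, centering the kernel matrix is equivalent to centering
# X first and then taking dot products, which is the equivalence stated in the docstring.
def _example_kernel_centering():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K_centered = KernelCenterer().fit_transform(X.dot(X.T))
    X_centered = X - X.mean(axis=0)
    assert np.allclose(K_centered, X_centered.dot(X_centered.T))
    return K_centered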
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
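# Illustrative usage sketch (added for exposition; not part of the original module).
# The callable is applied only to the selected columns; the remaining columns are
# stacked back, untouched, to the right of the transformed block.
def _example_transform_selected():
    import numpy as np
    X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    doubled = _transform_selected(X, lambda A: A * 2, selected=[0])
    assert np.allclose(doubled, [[2.0, 10.0], [4.0, 20.0], [6.0, 30.0]])
    return doubled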
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e lesser than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
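# Illustrative usage sketch (added for exposition; not part of the original module).
# With handle_unknown='ignore', a category value never seen during fit produces all-zero
# columns for that feature instead of raising, as described in the parameter docs above.
def _example_one_hot_unknown():
    import numpy as np
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(np.array([[0], [1], [2]]))
    out = enc.transform(np.array([[1], [3]]))  # the value 3 was never seen in fit
    # out == [[0., 1., 0.], [0., 0., 0.]]
    return out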
| bsd-3-clause | 1,813,260,841,768,625,000 | 34.973442 | 84 | 0.593049 | false |
bcui6611/healthchecker | Cheetah/Utils/Indenter.py | 17 | 4145 | """
Indentation maker.
@@TR: this code is unsupported and largely undocumented ...
This version is based directly on code by Robert Kuzelj
<[email protected]> and uses his directive syntax. Some classes and
attributes have been renamed. Indentation is output via
$self._CHEETAH__indenter.indent() to prevent '_indenter' being looked up on the
searchList and another one being found. The directive syntax will
soon be changed somewhat.
"""
import re
import sys
def indentize(source):
return IndentProcessor().process(source)
class IndentProcessor(object):
"""Preprocess #indent tags."""
LINE_SEP = '\n'
ARGS = "args"
INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
DIRECTIVE = re.compile(r"[ \t]*#")
WS = "ws"
WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")
INC = "++"
DEC = "--"
SET = "="
CHAR = "char"
ON = "on"
OFF = "off"
PUSH = "push"
POP = "pop"
def process(self, _txt):
result = []
for line in _txt.splitlines():
match = self.INDENT_DIR.match(line)
if match:
#is indention directive
args = match.group(self.ARGS).strip()
if args == self.ON:
line = "#silent $self._CHEETAH__indenter.on()"
elif args == self.OFF:
line = "#silent $self._CHEETAH__indenter.off()"
elif args == self.INC:
line = "#silent $self._CHEETAH__indenter.inc()"
elif args == self.DEC:
line = "#silent $self._CHEETAH__indenter.dec()"
elif args.startswith(self.SET):
level = int(args[1:])
line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level":level}
elif args.startswith('chars'):
self.indentChars = eval(args.split('=')[1])
line = "#silent $self._CHEETAH__indenter.setChars(%(level)d)" % {"level":level}
elif args.startswith(self.PUSH):
line = "#silent $self._CHEETAH__indenter.push()"
elif args.startswith(self.POP):
line = "#silent $self._CHEETAH__indenter.pop()"
else:
match = self.DIRECTIVE.match(line)
if not match:
#is not another directive
match = self.WHITESPACES.match(line)
if match:
size = len(match.group("ws").expandtabs(4))
line = ("${self._CHEETAH__indenter.indent(%(size)d)}" % {"size":size}) + line.lstrip()
else:
line = "${self._CHEETAH__indenter.indent(0)}" + line
result.append(line)
return self.LINE_SEP.join(result)
class Indenter(object):
"""
A class that keeps track of the current indentation level.
.indent() returns the appropriate amount of indentation.
"""
On = 1
Level = 0
Chars = ' '
LevelStack = []
def on(self):
self.On = 1
def off(self):
self.On = 0
def inc(self):
self.Level += 1
def dec(self):
"""decrement can only be applied to values greater zero
values below zero don't make any sense at all!"""
if self.Level > 0:
self.Level -= 1
def push(self):
self.LevelStack.append(self.Level)
def pop(self):
"""the levestack can not become -1. any attempt to do so
sets the level to 0!"""
if len(self.LevelStack) > 0:
self.Level = self.LevelStack.pop()
else:
self.Level = 0
def setLevel(self, _level):
"""the leve can't be less than zero. any attempt to do so
sets the level automatically to zero!"""
if _level < 0:
self.Level = 0
else:
self.Level = _level
def setChar(self, _chars):
self.Chars = _chars
def indent(self, _default=0):
if self.On:
return self.Chars * self.Level
return " " * _default
| apache-2.0 | -313,503,457,145,121,600 | 32.699187 | 110 | 0.525935 | false |
xkmato/rapidpro-tools | fix-contact-names.py | 2 | 1954 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
from docopt import docopt
from rapidpro_tools import logger, change_logging_level
from rapidpro_tools.contacts import (export_contact_names_to,
fix_contact_names_from)
help = ("""Usage: update-contacts.py [-v] [-h] -f FILE (export|import)
-h --help Display this help message
-v --verbose Display DEBUG messages
-f --file=<file> File Path to export/import CSV data to/from
This script exports all contact names to a CSV file or imports and updates"""
""" contact names based on a CSV file""")
def main(arguments):
debug = arguments.get('--verbose') or False
change_logging_level(debug)
logger.info("Starting fix-contacts-names script...{}"
.format(" [DEBUG mode]" if debug else ""))
options = {
'export': arguments.get('export') or False,
'import': arguments.get('import') or False,
'file': arguments.get('--file') or None,
}
if options['export'] + options['import'] != 1:
logger.error("You must specify whether to export or import data")
return 1
if not options['file']:
logger.error("You must specify a file path")
return 1
if options['import'] and not os.path.exists(options['file']):
logger.error("The filepath `{}` does not exist."
.format(options['file']))
return 1
if options['export']:
with open(options['file'], 'w') as fio:
export_contact_names_to(fio)
if options['import']:
with open(options['file'], 'r') as fio:
fix_contact_names_from(fio)
logger.info("-- All done. :)")
if __name__ == '__main__':
main(docopt(help, version=0.1))
| cc0-1.0 | 4,315,070,449,713,544,000 | 29.53125 | 75 | 0.582907 | false |
Russell-IO/ansible | lib/ansible/modules/remote_management/oneview/oneview_san_manager.py | 146 | 7717 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager
short_description: Manage OneView SAN Manager resources
description:
- Provides an interface to manage SAN Manager resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
      - Indicates the desired state for the SAN Manager resource.
- C(present) ensures data properties are compliant with OneView.
- C(absent) removes the resource from OneView, if it exists.
- C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
default: present
choices: [present, absent, connection_information_set]
data:
description:
- List with SAN Manager properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
providerDisplayName: Brocade Network Advisor
connectionInfo:
- name: Host
value: 172.18.15.1
- name: Port
value: 5989
- name: Username
value: username
- name: Password
value: password
- name: UseSsl
value: true
delegate_to: localhost
- name: Ensure a Device Manager for the Cisco SAN Provider is present
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.20.1
providerDisplayName: Cisco
connectionInfo:
- name: Host
value: 172.18.20.1
- name: SnmpPort
value: 161
- name: SnmpUserName
value: admin
- name: SnmpAuthLevel
value: authnopriv
- name: SnmpAuthProtocol
value: sha
- name: SnmpAuthString
value: password
delegate_to: localhost
- name: Sets the SAN Manager connection information
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: connection_information_set
data:
connectionInfo:
- name: Host
value: '172.18.15.1'
- name: Port
value: '5989'
- name: Username
value: 'username'
- name: Password
value: 'password'
- name: UseSsl
value: true
delegate_to: localhost
- name: Refreshes the SAN Manager
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.15.1
refreshState: RefreshPending
delegate_to: localhost
- name: Delete the SAN Manager recently created
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: absent
data:
name: '172.18.15.1'
delegate_to: localhost
'''
RETURN = '''
san_manager:
description: Has the OneView facts about the SAN Manager.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
class SanManagerModule(OneViewModuleBase):
MSG_CREATED = 'SAN Manager created successfully.'
MSG_UPDATED = 'SAN Manager updated successfully.'
MSG_DELETED = 'SAN Manager deleted successfully.'
MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
argument_spec = dict(
state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
data=dict(type='dict', required=True)
)
def __init__(self):
super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
self.resource_client = self.oneview_client.san_managers
def execute_module(self):
if self.data.get('connectionInfo'):
for connection_hash in self.data.get('connectionInfo'):
if connection_hash.get('name') == 'Host':
resource_name = connection_hash.get('value')
elif self.data.get('name'):
resource_name = self.data.get('name')
else:
msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
raise OneViewModuleValueError(msg.format())
resource = self.resource_client.get_by_name(resource_name)
if self.state == 'present':
changed, msg, san_manager = self._present(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
elif self.state == 'absent':
return self.resource_absent(resource, method='remove')
elif self.state == 'connection_information_set':
changed, msg, san_manager = self._connection_information_set(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
def _present(self, resource):
if not resource:
provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
else:
merged_data = resource.copy()
merged_data.update(self.data)
# Remove 'connectionInfo' from comparison, since it is not possible to validate it.
resource.pop('connectionInfo', None)
merged_data.pop('connectionInfo', None)
if self.compare(resource, merged_data):
return False, self.MSG_ALREADY_PRESENT, resource
else:
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _connection_information_set(self, resource):
if not resource:
return self._present(resource)
else:
merged_data = resource.copy()
merged_data.update(self.data)
merged_data.pop('refreshState', None)
if not self.data.get('connectionInfo', None):
raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _get_provider_uri_by_display_name(self, data):
display_name = data.get('providerDisplayName')
provider_uri = self.resource_client.get_provider_uri(display_name)
if not provider_uri:
raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
return provider_uri
def main():
SanManagerModule().run()
if __name__ == '__main__':
main()
| gpl-3.0 | -5,382,514,417,054,802,000 | 34.237443 | 137 | 0.640145 | false |
amitgroup/parts-net | scripts/scripy1.py | 1 | 5953 | from __future__ import division, print_function, absolute_import
#from pnet.vzlog import default as vz
import numpy as np
import amitgroup as ag
import itertools as itr
import sys
import os
import pnet
import time
def test(ims, labels, net):
yhat = net.classify(ims)
return yhat == labels
if pnet.parallel.main(__name__):
ag.set_verbose(True)
print("1")
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed')
#parser.add_argument('param', metavar='<param>', type=string)
parser.add_argument('model',metavar='<model file>',type=argparse.FileType('rb'), help='Filename of model file')
print("ohhh")
parser.add_argument('data',metavar='<mnist data file>',type=argparse.FileType('rb'),help='Filename of data file')
parser.add_argument('label',metavar='<mnist data file>',type=argparse.FileType('rb'),help='Filename of data file')
parser.add_argument('numOfClassModel',metavar='<numOfClassModel>', type=int, help='num Of Class Model')
args = parser.parse_args()
param = args.model
numOfClassModel = args.numOfClassModel
param = args.data
data = np.load(param)
label = np.load(args.label)
net = pnet.PartsNet.load(args.model)
unsup_training_times = []
sup_training_times = []
testing_times = []
error_rates = []
all_num_parts = []
ims10k = data[:10000]
label10k = np.array(label[:10000])
np.save('a.npy',label10k)
ims2k = data[10000:12000]
label2k = np.array(label[10000:12000])
np.save('b.npy',label2k)
print(ims2k.shape)
digits = range(10)
sup_ims = []
sup_labels = []
# Load supervised training data
for d in digits:
ims0 = ims10k[label10k == d]
sup_ims.append(ims0)
sup_labels.append(d * np.ones(len(ims0), dtype=np.int64))
sup_ims = np.concatenate(sup_ims, axis=0)
sup_labels = np.concatenate(sup_labels, axis=0)
print("=================")
print(sup_ims.shape)
print(sup_labels)
for classifier in 'mixture', 'svm':
for rotspread in [0, 1]:
net.layers[0]._settings['rotation_spreading_radius'] = rotspread
print('Classifier:', classifier, 'Rotational spreading:', rotspread)
if classifier == 'mixture':
cl = pnet.MixtureClassificationLayer(n_components=numOfClassModel, min_prob=1e-5)
elif classifier == 'svm':
cl = pnet.SVMClassificationLayer(C=None)
clnet = pnet.PartsNet([net, cl])
start1 = time.time()
print('Training supervised...')
print(sup_ims.shape)
clnet.train(sup_ims, sup_labels)
print('Done.')
end1 = time.time()
#print("Now testing...")
### Test ######################################################################
corrects = 0
total = 0
if 0:
test_ims, test_labels = mnist_data['test_image'], mnist_data['test_label']
else:
test_ims = ims2k
test_labels = label2k
with gv.Timer("Split to batches"):
ims_batches = np.array_split(test_ims, 10)
labels_batches = np.array_split(test_labels, 10)
def format_error_rate(pr):
return "{:.2f}%".format(100*(1-pr))
#with gv.Timer('Testing'):
start2 = time.time()
args = (tup+(clnet,) for tup in itr.izip(ims_batches, labels_batches))
for i, res in enumerate(pnet.parallel.starmap(test, args)):
corrects += res.sum()
total += res.size
pr = corrects / total
end2 = time.time()
error_rate = 1.0 - pr
num_parts = 0#net.layers[1].num_parts
error_rates.append(error_rate)
            print('error rate', error_rate * 100, 'num parts', num_parts)  #, 'num parts 2', net.layers[3].num_parts)
            unsup_training_times.append(0)  # no unsupervised training stage is timed in this script
sup_training_times.append(end1 - start1)
testing_times.append(end2 - start2)
#print('times', end0-start0, end1-start1, end2-start2)
all_num_parts.append(num_parts)
#vz.section('MNIST')
#gv.img.save_image(vz.generate_filename(), test_ims[0])
#gv.img.save_image(vz.generate_filename(), test_ims[1])
#gv.img.save_image(vz.generate_filename(), test_ims[2])
# Vz
#net.infoplot(vz)
if 0:
print(r"{ppl} & {depth} & {num_parts} & {unsup_time:.1f} & {test_time:.1f} & ${rate:.2f} \pm {std:.2f}$ \\".format(
ppl=2,
depth=maxdepth,
num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
unsup_time=np.median(unsup_training_times) / 60,
#sup_time=np.median(sup_training_times),
test_time=np.median(testing_times) / 60,
rate=100*np.mean(error_rates),
std=100*np.std(error_rates)))
print(r"{ppl} {depth} {num_parts} {unsup_time} {test_time} {rate} {std}".format(
ppl=2,
depth=maxdepth,
num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)),
unsup_time=np.median(unsup_training_times) / 60,
#sup_time=np.median(sup_training_times),
test_time=np.median(testing_times) / 60,
rate=100*np.mean(error_rates),
std=100*np.std(error_rates)))
#np.savez('gdata2-{}-{}-{}.npz'.format(maxdepth, split_criterion, split_entropy), all_num_parts=all_num_parts, unsup_time=unsup_training_times, test_time=testing_times, rates=error_rates)
print('mean error rate', np.mean(error_rates) * 100)
#net.save(args.model)
| bsd-3-clause | 2,311,712,381,770,744,000 | 34.434524 | 195 | 0.566101 | false |
scripni/rethinkdb | external/v8_3.30.33.16/build/gyp/pylib/gyp/MSVSVersion.py | 486 | 15539 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name == '2013' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 non-Express has a x64-x86 cross that we want to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
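# Composed usage (illustrative key only; requires reg.exe, i.e. Windows or
# cygwin):
#   install_dir = _RegistryGetValue(
#       r'HKLM\Software\Microsoft\VisualStudio\12.0', 'InstallDir')
# _RegistryGetValue returns None rather than raising when the key is absent.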
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-12 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '12.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
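# Minimal driver sketch (not part of gyp itself).  On machines without Visual
# Studio the 'auto' detection falls back to a default 2005 object, so this is
# safe to run anywhere; DefaultToolset() may then simply be None.
if __name__ == '__main__':
  msvs = SelectVisualStudioVersion('auto')
  print('Default MSBuild toolset: %s' % msvs.DefaultToolset())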
| agpl-3.0 | -1,196,246,266,234,046,200 | 36.992665 | 80 | 0.555184 | false |
l1ll1/cvl-fabric-launcher | pyinstaller-2.1/PyInstaller/lib/unittest2/case.py | 11 | 43241 | """Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
if sys.version_info[:2] == (2,3):
from sets import Set as set
from sets import ImmutableSet as frozenset
from unittest2 import result
from unittest2.util import\
safe_repr, safe_str, strclass,\
unorderable_list_difference
from unittest2.compatibility import wraps
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestResult.skip() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
# can't use super because Python 2.4 exceptions are old style
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
skip_wrapper = wraps(test_item)(skip_wrapper)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
wrapper = wraps(func)(wrapper)
return wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, basestring):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = True
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
setUpClass = classmethod(setUpClass)
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
tearDownClass = classmethod(tearDownClass)
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("Use of a TestResult without an addSkip method is deprecated",
DeprecationWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure, e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except Exception:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass,'__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException, "%s not raised" % excName
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by checking that the difference
between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
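# Illustration (not part of the original module): with the default 7 places,
#   self.assertAlmostEqual(1.00000001, 1.0) passes because round(1e-08, 7) == 0,
# while
#   self.assertAlmostEqual(1.0002, 1.0, delta=0.001) passes via the delta form,
# since abs(1.0002 - 1.0) <= 0.001.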
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by checking that the difference
between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80*8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
max_diff: Maximum size of the diff, larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assert_(isinstance(d1, dict), 'First argument is not a dictionary')
self.assert_(isinstance(d2, dict), 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
else:
try:
are_equal = (value == actual[key])
except UnicodeDecodeError:
are_equal = False
if not are_equal:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join([safe_repr(m) for m in
missing])
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = expected_seq[:]
expected.sort()
actual = actual_seq[:]
actual.sort()
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assert_(isinstance(first, basestring), (
'First argument is not a string'))
self.assert_(isinstance(second, basestring), (
'Second argument is not a string'))
if first != second:
standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
if callable_obj is None:
return _AssertRaisesContext(expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception, exc_value:
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException, "%s not raised" % excName
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
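# Sketch of wrapping a plain function (hypothetical test, not part of unittest2):
#
#   def test_upper():
#       assert 'abc'.upper() == 'ABC'
#
#   case = FunctionTestCase(test_upper, description='upper() smoke test')
#   result = case.defaultTestResult()
#   case.run(result)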
| gpl-3.0 | 3,306,664,263,377,698,000 | 38.489498 | 114 | 0.570015 | false |
achang97/YouTunes | lib/python2.7/site-packages/werkzeug/contrib/fixers.py | 104 | 10179 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as well as possible, but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
and you have problems with your app root being set to the cgi script's path
instead of the path users are going to visit
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
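# Typical use (app is a hypothetical WSGI application object): wrap an app that
# is served by a CGI/FastCGI script mounted under /myapp, so PATH_INFO reflects
# the public URL rather than the cgi script's path:
#   application = CGIRootFix(app, app_root='/myapp')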
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. While
Werkzeug-based applications already can use
:py:func:`werkzeug.wsgi.get_host` to retrieve the current host even if
behind proxy setups, this middleware can be used for applications which
access the WSGI environment directly.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
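# Typical use behind a single reverse proxy (app is a hypothetical WSGI app):
#   app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=1)
# REMOTE_ADDR, HTTP_HOST and the URL scheme are then taken from the
# X-Forwarded-* headers set by the proxy.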
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header, no matter if it's present or
not or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removal of `Vary` headers for unsupported mimetypes which
causes trouble with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and is available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
| mit | -4,742,932,228,699,254,000 | 39.074803 | 82 | 0.630219 | false |
OptiPop/external_chromium_org | tools/telemetry/telemetry/core/browser_credentials_unittest.py | 47 | 2272 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import unittest
from telemetry.core import browser_credentials
SIMPLE_CREDENTIALS_STRING = """
{
"google": {
"username": "example",
"password": "asdf"
}
}
"""
class BackendStub(object):
def __init__(self, credentials_type):
self.login_needed_called = None
self.login_no_longer_needed_called = None
self.credentials_type = credentials_type
def LoginNeeded(self, config, _, tab):
self.login_needed_called = (config, tab)
return True
def LoginNoLongerNeeded(self, tab):
self.login_no_longer_needed_called = (tab, )
class TestBrowserCredentials(unittest.TestCase):
def testCredentialsInfrastructure(self):
google_backend = BackendStub("google")
othersite_backend = BackendStub("othersite")
browser_cred = browser_credentials.BrowserCredentials(
[google_backend,
othersite_backend])
try:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(SIMPLE_CREDENTIALS_STRING)
browser_cred.credentials_path = f.name
# Should be true because it has a password and a backend.
self.assertTrue(browser_cred.CanLogin('google'))
# Should be false because it has no password.
self.assertFalse(browser_cred.CanLogin('othersite'))
# Should fail because it has no backend.
self.assertRaises(
Exception,
lambda: browser_cred.CanLogin('foobar'))
tab = {}
ret = browser_cred.LoginNeeded(tab, 'google')
self.assertTrue(ret)
self.assertTrue(google_backend.login_needed_called is not None)
self.assertEqual(tab, google_backend.login_needed_called[0])
self.assertEqual("example",
google_backend.login_needed_called[1]["username"])
self.assertEqual("asdf",
google_backend.login_needed_called[1]["password"])
browser_cred.LoginNoLongerNeeded(tab, 'google')
self.assertTrue(google_backend.login_no_longer_needed_called is not None)
self.assertEqual(tab, google_backend.login_no_longer_needed_called[0])
finally:
os.remove(f.name)
| bsd-3-clause | 6,395,106,982,241,786,000 | 30.555556 | 79 | 0.68618 | false |
sbidoul/buildbot | worker/buildbot_worker/compat.py | 9 | 2329 | # coding=utf-8
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Helpers for handling compatibility differences
between Python 2 and Python 3.
"""
from __future__ import absolute_import
from __future__ import print_function
from future.utils import text_type
if str != bytes:
# On Python 3 and higher, str and bytes
# are not equivalent. We must use StringIO for
# doing io on native strings.
from io import StringIO as NativeStringIO
else:
# On Python 2 and older, str and bytes
# are equivalent. We must use BytesIO for
# doing io on native strings.
from io import BytesIO as NativeStringIO
def bytes2NativeString(x, encoding='utf-8'):
"""
Convert C{bytes} to a native C{str}.
On Python 3 and higher, str and bytes
are not equivalent. In this case, decode
the bytes, and return a native string.
On Python 2 and lower, str and bytes
are equivalent. In this case, just
just return the native string.
@param x: a string of type C{bytes}
@param encoding: an optional codec, default: 'utf-8'
@return: a string of type C{str}
"""
if isinstance(x, bytes) and str != bytes:
return x.decode(encoding)
return x
def unicode2bytes(x, encoding='utf-8'):
"""
Convert a unicode string to C{bytes}.
@param x: a unicode string, of type C{unicode} on Python 2,
or C{str} on Python 3.
@param encoding: an optional codec, default: 'utf-8'
@return: a string of type C{bytes}
"""
if isinstance(x, text_type):
x = x.encode(encoding)
return x
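# Illustrative round-trips (assuming the default 'utf-8' codec):
#   unicode2bytes(u'snowman \u2603') -> b'snowman \xe2\x98\x83'
#   bytes2NativeString(b'abc') -> 'abc' (decoded on Python 3, unchanged on Python 2)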
__all__ = [
"NativeStringIO",
"bytes2NativeString",
"unicode2bytes"
]
| gpl-2.0 | -2,601,795,805,233,596,400 | 29.246753 | 79 | 0.691284 | false |
incaser/odoo-odoo | addons/procurement_jit/procurement_jit.py | 244 | 1543 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
_inherit = "procurement.order"
def create(self, cr, uid, vals, context=None):
context = context or {}
procurement_id = super(procurement_order, self).create(cr, uid, vals, context=context)
if not context.get('procurement_autorun_defer'):
self.run(cr, uid, [procurement_id], context=context)
self.check(cr, uid, [procurement_id], context=context)
return procurement_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,846,236,737,409,919,200 | 41.861111 | 94 | 0.625405 | false |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/imperial/hardened_swamp_trooper_25.py | 2 | 1496 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_swamp_trooper_hard')
mobileTemplate.setLevel(25)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_swamp_trooper.iff')
templates.add('object/mobile/shared_dressed_swamp_trooper_m.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('hardened_swamp_troop_25', mobileTemplate)
return | lgpl-3.0 | -2,915,669,621,269,030,400 | 33.813953 | 126 | 0.832219 | false |
ActiveState/code | recipes/Python/576531_Circle/recipe-576531.py | 1 | 15215 | #On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah Mohamed Salla Allahu Aliahi Wassalam.
#Author :Fouad Teniou
#Date : 08/10/08
#Version : 2.4
""" Class of an equation of a circle of the form Ax^2 + Ay^2 + Dx + Ey + F = 0 (A !=0)
it represents a circle, a single point, or has no graph, depending on the radius value. And a class
of an equation for the circle of radius r and centred at point (x0,y0). """
import math
class Circle(object):
""" Class that represent an equation of a circle
with A,D,E,F constants properties """
def __init__(self, Avalue,Dvalue,Evalue,Fvalue):
""" Circle construction takes A,D,E,F Constants """
self.__A = float(Avalue)
self.__D = float(Dvalue)
self.__E = float(Evalue)
self.__F = float(Fvalue)
self._b = chr(253)
self._a = self._checkSign(self.__A)
self._d= self._checkSign(self.__D)
self._e = self._checkSign(self.__E)
self._f = self._checkSign(self.__F)
self._g = ((self.__D/self.__A)/2)
self._g1= self.__D/2
self._h =((self.__E/self.__A)/2)
self._h1 = self.__E/2
self._i = self._checkSign(self._g)
self._j = self._checkSign(self._h)
self._k = (-self.__F/self.__A + self._g**2 +self._h**2)
self._k1= (-self.__F + self._g1**2 +self._h1**2)
self._l = "%2.2f" % math.sqrt(abs(self._k))
self._l1 = "%2.2f" % math.sqrt(abs(self._k1))
self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
(self._i,self._g,self._b,self._j,self._h,self._b,self._k)
self._m1 = "(x%s%s)%s+(y%s%s)%s = %s" % \
(self._i,self._g1,self._b,self._j,self._h1,self._b,self._k1)
self._n = "(%s,%s)" % (-self._g,-self._h)
self._n1 = "(%s,%s)" % (-self._g1,-self._h1)
def __str__(self):
""" String representation of the circle equation,
standard form , centre and radius """
try:
math.sqrt(self._k)
#Circle raises zero degenerate case
assert math.sqrt(self._k) != 0,"The graph is the single point %s" % \
Circle.centre(self)
            if self.__A == 1:
return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" %\
(self._b,self._b,self._d,int(self.D),self._e, \
int(self.E),self._f,int(self.F),
                'Standard form','Centre(x0,y0)','Radius r', \
self._m1,Circle.centre(self),Circle.radius(self))
else:
return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" %\
                (int(self.A),self._b,int(self.A),self._b,self._d,int(self.D),self._e, \
int(self.E),self._f,int(self.F),
                'Standard form', 'Centre(x0,y0)','Radius r', \
self._m,Circle.centre(self),Circle.radius(self))
#Circle raises Negative number degenerate case
except ValueError:
raise ValueError,\
" r%s < 0 : Degenerate case has no graph" % self._b
def getA(self):
""" Get method for A attribute """
if self.__A != 0:
return self.__A
else:
raise ValueError,\
" A value should be different than zero "
def setA(self,value):
""" Set method for A attribute """
self.__A = value
def delA(self):
""" Delete method for A attribute """
del self.__A
#Create A property
A = property(getA,setA,delA,"A constant")
def getD(self):
""" Get method for D attribute """
return self.__D
def setD(self,value):
""" Set method for D attribute """
self.__D = value
def delD(self):
""" Delete method for D attribute """
del self.__D
#Create D property
D = property(getD,setD,delD,"D constant")
def getE(self):
""" Get method for E attribute """
return self.__E
def setE(self,value):
""" Set method for E attribute """
self.__E = value
def delE(self):
""" Delete method for E attribute """
del self.__E
#Create E property
E = property(getE,setE,delE,"E constant")
def getF(self):
""" Get method for F attribute """
return self.__F
def setF(self,value):
""" Set method for F attribute """
self.__F = value
def delF(self):
""" Delete method for F attribute """
del self.__F
#Create F property
F = property(getF,setF,delF,"F constant")
def _checkSign(self,value):
""" Utility method to check the values’ signs and return a sign string """
if value >= 0:
return "+"
else:
return ""
def radius(self):
""" Compute radius of a circle """
if self.__A == 1:
return self._l1
else:
return self._l
def centre(self):
""" Compute centre(x0,y0) of a circle """
if self.__A == 1:
return self._n1
else:
return self._n
class Equation(Circle):
    """Class that represents a radius and the centre of a circle """
    def __init__(self,x,y,radius):
        """Equation construction takes centre(xValue,yValue)
and radius"""
self.__x = float(x)
self.__y = float(y)
self.__radius = float(radius)
self._o = chr(253)
self._p = self.__radius**2
self._q = self._checkSign(-self.__x)
self._r = self._checkSign(-self.__y)
self._s = "(x%s%s)%s + (y%s%s)%s = %s " % \
(self._q,-self.__x,self._o,self._r,-self.__y,self._o,self._p)
self._t = self.__x**2 + self.__y**2 -self._p
self._u = self._checkSign(self._t)
self._v = "x%s + y%s %s %sx %s %sy %s %s = 0 " % \
(self._o,self._o,self._q,-self.__x*2,self._r,-self.__y*2,self._u,self._t)
def __str__(self):
""" String representation of the circle equation, standard form ,centre and radius """
#Equation raises radius value < 0
assert self.__radius > 0, "<Radius value should be greater than zero"
return ( "\n<Equation for the circle of radius (%s)\
centred at (%s,%s) is : \n\n%s < -- > %s" ) % \
(self.__radius,self.__x,self.__y,self._s,self._v)
if __name__ == "__main__":
circle1 = Circle(16,40,16,-7)
print circle1
    #Though students might use only the radius and centre values
    print circle1.radius()
    print circle1.centre()
circle2 = Circle(2,24,0,-81)
print circle2
del circle2.A
circle2.A = 1
print circle2
equation = Equation(2,5,3)
print equation
for doc in (Circle.A,Circle.D,Circle.E,Circle.F):
print doc.__doc__,doc.fget.func_name,doc.fset.func_name,doc.fdel.func_name
########################################################################################
#Version : Python 3.2
#import math
#class Circle(object):
# """ Class that represent an equation of a circle
# with A,D,E,F constants properties"""
#
# def __init__(self,Avalue,Dvalue,Evalue,Fvalue):
# """ Circle constructor takes A,D,F,E constants """
#
# self.__A = float(Avalue)
# self.__D = float(Dvalue)
# self.__E = float(Evalue)
# self.__F = float(Fvalue)
#
# self._b = chr(178)
# self._a = self._checkSign(self.__A)
# self._d = self._checkSign(self.__D)
# self._e = self._checkSign(self.__E)
# self._f = self._checkSign(self.__F)
# self._g = ((self.__D/self.__A)/2)
# self._g1 = self.D/2
# self._h = ((self.__E/self.__A)/2)
# self._h1 = self.E/2
# self._i = self._checkSign(self._g)
# self._j = self._checkSign(self._h)
# self._k = (-self.__F/self.__A +self._g**2 + self._h**2)
# self._k1= (-self.__F +self._g1**2 + self._h1**2)
# self._l = "%2.2f" % math.sqrt(abs(self._k))
# self._l1= "%2.2f" % math.sqrt(abs(self._k1))
# self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g,self._b,self._j,self._h,self._b,self._k)
# self._m1 ="(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g1,self._b,self._j,self._h1,self._b,self._k1)
# self._n = "(%s,%s)" % (-self._g,-self._h)
# self._n1= "(%s,%s)" % (-self._g1,-self._h1)
#
#
# def squared(self):
# self._w =(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2)
# return self._w
# def standardForm(self):
# return "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._checkSign(((self.__D/self.__A)/2)),((self.__D/self.__A)/2),chr(178),self._checkSign(((self.__E/self.__A)/2)),((self.__E/self.__A)/2),chr(178),(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2))
#
# def __str__(self):
# """ String representation of the circle equation,
# standard form, centre and radius"""
#
# try:
# math.sqrt(Circle.squared(self))
#
# #Circle raises zero degenerate case
# assert math.sqrt(Circle.squared(self)) != 0,"The graph is the single point %s" % \
# Circle.centre(self)
# if self.__A == 1:
#
# return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (self._b,self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# self._m1,Circle.centre(self),Circle.radius(self))
# else:
# return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (int(self.A),self._b,int(self.A),self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# Circle.standardForm(self),Circle.centre(self),Circle.radius(self))
#
# #Circle raises Negative number degenerate case
# except ValueError:
# raise ValueError("r%s < 0 : Degenerate case has no graph" % self._b)
#
# def getA(self):
# """ Get method for A attribute """
# if self.__A !=0:
# return self.__A
# else:
#            raise ValueError("A value should be different than zero")
#
# def setA(self,value):
# """ Set method for A attribute """
#
# self.__A = value
#
# def delA(self):
#        """Delete method for A attribute"""
#
# del self.__A
#
# #Create a property
# A = property(getA,setA,delA,"A constant")
#
# def getD(self):
# """ Get method for D attribute """
#
# return self.__D
#
# def setD(self,value):
# """ Set method for D attribute """
#
# self.__D = value
#
# def delD(self):
#        """Delete method for D attribute"""
# del self.__D
#
# #Create a property
# D = property(getD,setD,delD,"D constant")
# def getE(self):
# """ Get method for E attribute """
# return self.__E
#
# def setE(self,value):
# """ Set method for E attribute """
#
# self.__E = value
#
# def delE(self):
#        """Delete method for E attribute"""
#
# del self.__E
#
# #Create a property
# E = property(getE,setE,delE,"E constant")
#
# def getF(self):
# """ Get method for F attribute """
#
# return self.__F
#
# def setF(self,value):
# """ Set method for F attribute """
#
# self.__F = value
#
# def delF(self):
#        """Delete method for F attribute"""
#
# del self.__F
#
# #Create a property
# F = property(getF,setF,delF,"F constant")
#
# def _checkSign(self,value):
#        """ Utility method to check the values' signs
# and return a sign string"""
#
# if value >= 0:
# return "+"
# else :
# return ""
#
# def radius(self):
# """ Computes radius of a circle """
# if self.__A ==1:
# return self._l1
# else:
# return "%2.2f" % math.sqrt(abs(Circle.squared(self)))
#
# def centre(self):
# """ Computes centre(x0,y0) of a circle """
# if self.__A == 1:
# return self._n1
# else:
# return "(%s,%s)" % (-((self.__D/self.__A)/2),-((self.__E/self.__A)/2))
#
#
#
#class Equation(Circle):
# """ class that represent a radius and the centre of a circle """
#
# def __init__(self,x,y,radius):
# """ Equation construction takes centre(xValue,yValue)
# and radius """
#
# self.__x = float(x)
# self.__y = float(y)
# self.__radius = float(radius)
#
# self._o = chr(178)
# self._p = self.__radius**2
# self._q = self._checkSign(-self.__x)
# self._r = self._checkSign(-self.__y)
# self._s = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._q,-self.__x,self._o,self._r,-self.__y,self._o,self._p)
# self._t = self.__x**2 + self.__y**2 - self._p
# self._u = self._checkSign(self._t)
# self._v = "x%s + y%s %s%sx %s%sy %s%s = 0" % \
# (self._o,self._o,self._q,-self.__x*2,self._r,-self.__y*2,self._u,self._t)
#
# def __str__(self):
# """ String representation of the circle equation, standard form,
# centre and radius"""
#
# #Equation raises radius value < 0
# assert self.__radius > 0, "<radius value should be greater than zero"
#
# return ("\n<Equation for the circle of radius (%s)\
# centred at(%s,%s) is :\n\n%s <--> %s") %\
# (self.__radius,self.__x,self.__y,self._s,self._v )
#
#
#if __name__ == "__main__":
# circle1 = Circle(10,40,16,-7)
# print(circle1)
#
# print(circle1.radius())
# print(circle1.centre())
# circle1.delA
# circle1.A=1
# print(circle1)
# circle3 = Circle(5,24,0,-81)
# print(circle3)
#
# circle3.E =80
# print(circle3)
#
# equation = Equation(2,5,3)
# print(equation)
#
#
# for doc in (Circle.A,Circle.D,Circle.E,Circle.F):
# print(doc.__doc__,"=",doc.fget.__name__,doc.fset.__name__,doc.fdel.__name__)
#######################################################################################
#<Equation of a circle : 10x² + 10y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.0)²+(y+0.8)² = 5.34 (-2.0,-0.8) 2.31
#2.31
#(-2.0,-0.8)
#<Equation of a circle : x² + y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+20.0)²+(y+8.0)² = 471.0 (-20.0,-8.0) 21.70
#<Equation of a circle : 5x² + 5y² + 24x + 0y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+0.0)² = 21.96 (-2.4,-0.0) 4.69
#<Equation of a circle : 5x² + 5y² + 24x + 80y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+8.0)² = 85.96 (-2.4,-8.0) 9.27
#<Equation for the circle of radius (3.0) centred at(2.0,5.0) is :
#(x-2.0)²+(y-5.0)² = 9.0 <--> x² + y² -4.0x -10.0y +20.0 = 0
#A constant = getA setA delA
#D constant = getD setD delD
#E constant = getE setE delE
#F constant = getF setF delF
| mit | 2,432,161,598,260,453,000 | 29.325349 | 246 | 0.506747 | false |
jdramani/servo | python/mach/mach/registrar.py | 46 | 3774 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
from .base import MachError
INVALID_COMMAND_CONTEXT = r'''
It looks like you tried to run a mach command from an invalid context. The %s
command failed to meet the following conditions: %s
Run |mach help| to show a list of all commands available to the current context.
'''.lstrip()
class MachRegistrar(object):
"""Container for mach command and config providers."""
def __init__(self):
self.command_handlers = {}
self.commands_by_category = {}
self.settings_providers = set()
self.categories = {}
self.require_conditions = False
def register_command_handler(self, handler):
name = handler.name
if not handler.category:
raise MachError('Cannot register a mach command without a '
'category: %s' % name)
if handler.category not in self.categories:
raise MachError('Cannot register a command to an undefined '
'category: %s -> %s' % (name, handler.category))
self.command_handlers[name] = handler
self.commands_by_category[handler.category].add(name)
def register_settings_provider(self, cls):
self.settings_providers.add(cls)
def register_category(self, name, title, description, priority=50):
self.categories[name] = (title, description, priority)
self.commands_by_category[name] = set()
@classmethod
def _condition_failed_message(cls, name, conditions):
msg = ['\n']
for c in conditions:
part = [' %s' % c.__name__]
if c.__doc__ is not None:
part.append(c.__doc__)
msg.append(' - '.join(part))
return INVALID_COMMAND_CONTEXT % (name, '\n'.join(msg))
def _run_command_handler(self, handler, context=None, debug_command=False, **kwargs):
cls = handler.cls
if handler.pass_context and not context:
raise Exception('mach command class requires context.')
if handler.pass_context:
instance = cls(context)
else:
instance = cls()
if handler.conditions:
fail_conditions = []
for c in handler.conditions:
if not c(instance):
fail_conditions.append(c)
if fail_conditions:
print(self._condition_failed_message(handler.name, fail_conditions))
return 1
fn = getattr(instance, handler.method)
if debug_command:
import pdb
result = pdb.runcall(fn, **kwargs)
else:
result = fn(**kwargs)
result = result or 0
assert isinstance(result, (int, long))
return result
def dispatch(self, name, context=None, argv=None, **kwargs):
"""Dispatch/run a command.
Commands can use this to call other commands.
"""
# TODO handler.subcommand_handlers are ignored
handler = self.command_handlers[name]
if handler.parser:
parser = handler.parser
# save and restore existing defaults so **kwargs don't persist across
# subsequent invocations of Registrar.dispatch()
old_defaults = parser._defaults.copy()
parser.set_defaults(**kwargs)
kwargs, _ = parser.parse_known_args(argv or [])
kwargs = vars(kwargs)
parser._defaults = old_defaults
return self._run_command_handler(handler, context=context, **kwargs)
Registrar = MachRegistrar()
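# Rough usage sketch (the category, command name and handler object here are
# illustrative placeholders, not things defined in this module):
#
#     Registrar.register_category('misc', 'Miscellaneous', 'Assorted commands')
#     Registrar.register_command_handler(handler)   # handler.category must be 'misc'
#     Registrar.dispatch('some-command', context=ctx, verbose=True)
#
# dispatch() looks up the handler by name, merges its parser defaults with the
# keyword arguments, checks the handler's conditions and then calls the method.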
| mpl-2.0 | 3,775,675,039,502,286,000 | 32.105263 | 89 | 0.606518 | false |
jm-begon/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
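# A possible follow-up (sketch, not part of the original example): the same
# average scores can be collected to pick n_clusters programmatically.
#
#     scores = {}
#     for n_clusters in range_n_clusters:
#         labels = KMeans(n_clusters=n_clusters, random_state=10).fit_predict(X)
#         scores[n_clusters] = silhouette_score(X, labels)
#     best_k = max(scores, key=scores.get)
#
# best_k is the candidate with the highest mean silhouette; as the docstring
# notes, it should still be weighed against the shape of the per-cluster plots.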
| bsd-3-clause | -7,326,693,292,645,972,000 | 40.737589 | 79 | 0.66644 | false |
ruchikd/Algorithms | Python/CreateLLFromBST/CreateLLFromBST.py | 1 | 1529 | #**************************************************************
# 9
# / \
# 5 11
# / \ / \
# 2 7 10 15
# / / \
# 1 4 6
# /
# 3
#**************************************************************
class Node:
val, left, right, parent = None, None, None, None
def __init__ (self, val):
self.val = val
self.left = None
self.right = None
def createTreeAsAbove():
root = Node(9)
root.left = Node(5)
root.left.left = Node(2)
root.left.left.left = Node(1)
root.left.right = Node(7)
root.left.right.left = Node(4)
root.left.right.left.left = Node(3)
root.left.right.right = Node(6)
root.right = Node(11)
root.right.left = Node(10)
root.right.right = Node (15)
return root
def getHeight(node):
if node is None:
return 0
lHeight = getHeight(node.left)
rHeight = getHeight(node.right)
if(lHeight > rHeight):
return lHeight + 1
else:
return rHeight + 1
def getList(node, height, list):
if node is None:
return
if height == 0:
list.append(node.val)
else:
getList(node.left, height-1, list)
getList(node.right, height-1, list)
return list
def levelOrderTraversal(node):
height = getHeight(node)
list = []
for x in range(height):
getList(node, x, list)
print list
def main():
root = createTreeAsAbove()
levelOrderTraversal(root)
if __name__ == '__main__':
main() | gpl-3.0 | 4,327,146,627,324,118,500 | 18.367089 | 63 | 0.498365 | false |
sccblom/vercors | deps/dafny/1.9.6/windows/z3/bin/z3util.py | 4 | 11933 | ############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 Python interface
#
# Authors: Leonardo de Moura (leonardo)
# ThanhVu (Vu) Nguyen <[email protected]>
############################################
"""
Usage:
import common_z3 as CM_Z3
"""
from z3 import *
def vset(seq, idfun=None, as_list=True):
    # This function preserves the order of arguments while removing duplicates.
# This function is from https://code.google.com/p/common-python-vu/source/browse/vu_common.py
# (Thanhu's personal code). It has been copied here to avoid a dependency on vu_common.py.
"""
order preserving
>>> vset([[11,2],1, [10,['9',1]],2, 1, [11,2],[3,3],[10,99],1,[10,['9',1]]],idfun=repr)
[[11, 2], 1, [10, ['9', 1]], 2, [3, 3], [10, 99]]
"""
def _uniq_normal(seq):
d_ = {}
for s in seq:
if s not in d_:
d_[s] = None
yield s
def _uniq_idfun(seq,idfun):
d_ = {}
for s in seq:
h_ = idfun(s)
if h_ not in d_:
d_[h_] = None
yield s
if idfun is None:
res = _uniq_normal(seq)
else:
res = _uniq_idfun(seq,idfun)
return list(res) if as_list else res
def get_z3_version(as_str=False):
major = ctypes.c_uint(0)
minor = ctypes.c_uint(0)
build = ctypes.c_uint(0)
rev = ctypes.c_uint(0)
Z3_get_version(major,minor,build,rev)
rs = map(int,(major.value,minor.value,build.value,rev.value))
if as_str:
return "{}.{}.{}.{}".format(*rs)
else:
return rs
def ehash(v):
"""
Returns a 'stronger' hash value than the default hash() method.
The result from hash() is not enough to distinguish between 2
z3 expressions in some cases.
Note: the following doctests will fail with Python 2.x as the
default formatting doesn't match that of 3.x.
>>> x1 = Bool('x'); x2 = Bool('x'); x3 = Int('x')
>>> print(x1.hash(),x2.hash(),x3.hash()) #BAD: all same hash values
783810685 783810685 783810685
>>> print(ehash(x1), ehash(x2), ehash(x3))
x_783810685_1 x_783810685_1 x_783810685_2
"""
if __debug__:
assert is_expr(v)
return "{}_{}_{}".format(str(v),v.hash(),v.sort_kind())
"""
In Z3, variables are called *uninterpreted* consts and
values are *interpreted* consts.
"""
def is_expr_var(v):
"""
EXAMPLES:
>>> is_expr_var(Int('7'))
True
>>> is_expr_var(IntVal('7'))
False
>>> is_expr_var(Bool('y'))
True
>>> is_expr_var(Int('x') + 7 == Int('y'))
False
>>> LOnOff, (On,Off) = EnumSort("LOnOff",['On','Off'])
>>> Block,Reset,SafetyInjection=Consts("Block Reset SafetyInjection",LOnOff)
>>> is_expr_var(LOnOff)
False
>>> is_expr_var(On)
False
>>> is_expr_var(Block)
True
>>> is_expr_var(SafetyInjection)
True
"""
return is_const(v) and v.decl().kind()==Z3_OP_UNINTERPRETED
def is_expr_val(v):
"""
EXAMPLES:
>>> is_expr_val(Int('7'))
False
>>> is_expr_val(IntVal('7'))
True
>>> is_expr_val(Bool('y'))
False
>>> is_expr_val(Int('x') + 7 == Int('y'))
False
>>> LOnOff, (On,Off) = EnumSort("LOnOff",['On','Off'])
>>> Block,Reset,SafetyInjection=Consts("Block Reset SafetyInjection",LOnOff)
>>> is_expr_val(LOnOff)
False
>>> is_expr_val(On)
True
>>> is_expr_val(Block)
False
>>> is_expr_val(SafetyInjection)
False
"""
return is_const(v) and v.decl().kind()!=Z3_OP_UNINTERPRETED
def get_vars(f,rs=[]):
"""
>>> x,y = Ints('x y')
>>> a,b = Bools('a b')
>>> get_vars(Implies(And(x+y==0,x*2==10),Or(a,Implies(a,b==False))))
[x, y, a, b]
"""
if __debug__:
assert is_expr(f)
if is_const(f):
if is_expr_val(f):
return rs
else: #variable
return vset(rs + [f],str)
else:
for f_ in f.children():
rs = get_vars(f_,rs)
return vset(rs,str)
def mk_var(name,vsort):
if vsort.kind() == Z3_INT_SORT:
v = Int(name)
elif vsort.kind() == Z3_REAL_SORT:
v = Real(name)
elif vsort.kind() == Z3_BOOL_SORT:
v = Bool(name)
elif vsort.kind() == Z3_DATATYPE_SORT:
v = Const(name,vsort)
else:
assert False, 'Cannot handle this sort (s: %sid: %d)'\
%(vsort,vsort.kind())
return v
def prove(claim,assume=None,verbose=0):
"""
>>> r,m = prove(BoolVal(True),verbose=0); r,model_str(m,as_str=False)
(True, None)
#infinite counter example when proving contradiction
>>> r,m = prove(BoolVal(False)); r,model_str(m,as_str=False)
(False, [])
>>> x,y,z=Bools('x y z')
>>> r,m = prove(And(x,Not(x))); r,model_str(m,as_str=True)
(False, '[]')
>>> r,m = prove(True,assume=And(x,Not(x)),verbose=0)
Traceback (most recent call last):
...
AssertionError: Assumption is alway False!
>>> r,m = prove(Implies(x,x),assume=y,verbose=2); r,model_str(m,as_str=False)
assume:
y
claim:
Implies(x, x)
to_prove:
Implies(y, Implies(x, x))
(True, None)
>>> r,m = prove(And(x,True),assume=y,verbose=0); r,model_str(m,as_str=False)
(False, [(x, False), (y, True)])
>>> r,m = prove(And(x,y),assume=y,verbose=0)
>>> print(r)
False
>>> print(model_str(m,as_str=True))
x = False
y = True
>>> a,b = Ints('a b')
>>> r,m = prove(a**b == b**a,assume=None,verbose=0)
E: cannot solve !
>>> r is None and m is None
True
"""
if __debug__:
assert not assume or is_expr(assume)
to_prove = claim
if assume:
if __debug__:
is_proved,_ = prove(Not(assume))
def _f():
emsg = "Assumption is alway False!"
if verbose >= 2:
emsg = "{}\n{}".format(assume,emsg)
return emsg
assert is_proved==False, _f()
to_prove = Implies(assume,to_prove)
if verbose >= 2:
print('assume: ')
print(assume)
print('claim: ')
print(claim)
print('to_prove: ')
print(to_prove)
f = Not(to_prove)
models = get_models(f,k=1)
if models is None: #unknown
print('E: cannot solve !')
return None, None
elif models == False: #unsat
return True,None
else: #sat
if __debug__:
assert isinstance(models,list)
if models:
return False, models[0] #the first counterexample
else:
return False, [] #infinite counterexample,models
def get_models(f,k):
"""
    Returns the first k models satisfying f.
If f is not satisfiable, returns False.
If f cannot be solved, returns None
If f is satisfiable, returns the first k models
    Note that if f is a tautology, e.g. True, then the result is []
Based on http://stackoverflow.com/questions/11867611/z3py-checking-all-solutions-for-equation
EXAMPLES:
>>> x, y = Ints('x y')
>>> len(get_models(And(0<=x,x <= 4),k=11))
5
>>> get_models(And(0<=x**y,x <= 1),k=2) is None
True
>>> get_models(And(0<=x,x <= -1),k=2)
False
>>> len(get_models(x+y==7,5))
5
>>> len(get_models(And(x<=5,x>=1),7))
5
>>> get_models(And(x<=0,x>=5),7)
False
>>> x = Bool('x')
>>> get_models(And(x,Not(x)),k=1)
False
>>> get_models(Implies(x,x),k=1)
[]
>>> get_models(BoolVal(True),k=1)
[]
"""
if __debug__:
assert is_expr(f)
assert k>=1
s = Solver()
s.add(f)
models = []
i = 0
while s.check() == sat and i < k:
i = i + 1
m = s.model()
if not m: #if m == []
break
models.append(m)
#create new constraint to block the current model
block = Not(And([v() == m[v] for v in m]))
s.add(block)
if s.check() == unknown:
return None
elif s.check() == unsat and i==0:
return False
else:
return models
def is_tautology(claim,verbose=0):
"""
>>> is_tautology(Implies(Bool('x'),Bool('x')))
True
>>> is_tautology(Implies(Bool('x'),Bool('y')))
False
>>> is_tautology(BoolVal(True))
True
>>> is_tautology(BoolVal(False))
False
"""
return prove(claim=claim,assume=None,verbose=verbose)[0]
def is_contradiction(claim,verbose=0):
"""
>>> x,y=Bools('x y')
>>> is_contradiction(BoolVal(False))
True
>>> is_contradiction(BoolVal(True))
False
>>> is_contradiction(x)
False
>>> is_contradiction(Implies(x,y))
False
>>> is_contradiction(Implies(x,x))
False
>>> is_contradiction(And(x,Not(x)))
True
"""
return prove(claim=Not(claim),assume=None,verbose=verbose)[0]
def exact_one_model(f):
"""
return True if f has exactly 1 model, False otherwise.
EXAMPLES:
>>> x, y = Ints('x y')
>>> exact_one_model(And(0<=x**y,x <= 0))
False
>>> exact_one_model(And(0<=x,x <= 0))
True
>>> exact_one_model(And(0<=x,x <= 1))
False
>>> exact_one_model(And(0<=x,x <= -1))
False
"""
models = get_models(f,k=2)
if isinstance(models,list):
return len(models)==1
else:
return False
def myBinOp(op,*L):
"""
>>> myAnd(*[Bool('x'),Bool('y')])
And(x, y)
>>> myAnd(*[Bool('x'),None])
x
>>> myAnd(*[Bool('x')])
x
>>> myAnd(*[])
>>> myAnd(Bool('x'),Bool('y'))
And(x, y)
>>> myAnd(*[Bool('x'),Bool('y')])
And(x, y)
>>> myAnd([Bool('x'),Bool('y')])
And(x, y)
>>> myAnd((Bool('x'),Bool('y')))
And(x, y)
>>> myAnd(*[Bool('x'),Bool('y'),True])
Traceback (most recent call last):
...
AssertionError
"""
if __debug__:
assert op == Z3_OP_OR or op == Z3_OP_AND or op == Z3_OP_IMPLIES
if len(L)==1 and (isinstance(L[0],list) or isinstance(L[0],tuple)):
L = L[0]
if __debug__:
assert all(not isinstance(l,bool) for l in L)
L = [l for l in L if is_expr(l)]
if L:
if len(L)==1:
return L[0]
else:
if op == Z3_OP_OR:
return Or(L)
elif op == Z3_OP_AND:
return And(L)
else: #IMPLIES
return Implies(L[0],L[1])
else:
return None
def myAnd(*L): return myBinOp(Z3_OP_AND,*L)
def myOr(*L): return myBinOp(Z3_OP_OR,*L)
def myImplies(a,b):return myBinOp(Z3_OP_IMPLIES,[a,b])
Iff = lambda f: And(Implies(f[0],f[1]),Implies(f[1],f[0]))
def model_str(m,as_str=True):
"""
    Returns a 'sorted' model (so that it's easier to see)
The model is sorted by its key,
e.g. if the model is y = 3 , x = 10, then the result is
x = 10, y = 3
EXAMPLES:
    see doctest examples from function prove()
"""
if __debug__:
assert m is None or m == [] or isinstance(m,ModelRef)
if m :
vs = [(v,m[v]) for v in m]
        vs = sorted(vs, key=lambda av: str(av[0]))
if as_str:
return '\n'.join(['{} = {}'.format(k,v) for (k,v) in vs])
else:
return vs
else:
return str(m) if as_str else m
| mpl-2.0 | -56,820,460,629,510,800 | 21.490157 | 97 | 0.492919 | false |
jvkops/django | django/db/models/signals.py | 399 | 2734 | from django.apps import apps
from django.dispatch import Signal
from django.utils import six
class_prepared = Signal(providing_args=["class"])
class ModelSignal(Signal):
"""
Signal subclass that allows the sender to be lazily specified as a string
of the `app_label.ModelName` form.
"""
def __init__(self, *args, **kwargs):
super(ModelSignal, self).__init__(*args, **kwargs)
self.unresolved_references = {}
class_prepared.connect(self._resolve_references)
def _resolve_references(self, sender, **kwargs):
opts = sender._meta
reference = (opts.app_label, opts.object_name)
try:
receivers = self.unresolved_references.pop(reference)
except KeyError:
pass
else:
for receiver, weak, dispatch_uid in receivers:
super(ModelSignal, self).connect(
receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
if isinstance(sender, six.string_types):
try:
app_label, model_name = sender.split('.')
except ValueError:
raise ValueError(
"Specified sender must either be a model or a "
"model name of the 'app_label.ModelName' form."
)
try:
sender = apps.get_registered_model(app_label, model_name)
except LookupError:
ref = (app_label, model_name)
refs = self.unresolved_references.setdefault(ref, [])
refs.append((receiver, weak, dispatch_uid))
return
super(ModelSignal, self).connect(
receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
)
pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = ModelSignal(providing_args=["instance"], use_caching=True)
pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"],
use_caching=True)
post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
m2m_changed = ModelSignal(
providing_args=["action", "instance", "reverse", "model", "pk_set", "using"],
use_caching=True,
)
pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
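# Example of the lazy-sender form handled by ModelSignal above (the receiver
# and the 'myapp.MyModel' label are illustrative, not defined in this module):
#
#     def my_receiver(sender, instance, created, **kwargs):
#         ...
#
#     post_save.connect(my_receiver, sender='myapp.MyModel')
#
# The string reference is stored in unresolved_references and bound to the real
# model class once class_prepared fires for it.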
| bsd-3-clause | -4,204,846,581,283,849,700 | 38.623188 | 114 | 0.613753 | false |
kevin8909/xjerp | openerp/addons/account_anglo_saxon/invoice.py | 12 | 12123 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C)
# 2004-2010 Tiny SPRL (<http://tiny.be>).
# 2009-2010 Veritos (http://veritos.nl).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = inv.company_id.currency_id.id
def get_price(cr, uid, inv, company_currency,i_line):
cur_obj = self.pool.get('res.currency')
if inv.currency_id.id != company_currency:
price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, i_line.product_id.standard_price * i_line.quantity, context={'date': inv.date_invoice})
else:
price = i_line.product_id.standard_price * i_line.quantity
return price
if inv.type in ('out_invoice','out_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if inv.type == 'out_invoice':
# debit account dacc will be the output account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
else:
# = out_refund
# debit account dacc will be the input account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
# in both cases the credit account cacc will be the expense account
# first check the product, if empty check the category
cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
if not cacc:
cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
if dacc and cacc:
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':i_line.product_id.standard_price,
'quantity':i_line.quantity,
'price':get_price(cr, uid, inv, company_currency, i_line),
'account_id':dacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':i_line.product_id.standard_price,
'quantity':i_line.quantity,
'price': -1 * get_price(cr, uid, inv, company_currency, i_line),
'account_id':cacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
elif inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
if inv.type == 'in_invoice':
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
else:
# = in_refund
# oa will be the stock output account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if a == line['account_id'] and i_line.product_id.id == line['product_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
standard_price = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if standard_price != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
price_diff = i_line.price_unit - standard_price
line.update({'price':standard_price * line['quantity']})
diff_res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':price_diff,
'quantity':line['quantity'],
'price': price_diff * line['quantity'],
'account_id':acc,
'product_id':line['product_id'],
'uos_id':line['uos_id'],
'account_analytic_id':line['account_analytic_id'],
'taxes':line.get('taxes',[]),
})
res += diff_res
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
fiscal_pool = self.pool.get('account.fiscal.position')
res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context, company_id)
if not product:
return res
if type in ('in_invoice','in_refund'):
product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type == 'in_invoice':
oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
if not oa:
oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
else:
oa = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id
if not oa:
oa = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id
if oa:
fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
a = fiscal_pool.map_account(cr, uid, fpos, oa)
res['value'].update({'account_id':a})
return res
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
description, journal_id, context=context)
if invoice.type == 'in_invoice':
fiscal_position = self.pool.get('account.fiscal.position')
for _, _, line_dict in invoice_data['invoice_line']:
if line_dict.get('product_id'):
product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
counterpart_acct_id = product.property_stock_account_output and \
product.property_stock_account_output.id
if not counterpart_acct_id:
counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
product.categ_id.property_stock_account_output_categ.id
if counterpart_acct_id:
fpos = invoice.fiscal_position or False
line_dict['account_id'] = fiscal_position.map_account(cr, uid,
fpos,
counterpart_acct_id)
return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,345,047,987,591,609,000 | 65.245902 | 205 | 0.522065 | false |
HiSPARC/station-software | user/python/Lib/lib-tk/test/test_tkinter/test_images.py | 40 | 13352 | import unittest
import Tkinter as tkinter
import ttk
import test.test_support as support
from test_ttk.support import AbstractTkTest, requires_tcl
support.requires('gui')
class MiscTest(AbstractTkTest, unittest.TestCase):
def test_image_types(self):
image_types = self.root.image_types()
self.assertIsInstance(image_types, tuple)
self.assertIn('photo', image_types)
self.assertIn('bitmap', image_types)
def test_image_names(self):
image_names = self.root.image_names()
self.assertIsInstance(image_names, tuple)
class BitmapImageTest(AbstractTkTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
AbstractTkTest.setUpClass.__func__(cls)
cls.testfile = support.findfile('python.xbm', subdir='imghdrdata')
def test_create_from_file(self):
image = tkinter.BitmapImage('::img::test', master=self.root,
foreground='yellow', background='blue',
file=self.testfile)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'bitmap')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def test_create_from_data(self):
with open(self.testfile, 'rb') as f:
data = f.read()
image = tkinter.BitmapImage('::img::test', master=self.root,
foreground='yellow', background='blue',
data=data)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'bitmap')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def assertEqualStrList(self, actual, expected):
self.assertIsInstance(actual, str)
self.assertEqual(self.root.splitlist(actual), expected)
def test_configure_data(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['data'], '-data {} {} {} {}')
with open(self.testfile, 'rb') as f:
data = f.read()
image.configure(data=data)
self.assertEqualStrList(image['data'],
('-data', '', '', '', data))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['maskdata'], '-maskdata {} {} {} {}')
image.configure(maskdata=data)
self.assertEqualStrList(image['maskdata'],
('-maskdata', '', '', '', data))
def test_configure_file(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['file'], '-file {} {} {} {}')
image.configure(file=self.testfile)
self.assertEqualStrList(image['file'],
('-file', '', '', '',self.testfile))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['maskfile'], '-maskfile {} {} {} {}')
image.configure(maskfile=self.testfile)
self.assertEqualStrList(image['maskfile'],
('-maskfile', '', '', '', self.testfile))
def test_configure_background(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['background'], '-background {} {} {} {}')
image.configure(background='blue')
self.assertEqual(image['background'], '-background {} {} {} blue')
def test_configure_foreground(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['foreground'],
'-foreground {} {} #000000 #000000')
image.configure(foreground='yellow')
self.assertEqual(image['foreground'],
'-foreground {} {} #000000 yellow')
class PhotoImageTest(AbstractTkTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
AbstractTkTest.setUpClass.__func__(cls)
cls.testfile = support.findfile('python.gif', subdir='imghdrdata')
def create(self):
return tkinter.PhotoImage('::img::test', master=self.root,
file=self.testfile)
def colorlist(self, *args):
if tkinter.TkVersion >= 8.6 and self.wantobjects:
return args
else:
return tkinter._join(args)
def check_create_from_file(self, ext):
testfile = support.findfile('python.' + ext, subdir='imghdrdata')
image = tkinter.PhotoImage('::img::test', master=self.root,
file=testfile)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'photo')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['data'], '')
self.assertEqual(image['file'], testfile)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def check_create_from_data(self, ext):
testfile = support.findfile('python.' + ext, subdir='imghdrdata')
with open(testfile, 'rb') as f:
data = f.read()
image = tkinter.PhotoImage('::img::test', master=self.root,
data=data)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'photo')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['data'], data if self.wantobjects
else data.decode('latin1'))
self.assertEqual(image['file'], '')
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def test_create_from_ppm_file(self):
self.check_create_from_file('ppm')
def test_create_from_ppm_data(self):
self.check_create_from_data('ppm')
def test_create_from_pgm_file(self):
self.check_create_from_file('pgm')
def test_create_from_pgm_data(self):
self.check_create_from_data('pgm')
def test_create_from_gif_file(self):
self.check_create_from_file('gif')
def test_create_from_gif_data(self):
self.check_create_from_data('gif')
@requires_tcl(8, 6)
def test_create_from_png_file(self):
self.check_create_from_file('png')
@requires_tcl(8, 6)
def test_create_from_png_data(self):
self.check_create_from_data('png')
def test_configure_data(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['data'], '')
with open(self.testfile, 'rb') as f:
data = f.read()
image.configure(data=data)
self.assertEqual(image['data'], data if self.wantobjects
else data.decode('latin1'))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_format(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['format'], '')
image.configure(file=self.testfile, format='gif')
self.assertEqual(image['format'], ('gif',) if self.wantobjects
else 'gif')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_file(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['file'], '')
image.configure(file=self.testfile)
self.assertEqual(image['file'], self.testfile)
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_gamma(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['gamma'], '1.0')
image.configure(gamma=2.0)
self.assertEqual(image['gamma'], '2.0')
def test_configure_width_height(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['width'], '0')
self.assertEqual(image['height'], '0')
image.configure(width=20)
image.configure(height=10)
self.assertEqual(image['width'], '20')
self.assertEqual(image['height'], '10')
self.assertEqual(image.width(), 20)
self.assertEqual(image.height(), 10)
def test_configure_palette(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['palette'], '')
image.configure(palette=256)
self.assertEqual(image['palette'], '256')
image.configure(palette='3/4/2')
self.assertEqual(image['palette'], '3/4/2')
def test_blank(self):
image = self.create()
image.blank()
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image.get(4, 6), self.colorlist(0, 0, 0))
def test_copy(self):
image = self.create()
image2 = image.copy()
self.assertEqual(image2.width(), 16)
self.assertEqual(image2.height(), 16)
self.assertEqual(image.get(4, 6), image.get(4, 6))
def test_subsample(self):
image = self.create()
image2 = image.subsample(2, 3)
self.assertEqual(image2.width(), 8)
self.assertEqual(image2.height(), 6)
self.assertEqual(image2.get(2, 2), image.get(4, 6))
image2 = image.subsample(2)
self.assertEqual(image2.width(), 8)
self.assertEqual(image2.height(), 8)
self.assertEqual(image2.get(2, 3), image.get(4, 6))
def test_zoom(self):
image = self.create()
image2 = image.zoom(2, 3)
self.assertEqual(image2.width(), 32)
self.assertEqual(image2.height(), 48)
self.assertEqual(image2.get(8, 18), image.get(4, 6))
self.assertEqual(image2.get(9, 20), image.get(4, 6))
image2 = image.zoom(2)
self.assertEqual(image2.width(), 32)
self.assertEqual(image2.height(), 32)
self.assertEqual(image2.get(8, 12), image.get(4, 6))
self.assertEqual(image2.get(9, 13), image.get(4, 6))
def test_put(self):
image = self.create()
image.put('{red green} {blue yellow}', to=(4, 6))
self.assertEqual(image.get(4, 6), self.colorlist(255, 0, 0))
self.assertEqual(image.get(5, 6),
self.colorlist(0, 128 if tkinter.TkVersion >= 8.6
else 255, 0))
self.assertEqual(image.get(4, 7), self.colorlist(0, 0, 255))
self.assertEqual(image.get(5, 7), self.colorlist(255, 255, 0))
image.put((('#f00', '#00ff00'), ('#000000fff', '#ffffffff0000')))
self.assertEqual(image.get(0, 0), self.colorlist(255, 0, 0))
self.assertEqual(image.get(1, 0), self.colorlist(0, 255, 0))
self.assertEqual(image.get(0, 1), self.colorlist(0, 0, 255))
self.assertEqual(image.get(1, 1), self.colorlist(255, 255, 0))
def test_get(self):
image = self.create()
self.assertEqual(image.get(4, 6), self.colorlist(62, 116, 162))
self.assertEqual(image.get(0, 0), self.colorlist(0, 0, 0))
self.assertEqual(image.get(15, 15), self.colorlist(0, 0, 0))
self.assertRaises(tkinter.TclError, image.get, -1, 0)
self.assertRaises(tkinter.TclError, image.get, 0, -1)
self.assertRaises(tkinter.TclError, image.get, 16, 15)
self.assertRaises(tkinter.TclError, image.get, 15, 16)
def test_write(self):
image = self.create()
self.addCleanup(support.unlink, support.TESTFN)
image.write(support.TESTFN)
image2 = tkinter.PhotoImage('::img::test2', master=self.root,
format='ppm',
file=support.TESTFN)
self.assertEqual(str(image2), '::img::test2')
self.assertEqual(image2.type(), 'photo')
self.assertEqual(image2.width(), 16)
self.assertEqual(image2.height(), 16)
self.assertEqual(image2.get(0, 0), image.get(0, 0))
self.assertEqual(image2.get(15, 8), image.get(15, 8))
image.write(support.TESTFN, format='gif', from_coords=(4, 6, 6, 9))
image3 = tkinter.PhotoImage('::img::test3', master=self.root,
format='gif',
file=support.TESTFN)
self.assertEqual(str(image3), '::img::test3')
self.assertEqual(image3.type(), 'photo')
self.assertEqual(image3.width(), 2)
self.assertEqual(image3.height(), 3)
self.assertEqual(image3.get(0, 0), image.get(4, 6))
self.assertEqual(image3.get(1, 2), image.get(5, 8))
tests_gui = (MiscTest, BitmapImageTest, PhotoImageTest,)
if __name__ == "__main__":
support.run_unittest(*tests_gui)
| gpl-3.0 | -4,823,958,381,111,532,000 | 39.707317 | 75 | 0.584632 | false |
TheR3ason/map-your-location-history | LatitudePlot.py | 1 | 4022 | #!/usr/bin/env python
# LatitudePlot.py
# Created 30 July 2013
# Created by [email protected]
import os, time, math
from datetime import datetime
from time import mktime
import xml.etree.ElementTree as ET
from PIL import Image, ImageDraw
def GetKmlFiles():
    """Locates and reads local .kml files, returns a list of parsed kml data"""
KmlData = []
for dirname, dirnames, filenames in os.walk('.'):
for filename in filenames:
sp = filename.split('.')
if sp[len(sp)-1]== "kml": #locate kml files
print "Reading kml file " + filename
KmlData.append(ReadKmlFile(dirname, filename))
print KmlData
return KmlData
def ReadKmlFile(dirname, filename):
    """Parses a single kml file, returns a list of [latlist, longlist, timelist]"""
KmlData = {}
kmltime = datetime.time
latlist = []
longlist = []
timelist = []
cnt =0
    f = open(os.path.join(dirname, filename))
line = f.readline()
while line:
if 'when' in line:
timelist.append(time.strptime(ET.fromstring(line)[0].text,"%Y-%m-%dT%H:%M:%SZ"))
if 'coordinates' in line:
latlist.append(float(ET.fromstring(line)[0].text.split(',')[0]))
longlist.append(float(ET.fromstring(line)[0].text.split(',')[1]))
cnt+=1
if cnt % 5000 ==0:
print "Parsing " + filename + ": points found: " + str(cnt)
line = f.readline()
f.close()
return [latlist, longlist, timelist]
def DrawMapData(KmlData,InputImage, OutputImage, itop, ibottom, ileft, iright,xnudge,ynudge):
"""Draws kml line data on top of the specified image"""
im = Image.open(InputImage)
draw = ImageDraw.Draw(im)
cnt =0
for KmlD in KmlData:
for d in range(len(KmlD[0])-1):
#Get points x and y coordinates and draw line
x1=(LongToX(KmlD[0][d],ileft,iright,im.size[0]))+xnudge
y1=(LatToY(KmlD[1][d],itop,ibottom,im.size[1]))+ynudge
x2=(LongToX(KmlD[0][d+1],ileft,iright,im.size[0]))+xnudge
y2=(LatToY(KmlD[1][d+1],itop,ibottom,im.size[1]))+ynudge
if(EuclidDistance(x1,y1,x2,y2) < 10000):
#setting this around 80 works okay. Attempts to remove some noise
draw.line((x1,y1, x2,y2), fill=80)
cnt+=1
if cnt % 10000 ==0:
print "Drawing point number " + str(cnt)
im.save(OutputImage)
def LongToX(InputLong, LeftLong, RightLong, ImWidth):
"""Converts a longitude value in to an x coordinate"""
return ScalingFunc(InputLong+360, LeftLong+360, RightLong+360, ImWidth);
def LatToY(InputLat, TopLat, BottomLat, ImHeight):
"""Converts a latitude value in to a y coordinate"""
return ScalingFunc(InputLat+360, TopLat+360, BottomLat+360, ImHeight);
def EuclidDistance(x1, y1, x2, y2):
"""Calculates the euclidean distance between two points"""
return math.sqrt((x1 - x2)**2+(y1 - y2)**2)
def ScalingFunc(inputv, minv, maxv, size):
"""Helps convert latitudes and longitudes to x and y"""
if((float(maxv) -float(minv)) ==0):
return 0
return ((((float(inputv) - float(minv)) / (float(maxv) -float(minv))) * float(size)));
def ParseImageFile():
"""Reads SatelliteImageData.csv containing:
<File name of image to draw data on>,
<image top latitude>,
<image bottom lattitude>,
<image left longitude>,
<image right longitude>,
(optional) <x value nudge>,
(optional) <y value nudge>"""
with open('ImageData.csv', 'r') as f:
read_data = f.read().split(',')
while 5 <= len(read_data) < 7:
read_data.append(0)
ReturnData = [0]*7
ReturnData[0]=read_data[0]
for i in range(1,7):
ReturnData[i] = float(read_data[i])
return ReturnData
if __name__ == "__main__":
ImageData = ParseImageFile()
DrawMapData(GetKmlFiles(),ImageData[0], "LatitudeData.png", ImageData[1], ImageData[2], ImageData[3], ImageData[4],ImageData[5],ImageData[6])
| apache-2.0 | -6,708,847,753,446,356,000 | 36.588785 | 145 | 0.617355 | false |
erseco/ugr_desarrollo_aplicaciones_internet | Practica_01/Soluciones Practica 1/ej_01_01.py | 2 | 1972 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Practicas de Desarrollo de Aplicaciones para Internet (DAI)
# Copyright (C) 2013 - Zerjillo ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
# Generamos numero aleatorio
numeroAdivinar = random.randint(1, 100)
# Inicialmente el número es desconocido
numero = -1
iteraciones = 0
maxIntentos = 10;
print "Bienvenido al wonderfuloso juego de adivinar un número"
while (numero != numeroAdivinar) and (iteraciones < maxIntentos):
leido = input("Adivina un número entre 1 y 100 (te quedan %i intentos)... " % (maxIntentos - iteraciones))
# Casting a entero para poder hacer comparaciones, etc. Peta si el usuario no mete un número, pero no me preocupa
numero = int(leido)
if (numero < 1) or (numero > 100):
print "Tu eres tonto, el número tiene que estar entre 1 y 100."
elif (numero < numeroAdivinar):
print "El número buscado es mayor que %i." % (numero)
elif (numero > numeroAdivinar):
print "El número buscado el menor que %i." % (numero)
else:
print "Felicidades, el número buscado era el %i." % (numeroAdivinar)
iteraciones += 1
if (iteraciones == maxIntentos):
print "Lo siento, no te quedan más intentos. El número buscado era el %i. Y tú eres un poco ceporro por no haberlo adivinado." % (numeroAdivinar)
| gpl-3.0 | -4,609,483,100,046,658,600 | 33.421053 | 147 | 0.710352 | false |
felipetomm/POX-Django | pox/web/jsonrpc.py | 45 | 8357 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A library for implementing JSON-RPC based web services
This is lightweight, low on features, and not a whole lot of effort
has been paid to really complying with the JSON-RPC spec. Feel
free to improve it. ;)
It'd be nice to factor the JSON-RPC stuff out so that it could
be used with something besides just HTTP.
Also, it has some capability for compatibility with Qooxdoo.
"""
import json
import sys
from pox.web.webcore import *
from pox.core import core
log = core.getLogger()
# A long polling handler can return this if it notices that the
# connection has closed.
ABORT = object()
class JSONRPCHandler (SplitRequestHandler):
"""
Meant for implementing JSON-RPC web services
Implement RPC methods by prefacing them with "_exec_".
config keys of note:
"auth" is a function which takes a username and password and returns
True if they are a valid user. If set, turns on authentication.
"auth_realm" is the optional authentication realm name.
"qx" turns on Qooxdoo mode by default (it's usually switched on by
seeing a "service" key in the request).
There are a couple of extensions to JSON-RPC:
If you want to use positional AND named parameters, in a request, use
"params" for the former and "kwparams" for the latter.
There's an optional "service" key in requests. This comes from qooxdoo.
  If it is given, look for the _exec_ method on some other object instead
of self. Put the additional services in an arg named 'services'.
"""
protocol_version = 'HTTP/1.1'
QX_ERR_ILLEGAL_SERVICE = 1
QX_ERR_SERVICE_NOT_FOUND = 2
QX_ERR_CLASS_NOT_FOUND = 3
QX_ERR_METHOD_NOT_FOUND = 4
QX_ERR_PARAMETER_MISMATCH = 5
QX_ERR_PERMISSION_DENIED = 6
QX_ORIGIN_SERVER = 1
QX_ORIGIN_METHOD = 2
ERR_PARSE_ERROR = -32700 # WE USE THIS
ERR_INVALID_REQUEST = -32600
ERR_METHOD_NOT_FOUND = -32601 # WE USE THIS
ERR_INVALID_PARAMS = -32602
ERR_INTERNAL_ERROR = -32603 # WE USE THIS
ERR_SERVER_ERROR = -32000 # to -32099 WE USE THIS
ERR_METHOD_ERROR = 99 # We use this for errors in methods
ERROR_XLATE = {
ERR_PARSE_ERROR : (1, QX_ERR_ILLEGAL_SERVICE), # Nonsense
ERR_METHOD_NOT_FOUND : (1, QX_ERR_METHOD_NOT_FOUND),
ERR_INTERNAL_ERROR : (),
ERR_SERVER_ERROR : (),
}
_qx = False
def _init (self):
# Maybe the following arg-adding feature should just be part of
# SplitRequestHandler?
for k,v in self.args.iteritems():
setattr(self, "_arg_" + k, v)
self.auth_function = self.args.get('auth', None)
self.auth_realm = self.args.get('auth_realm', "JSONRPC")
self._qx = self.args.get('qx', self._qx)
def _send_auth_header (self):
if self.auth_function:
self.send_header('WWW-Authenticate',
'Basic realm="%s"' % (self.auth_realm,))
def _do_auth (self):
if not self.auth_function:
return True
auth = self.headers.get("Authorization", "").strip().lower()
success = False
if auth.startswith("basic "):
try:
auth = base64.decodestring(auth[6:].strip()).split(':', 1)
success = self.auth_function(auth[0], auth[1])
except:
pass
if not success:
self.send_response(401, "Authorization Required")
self._send_auth_header()
self.end_headers()
return success
def _translate_error (self, e):
if not 'error' in e: return
if self._qx:
if e['code'] < 0:
        c,o = self.ERROR_XLATE.get(e['code'], (1, self.QX_ERR_ILLEGAL_SERVICE))
e['code'] = c
e['origin'] = o
else:
        e['origin'] = self.QX_ORIGIN_METHOD
def _handle (self, data):
try:
try:
service = self
if 'services' in self.args:
if 'service' in data:
service = self.args['services'].get(data['service'], self)
self._qx = True # This is a qooxdoo request
method = "_exec_" + data.get('method')
method = getattr(service, method)
except:
response = {}
response['error'] = {'code':self.ERR_METHOD_NOT_FOUND,
'message':'Method not found'}
return response
params = data.get('params', [])
if isinstance(params, dict):
kw = params
params = []
else:
kw = data.get('kwparams', {})
try:
r = method(*params,**kw)
#TODO: jsonrpc version?
return r
except:
response = {}
t,v,_ = sys.exc_info()
response['error'] = {'message': "%s: %s" % (t,v),
'code':self.ERR_METHOD_ERROR}
import traceback
response['error']['data'] = {'traceback':traceback.format_exc()}
log.exception("While handling %s...", data.get('method'))
return response
except:
response = {}
t,v,_ = sys.exc_info()
response['error'] = {'message': "%s: %s" % (t,v),
'code':self.ERR_INTERNAL_ERROR}
return response
def do_POST (self):
if not self._do_auth():
return
dumps_opts = {}
#FIXME: this is a hack
if 'pretty' in self.path:
dumps_opts = {'sort_keys':True, 'indent':2}
def reply (response):
orig = response
#if not isinstance(response, basestring):
if isinstance(response, list):
for r in response: self._translate_error(r)
else:
self._translate_error(response)
response = json.dumps(response, default=str, **dumps_opts)
response = response.strip()
if len(response) and not response.endswith("\n"): response += "\n"
try:
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
except IOError as e:
if e.errno == 32:
if isinstance(orig, dict) and 'error' in orig:
log.info("Socket closed when writing error response")
else:
log.warning("Socket closed when writing response")
#log.debug(" response was: " + response)
else:
log.exception("Exception while trying to send JSON-RPC response")
try:
self.wfile.close()
except:
pass
return False
except:
log.exception("Exception while trying to send JSON-RPC response")
return False
return True
l = self.headers.get("Content-Length", "")
data = ''
if l == "":
data = self.rfile.read()
else:
data = self.rfile.read(int(l))
try:
data = json.loads(data)
except:
response = {}
response['error'] = {'code':self.ERR_PARSE_ERROR,
'message':'Parse error'}
return reply(response)
single = False
if not isinstance(data, list):
data = [data]
single = True
responses = []
for req in data:
response = self._handle(req) # Should never raise an exception
if response is ABORT:
return
if 'id' in req or 'error' in response:
response['id'] = req.get('id')
responses.append(response)
if len(responses) == 0:
responses = ''
else:
if single:
responses = responses[0]
reply(responses)
class QXJSONRPCHandler (JSONRPCHandler):
"""
A subclass of JSONRPCHandler which speaks something closer to
qooxdoo's version JSON-RPC.
"""
_qx = True
#TODO: Implement the <SCRIPT> based GET method for cross-domain
def make_error (msg = "Unknown Error",
code = JSONRPCHandler.ERR_SERVER_ERROR,
data = None):
e = {'code':code,'message':msg}
if data is not None:
e['data'] = data
r = {'error':e}
return r
| apache-2.0 | 6,321,770,912,790,769,000 | 28.634752 | 75 | 0.608951 | false |
tedder/ansible | lib/ansible/modules/network/cloudengine/ce_netconf.py | 15 | 5948 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_netconf
version_added: "2.4"
short_description: Run an arbitrary netconf command on HUAWEI CloudEngine switches.
description:
- Sends an arbitrary netconf command on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
options:
rpc:
description:
- The type of rpc.
required: true
choices: ['get', 'edit-config', 'execute-action', 'execute-cli']
cfg_xml:
description:
- The config xml string.
required: true
'''
EXAMPLES = '''
- name: CloudEngine netconf test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Netconf get operation"
ce_netconf:
rpc: get
cfg_xml: '<filter type=\"subtree\">
<vlan xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<vlans>
<vlan>
<vlanId>10</vlanId>
<vlanif>
<ifName></ifName>
<cfgBand></cfgBand>
<dampTime></dampTime>
</vlanif>
</vlan>
</vlans>
</vlan>
</filter>'
provider: "{{ cli }}"
- name: "Netconf edit-config operation"
ce_netconf:
rpc: edit-config
cfg_xml: '<config>
<aaa xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<authenticationSchemes>
<authenticationScheme operation=\"create\">
<authenSchemeName>default_wdz</authenSchemeName>
<firstAuthenMode>local</firstAuthenMode>
<secondAuthenMode>invalid</secondAuthenMode>
</authenticationScheme>
</authenticationSchemes>
</aaa>
</config>'
provider: "{{ cli }}"
- name: "Netconf execute-action operation"
ce_netconf:
rpc: execute-action
cfg_xml: '<action>
<l2mc xmlns=\"http://www.huawei.com/netconf/vrp\" content-version=\"1.0\" format-version=\"1.0\">
<l2McResetAllVlanStatis>
<addrFamily>ipv4unicast</addrFamily>
</l2McResetAllVlanStatis>
</l2mc>
</action>'
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"result": ["ok"]}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config
from ansible.module_utils.network.cloudengine.ce import execute_nc_action, ce_argument_spec, execute_nc_cli
def main():
""" main """
argument_spec = dict(
rpc=dict(choices=['get', 'edit-config',
'execute-action', 'execute-cli'], required=True),
cfg_xml=dict(required=True)
)
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
rpc = module.params['rpc']
cfg_xml = module.params['cfg_xml']
changed = False
end_state = dict()
if rpc == "get":
response = get_nc_config(module, cfg_xml)
if "<data/>" in response:
end_state["result"] = "<data/>"
else:
tmp1 = response.split(r"<data>")
tmp2 = tmp1[1].split(r"</data>")
result = tmp2[0].split("\n")
end_state["result"] = result
elif rpc == "edit-config":
response = set_nc_config(module, cfg_xml)
if "<ok/>" not in response:
module.fail_json(msg='rpc edit-config failed.')
changed = True
end_state["result"] = "ok"
elif rpc == "execute-action":
response = execute_nc_action(module, cfg_xml)
if "<ok/>" not in response:
module.fail_json(msg='rpc execute-action failed.')
changed = True
end_state["result"] = "ok"
elif rpc == "execute-cli":
response = execute_nc_cli(module, cfg_xml)
if "<data/>" in response:
end_state["result"] = "<data/>"
else:
tmp1 = response.xml.split(r"<data>")
tmp2 = tmp1[1].split(r"</data>")
result = tmp2[0].split("\n")
end_state["result"] = result
else:
module.fail_json(msg='please input correct rpc.')
results = dict()
results['changed'] = changed
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,066,605,171,228,497,000 | 28.74 | 118 | 0.555145 | false |
jasonbot/django | tests/template_tests/filter_tests/test_dictsort.py | 342 | 1477 | from django.template.defaultfilters import dictsort
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_sort(self):
sorted_dicts = dictsort(
[{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age',
)
self.assertEqual(
[sorted(dict.items()) for dict in sorted_dicts],
[[('age', 18), ('name', 'Jonny B Goode')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 63), ('name', 'Ra Ra Rasputin')]],
)
def test_dictsort_complex_sorting_key(self):
"""
Since dictsort uses template.Variable under the hood, it can sort
on keys like 'foo.bar'.
"""
data = [
{'foo': {'bar': 1, 'baz': 'c'}},
{'foo': {'bar': 2, 'baz': 'b'}},
{'foo': {'bar': 3, 'baz': 'a'}},
]
sorted_data = dictsort(data, 'foo.baz')
self.assertEqual([d['foo']['bar'] for d in sorted_data], [3, 2, 1])
def test_invalid_values(self):
"""
If dictsort is passed something other than a list of dictionaries,
fail silently.
"""
self.assertEqual(dictsort([1, 2, 3], 'age'), '')
self.assertEqual(dictsort('Hello!', 'age'), '')
self.assertEqual(dictsort({'a': 1}, 'age'), '')
self.assertEqual(dictsort(1, 'age'), '')
| bsd-3-clause | -2,733,055,657,204,998,700 | 32.568182 | 75 | 0.498984 | false |
rahulsharma1991/scrapy | tests/test_utils_deprecate.py | 140 | 10526 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import unittest
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import create_deprecated_class, update_classpath
from tests import mock
class MyWarning(UserWarning):
pass
class SomeBaseClass(object):
pass
class NewName(SomeBaseClass):
pass
class WarnWhenSubclassedTest(unittest.TestCase):
def _mywarnings(self, w, category=MyWarning):
return [x for x in w if x.category is MyWarning]
def test_no_warning_on_definition(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class('Deprecated', NewName)
w = self._mywarnings(w)
self.assertEqual(w, [])
def test_subclassing_warning_message(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertEqual(
str(w[0].message),
"tests.test_utils_deprecate.UserClass inherits from "
"deprecated class tests.test_utils_deprecate.Deprecated, "
"please inherit from tests.test_utils_deprecate.NewName."
" (warning only on first subclass, there may be others)"
)
self.assertEqual(w[0].lineno, inspect.getsourcelines(UserClass)[1])
def test_custom_class_paths(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
new_class_path='foo.NewClass',
old_class_path='bar.OldClass',
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
_ = Deprecated()
w = self._mywarnings(w)
self.assertEqual(len(w), 2)
self.assertIn('foo.NewClass', str(w[0].message))
self.assertIn('bar.OldClass', str(w[0].message))
self.assertIn('foo.NewClass', str(w[1].message))
self.assertIn('bar.OldClass', str(w[1].message))
def test_subclassing_warns_only_on_direct_childs(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_once=False,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class NoWarnOnMe(UserClass):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertIn('UserClass', str(w[0].message))
def test_subclassing_warns_once_by_default(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class FooClass(Deprecated):
pass
class BarClass(Deprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertIn('UserClass', str(w[0].message))
def test_warning_on_instance(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
# ignore subclassing warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
class UserClass(Deprecated):
pass
with warnings.catch_warnings(record=True) as w:
_, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
_ = UserClass() # subclass instances don't warn
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertEqual(
str(w[0].message),
"tests.test_utils_deprecate.Deprecated is deprecated, "
"instantiate tests.test_utils_deprecate.NewName instead."
)
self.assertEqual(w[0].lineno, lineno)
def test_warning_auto_message(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class('Deprecated', NewName)
class UserClass2(Deprecated):
pass
msg = str(w[0].message)
self.assertIn("tests.test_utils_deprecate.NewName", msg)
self.assertIn("tests.test_utils_deprecate.Deprecated", msg)
def test_issubclass(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class UpdatedUserClass1(NewName):
pass
class UpdatedUserClass1a(NewName):
pass
class OutdatedUserClass1(DeprecatedName):
pass
class OutdatedUserClass1a(DeprecatedName):
pass
class UnrelatedClass(object):
pass
class OldStyleClass:
pass
assert issubclass(UpdatedUserClass1, NewName)
assert issubclass(UpdatedUserClass1a, NewName)
assert issubclass(UpdatedUserClass1, DeprecatedName)
assert issubclass(UpdatedUserClass1a, DeprecatedName)
assert issubclass(OutdatedUserClass1, DeprecatedName)
assert not issubclass(UnrelatedClass, DeprecatedName)
assert not issubclass(OldStyleClass, DeprecatedName)
assert not issubclass(OldStyleClass, DeprecatedName)
assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)
self.assertRaises(TypeError, issubclass, object(), DeprecatedName)
def test_isinstance(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class UpdatedUserClass2(NewName):
pass
class UpdatedUserClass2a(NewName):
pass
class OutdatedUserClass2(DeprecatedName):
pass
class OutdatedUserClass2a(DeprecatedName):
pass
class UnrelatedClass(object):
pass
class OldStyleClass:
pass
assert isinstance(UpdatedUserClass2(), NewName)
assert isinstance(UpdatedUserClass2a(), NewName)
assert isinstance(UpdatedUserClass2(), DeprecatedName)
assert isinstance(UpdatedUserClass2a(), DeprecatedName)
assert isinstance(OutdatedUserClass2(), DeprecatedName)
assert isinstance(OutdatedUserClass2a(), DeprecatedName)
assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
assert not isinstance(UnrelatedClass(), DeprecatedName)
assert not isinstance(OldStyleClass(), DeprecatedName)
def test_clsdict(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
Deprecated = create_deprecated_class('Deprecated', NewName, {'foo': 'bar'})
self.assertEqual(Deprecated.foo, 'bar')
def test_deprecate_a_class_with_custom_metaclass(self):
Meta1 = type('Meta1', (type,), {})
New = Meta1('New', (), {})
Deprecated = create_deprecated_class('Deprecated', New)
def test_deprecate_subclass_of_deprecated_class(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
AlsoDeprecated = create_deprecated_class('AlsoDeprecated', Deprecated,
new_class_path='foo.Bar',
warn_category=MyWarning)
w = self._mywarnings(w)
self.assertEqual(len(w), 0, str(map(str, w)))
with warnings.catch_warnings(record=True) as w:
AlsoDeprecated()
class UserClass(AlsoDeprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 2)
self.assertIn('AlsoDeprecated', str(w[0].message))
self.assertIn('foo.Bar', str(w[0].message))
self.assertIn('AlsoDeprecated', str(w[1].message))
self.assertIn('foo.Bar', str(w[1].message))
def test_inspect_stack(self):
with mock.patch('inspect.stack', side_effect=IndexError):
with warnings.catch_warnings(record=True) as w:
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class SubClass(DeprecatedName):
pass
self.assertIn("Error detecting parent module", str(w[0].message))
@mock.patch('scrapy.utils.deprecate.DEPRECATION_RULES',
[('scrapy.contrib.pipeline.', 'scrapy.pipelines.'),
('scrapy.contrib.', 'scrapy.extensions.')])
class UpdateClassPathTest(unittest.TestCase):
def test_old_path_gets_fixed(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath('scrapy.contrib.debug.Debug')
self.assertEqual(output, 'scrapy.extensions.debug.Debug')
self.assertEqual(len(w), 1)
self.assertIn("scrapy.contrib.debug.Debug", str(w[0].message))
self.assertIn("scrapy.extensions.debug.Debug", str(w[0].message))
def test_sorted_replacement(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
output = update_classpath('scrapy.contrib.pipeline.Pipeline')
self.assertEqual(output, 'scrapy.pipelines.Pipeline')
def test_unmatched_path_stays_the_same(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath('scrapy.unmatched.Path')
self.assertEqual(output, 'scrapy.unmatched.Path')
self.assertEqual(len(w), 0)
| bsd-3-clause | 8,439,653,246,269,451,000 | 36.459075 | 87 | 0.610583 | false |
Outernet-Project/librarian | tests/utils/test_route_mixins.py | 1 | 4074 | import mock
from bottle_utils import csrf
import librarian.utils.route_mixins as mod
# Common test helper
class MockedRouteBase(object):
def __init__(self, *args, **kwargs):
# this way all tests will get a separate instance of the mock
# object when they instantiate their routes, because otherwise
# a class level mock would carry over state from previous tests
self.request = mock.Mock()
self.response = mock.Mock()
def get(self, *args, **kwargs):
return None
def post(self, *args, **kwargs):
return None
def get_default_context(self):
return {'default': 'default'}
# CSRFRouteMixin tests
@mock.patch.object(csrf, 'response')
@mock.patch.object(csrf, 'request')
def test_csrf_route_mixin_get(request, response):
request.get_cookie.return_value = ''
class TestRoute(mod.CSRFRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
inst.get()
assert hasattr(inst.request, 'csrf_token')
@mock.patch.object(csrf, 'abort')
@mock.patch.object(csrf, 'response')
@mock.patch.object(csrf, 'request')
def test_csrf_route_mixin_post(request, response, abort):
request.get_cookie.return_value = ''
class TestRoute(mod.CSRFRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
inst.post()
assert abort.called
# RedirectRouteMixin tests
def test_redirect_route_mixin_get_next_path_found():
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
assert inst.get_next_path() == inst.request.params.get.return_value
def test_redirect_route_mixin_get_next_path_default():
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
inst.request.params = {}
assert inst.get_next_path() == inst.default_next_path
@mock.patch.object(mod.RedirectRouteMixin, 'get_next_path')
def test_redirect_route_mixin_get_default_context(get_next_path):
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
exp = {'default': 'default',
inst.next_context_parameter_name: inst.get_next_path.return_value}
assert inst.get_default_context() == exp
assert inst.get_next_path.called
@mock.patch.object(mod, 'i18n_path')
@mock.patch.object(mod.RedirectRouteMixin, 'get_next_path')
def test_redirect_route_mixin_get_next_url(get_next_path, i18n_path):
i18n_path.return_value = '/en/some/path/'
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
inst.request.url = 'http://localhost/here/there/'
assert inst.get_next_url() == 'http://localhost/en/some/path/'
assert inst.get_next_path.called
i18n_path.assert_called_with(get_next_path.return_value)
@mock.patch.object(mod.RedirectRouteMixin, 'get_next_path')
def test_redirect_route_mixin_add_next_parameter(get_next_path):
get_next_path.return_value = '/next/path/'
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
exp = '/main/route/?next=/next/path/'
assert inst.add_next_parameter('/main/route/') == exp
@mock.patch.object(mod.RedirectRouteMixin, 'get_next_url')
def test_redirect_route_mixin_perform_redirect_default(get_next_url):
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
inst.perform_redirect()
inst.response.set_header.assert_called_with('Location',
get_next_url.return_value)
assert inst.response.status == 303
@mock.patch.object(mod.RedirectRouteMixin, 'get_next_url')
def test_redirect_route_mixin_perform_redirect_custom(get_next_url):
class TestRoute(mod.RedirectRouteMixin, MockedRouteBase):
pass
inst = TestRoute()
custom_url = 'outernet.is'
custom_status = 302
inst.perform_redirect(custom_url, custom_status)
inst.response.set_header.assert_called_with('Location', custom_url)
assert inst.response.status == custom_status
| gpl-3.0 | -3,701,229,933,204,108,300 | 26.527027 | 77 | 0.687285 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/encodings/iso8859_5.py | 4 | 5385 | """ Python Character Mapping Codec generated from '8859-5.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00a2: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x00a3: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x00a4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00a5: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x00a6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00a7: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00a8: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a9: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x00aa: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x00ab: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x00ac: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x00ae: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00af: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x00b0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00b1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00b2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00b3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00b4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00b5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00b6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00b7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ba: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00bb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00bc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00bd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00be: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00bf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00c0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00c1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00c2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00c3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00c4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00c5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00c6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00c7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00c8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00c9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00ca: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00cb: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00cc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00cd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00ce: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00cf: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00d0: 0x0430, # CYRILLIC SMALL LETTER A
0x00d1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00d2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00d3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00d4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00d5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00d6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00d7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00d8: 0x0438, # CYRILLIC SMALL LETTER I
0x00d9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00da: 0x043a, # CYRILLIC SMALL LETTER KA
0x00db: 0x043b, # CYRILLIC SMALL LETTER EL
0x00dc: 0x043c, # CYRILLIC SMALL LETTER EM
0x00dd: 0x043d, # CYRILLIC SMALL LETTER EN
0x00de: 0x043e, # CYRILLIC SMALL LETTER O
0x00df: 0x043f, # CYRILLIC SMALL LETTER PE
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x2116, # NUMERO SIGN
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0452, # CYRILLIC SMALL LETTER DJE
0x00f3: 0x0453, # CYRILLIC SMALL LETTER GJE
0x00f4: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f5: 0x0455, # CYRILLIC SMALL LETTER DZE
0x00f6: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f7: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f8: 0x0458, # CYRILLIC SMALL LETTER JE
0x00f9: 0x0459, # CYRILLIC SMALL LETTER LJE
0x00fa: 0x045a, # CYRILLIC SMALL LETTER NJE
0x00fb: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x00fc: 0x045c, # CYRILLIC SMALL LETTER KJE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00ff: 0x045f, # CYRILLIC SMALL LETTER DZHE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| mit | -2,776,399,918,202,030,000 | 37.464286 | 80 | 0.736305 | false |