repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
mefly2012/platform | src/clean_validate/zyktgg.py | 1 | 1453 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class zyktgg():
"""开庭公告"""
need_check_ziduan = ['main',
'city',
'bbd_dotime',
'title'
]
def check_main(self, indexstr, ustr):
"""main 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.has_count_hz(ustr, 1):
ret = u'不包含中文'
else:
ret = u'为空'
return ret
def check_city(self, indexstr, ustr):
"""city 清洗验证"""
ret = None
if ustr and len(ustr):
if ustr not in public.PROVINCE:
ret = u'非法的省名'
pass
else:
ret = u'为空'
return ret
def check_bbd_dotime(self, indexstr, ustr):
"""do_time 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
def check_title(self, indexstr, ustr):
"""title 清洗验证"""
ret = None
if ustr and len(ustr):
if all(not public.is_chinese(c) for c in ustr):
ret = u'没有中文'
            elif len(ustr) < 5:
ret = u'不够5个字以上'
return ret
| apache-2.0 | 8,685,627,748,616,619,000 | 23.636364 | 59 | 0.451661 | false |
libvirt/libvirt-test-API | libvirttestapi/repos/domain/save.py | 1 | 2922 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# Save domain as a statefile
import os
import libvirt
from libvirt import libvirtError
from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils
required_params = ('guestname', 'filepath',)
optional_params = {}
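# Illustrative params dict (assumed shape, inferred from save() and save_clean()
# below; the guest name and file path are made up):
#   {'logger': logger, 'guestname': 'rhel7-test', 'filepath': '/tmp/rhel7-test.save'}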
def get_guest_ipaddr(*args):
"""Get guest ip address"""
(guestname, logger) = args
mac = utils.get_dom_mac_addr(guestname)
logger.debug("guest mac address: %s" % mac)
ipaddr = utils.mac_to_ip(mac, 15)
logger.debug("guest ip address: %s" % ipaddr)
if utils.do_ping(ipaddr, 20) == 1:
logger.info("ping current guest successfull")
return ipaddr
else:
logger.error("Error: can't ping current guest")
return None
def check_guest_status(*args):
"""Check guest current status"""
(domobj, logger) = args
state = domobj.info()[0]
logger.debug("current guest status: %s" % state)
if state == libvirt.VIR_DOMAIN_SHUTOFF or \
state == libvirt.VIR_DOMAIN_SHUTDOWN or \
state == libvirt.VIR_DOMAIN_BLOCKED:
return False
else:
return True
def check_guest_save(*args):
"""Check save domain result, if save domain is successful,
guestname.save will exist under /tmp directory and guest
can't be ping and status is paused
"""
(guestname, domobj, logger) = args
if not check_guest_status(domobj, logger):
if not get_guest_ipaddr(guestname, logger):
return True
else:
return False
else:
return False
def save(params):
"""Save domain to a disk file"""
logger = params['logger']
guestname = params['guestname']
filepath = params['filepath']
conn = sharedmod.libvirtobj['conn']
domobj = conn.lookupByName(guestname)
# Save domain
ipaddr = get_guest_ipaddr(guestname, logger)
if not check_guest_status(domobj, logger):
logger.error("Error: current guest status is shutoff")
return 1
if not ipaddr:
logger.error("Error: can't get guest ip address")
return 1
try:
domobj.save(filepath)
if check_guest_save(guestname, domobj, logger):
logger.info("save %s domain successful" % guestname)
else:
logger.error("Error: fail to check save domain")
return 1
except libvirtError as e:
logger.error("API error message: %s, error code is %s"
% (e.get_error_message(), e.get_error_code()))
logger.error("Error: fail to save %s domain" % guestname)
return 1
return 0
def save_clean(params):
""" clean testing environment """
logger = params['logger']
filepath = params['filepath']
if os.path.exists(filepath):
logger.info("remove dump file from save %s" % filepath)
os.remove(filepath)
| gpl-2.0 | -1,875,613,081,637,927,700 | 26.055556 | 67 | 0.629363 | false |
noelevans/sandpit | fivethiryeight/riddler_casino.py | 1 | 1180 | """
Suppose a casino invents a new game that you must pay $250 to play. The game
works like this: The casino draws random numbers between 0 and 1, from a
uniform distribution. It adds them together until their sum is greater than 1,
at which time it stops drawing new numbers. You get a payout of $100 each time
a new number is drawn.
For example, suppose the casino draws 0.4 and then 0.7. Since the sum is
greater than 1, it will stop after these two draws, and you receive $200. If
instead it draws 0.2, 0.3, 0.3, and then 0.6, it will stop after the fourth
draw and you will receive $400. Given the $250 entrance fee, should you play
the game?
Specifically, what is the expected value of your winnings?
From:
http://fivethirtyeight.com/features/
should-you-pay-250-to-play-this-casino-game
"""
import numpy as np
def trial():
total = 0
spins = 0
while total < 1:
total += np.random.random()
spins += 1
return spins
def main():
n = 10000000
dollar_return = (np.mean([trial() for _ in range(n)]))
return_on_stake = 100 * dollar_return
print(return_on_stake)
if __name__ == '__main__':
main()
| mit | -3,011,533,244,456,876,500 | 27.780488 | 78 | 0.677119 | false |
sunlightlabs/sarahs_inbox | mail/views.py | 1 | 8502 | from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.paginator import Paginator
from django.http import HttpResponse, HttpResponseRedirect
from urllib import unquote
from haystack.query import SearchQuerySet
from mail.models import *
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.core.cache import cache
import re
RESULTS_PER_PAGE = 50
def _search_string(request):
return request.GET.get('q', None)
def _search_tokens(request):
s = _search_string(request)
if s is None:
return []
# protection!
re_sanitize = re.compile(r'[^\w\d\s\'"\,\.\?\$]', re.I)
s = re_sanitize.sub('', s)
tokens = []
re_quotes = re.compile(r'\"([^\"]+)\"')
for m in re_quotes.findall(s):
tokens.append(m.replace('"','').strip())
s = s.replace('"%s"' % m, '')
for t in s.split(' '):
tokens.append(t.strip())
while '' in tokens:
tokens.remove('')
return tokens
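# Illustrative behaviour (hypothetical query string): for q = '"foo bar" baz' the
# quoted phrase is kept intact and the remainder is split on whitespace, so this
# returns ['foo bar', 'baz'].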
def _highlight(text, tokens):
regexes = []
sorted_tokens = sorted(tokens, key=lambda x: len(x))
for t in sorted_tokens:
regexes.append(re.compile(r'(%s)' % t.replace(' ', r'\s+'), re.I))
for r in regexes:
text = r.sub('<span class="highlight">\\1</span>', text)
return text
def _prepare_ids_from_cookie(request, cookie_name, method=None):
if method == 'post':
cookie = unquote(request.POST.get(cookie_name, '')).replace(',,', ',')
else:
cookie = unquote(request.COOKIES.get(cookie_name,'')).replace(',,', ',')
print cookie
if len(cookie)>1:
if cookie[0]==',':
cookie = cookie[1:]
if cookie[-1]==',':
cookie = cookie[:-1]
try:
id_list = map(lambda x: (x!='') and int(x) or 0, cookie.split(','))
except:
id_list = []
return id_list
def _annotate_emails(emails, search=[]):
r = []
for email in emails:
email.text = _highlight(email.text, search)
r.append({ 'creator_html': email.creator_html(), 'to_html': email.to_html(), 'cc_html': email.cc_html(), 'obj': email })
return r
def index(request, search=[], threads=None):
if threads is None:
palin = Person.objects.sarah_palin()
threads = Thread.objects.exclude(creator__in=palin).order_by('-date')
threads_count = threads.count()
p = Paginator(threads, RESULTS_PER_PAGE)
page_num = 1
try:
page_num = int(request.GET.get('page', 1))
except:
pass
page = p.page(page_num)
highlighted_threads = []
for thread in page.object_list:
if (threads is not None) and type(threads) is SearchQuerySet: # deal with searchqueryset objects
thread = thread.object
thread.name = _highlight(thread.name, search)
highlighted_threads.append(thread)
template_vars = {
'range': "<strong>%d</strong> - <strong>%d</strong> of <strong>%d</strong>" % (page.start_index(), page.end_index(), threads_count),
'num_pages': p.num_pages ,
'next': page_num<p.num_pages and min(p.num_pages,page_num+1) or False,
'prev': page_num>1 and max(1, page_num-1) or False,
'first': '1',
'last': p.num_pages,
'current_page': page_num,
'threads': highlighted_threads,
'search': " ".join(search),
'search_orig': (_search_string(request) is not None) and _search_string(request) or '',
'path': request.path,
}
return render_to_response('index.html', template_vars, context_instance=RequestContext(request))
def sent(request):
kagan = Person.objects.elena_kagan()
emails = Thread.objects.filter(creator=kagan).order_by('-date')
return index(request, threads=emails)
def contact_by_id(request, contact_id, suppress_redirect=False):
cache_key = 'contact_%d' % int(contact_id)
threads = cache.get(cache_key)
if threads is None:
try:
person = Person.objects.get(id=contact_id)
except Person.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
if person.merged_into is not None:
return HttpResponseRedirect('/contact/%d/' % person.merged_into.id)
threads = []
emails = Email.objects.filter(Q(to=person)|Q(cc=person))
for e in emails:
if e.email_thread is not None:
threads.append(e.email_thread.id)
threads = Thread.objects.filter(id__in=threads).order_by('-date')
cache.set(cache_key, threads)
return index(request, threads=threads)
def contact_by_name(request, contact_name):
try:
contact = Person.objects.get(slug=contact_name)
except Person.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.contacts_index'))
except Thread.MultipleObjectsReturned, e:
return HttpResponseRedirect(reverse('mail.views.contacts_index'))
return contact_by_id(request, contact.id, suppress_redirect=True)
def contacts_index(request):
return index(request)
def thread_by_id(request, thread_id, suppress_redirect=False):
try:
thread = Thread.objects.get(id=thread_id)
except Thread.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
# if merged thread, redirect
if thread.merged_into is not None:
return HttpResponseRedirect('/thread/%d/' % thread.merged_into.id)
# if access by ID, redirect to descriptive URL
if (not suppress_redirect) and (len(thread.slug.strip())>3):
return HttpResponseRedirect('/thread/%s/' % thread.slug)
search = _search_tokens(request)
thread_starred = thread.id in _prepare_ids_from_cookie(request, 'kagan_star')
emails = _annotate_emails(Email.objects.filter(email_thread=thread).order_by('creation_date_time'), search)
return render_to_response('thread.html', {'thread': thread, 'thread_starred': thread_starred, 'emails': emails }, context_instance=RequestContext(request))
def thread_by_name(request, thread_name):
try:
thread = Thread.objects.get(slug=thread_name)
except Thread.DoesNotExist, e:
return HttpResponseRedirect(reverse('mail.views.index'))
except Thread.MultipleObjectsReturned, e:
return HttpResponseRedirect(reverse('mail.views.index'))
return thread_by_id(request, thread.id, suppress_redirect=True)
def search(request):
tokens = _search_tokens(request)
    if not tokens:
return HttpResponseRedirect(reverse('mail.views.index'))
sqs = SearchQuerySet().models(Thread)
for t in tokens:
sqs = sqs.filter_or(text_and_recipients=t)
sqs = sqs.order_by('-date')
if sqs.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
return index(request, search=tokens, threads=sqs)
def star_record_ajax(request, thread_id, action):
try:
thread = Thread.objects.get(id=thread_id)
except Thread.DoesNotExist, e:
return HttpResponse('{ status: \'not_found\'}');
if thread.star_count is None:
thread.star_count = 0
if action=='add':
thread.star_count += 1
elif action=='remove':
thread.star_count -= 1
thread.save()
return HttpResponse('{ status: \'success\'}')
def starred(request):
if not request.POST.get('kagan_star'):
return HttpResponseRedirect(reverse('mail.views.index'))
starred_ids = _prepare_ids_from_cookie(request, 'kagan_star', method='post')
if len(starred_ids)==0:
return HttpResponseRedirect(reverse('mail.views.index'))
starred = Thread.objects.filter(id__in=starred_ids).order_by('-date')
if starred.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
else:
return index(request, threads=starred)
return index(request, threads=starred)
def starred_all(request):
starred = Thread.objects.filter(star_count__gt=0).order_by('-star_count','-date')
if starred.count()==0:
return render_to_response('search_empty.html', { 'path': request.path }, context_instance=RequestContext(request))
else:
return index(request, threads=starred)
| bsd-3-clause | 8,381,273,969,921,054,000 | 32.738095 | 159 | 0.631498 | false |
nbeck90/city-swap | cityswap/requests/migrations/0001_initial.py | 1 | 1445 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-11 16:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(default=b'Type your title here')),
('description', models.TextField(default=b'Type your description here')),
('origin', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
('destination', models.CharField(choices=[(b'Seattle', b'Seattle'), (b'Portland', b'Portland')], default=b'Seattle', max_length=25)),
('date_created', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('courier', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='requests', to='profiles.Profile')),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_from', to='profiles.Profile')),
],
),
]
| mit | 4,474,503,118,401,711,600 | 44.15625 | 163 | 0.624913 | false |
ingmarlehmann/franca-tools | franca_parser/franca_parser/franca_ast.py | 1 | 15547 | #------------------------------------------------------------------------------
# franca_parser: franca_ast.py
#
# AST node classes: AST node classes for Franca IDL (*.fidl).
# Builds an AST to be used in other tools.
#
# This code is *heavlily* inspired by 'pycparser' by Eli Bendersky
# (https://github.com/eliben/pycparser/)
#
# Copyright (C) 2016, Ingmar Lehmann
# License: BSD
#------------------------------------------------------------------------------
import sys
class Node(object):
def __init__(self):
print ("node constructor")
def children(self):
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class ArrayTypeDeclaration(Node):
def __init__(self, typename, type, dimension):
self.typename = typename
self.type = type
self.dimension = dimension
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.typename is not None: nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ('dimension',)
class Attribute(Node):
def __init__(self, typename, name):
self.typename = typename
self.name = name
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.typename is not None: nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ()
class BroadcastMethod(Node):
def __init__(self, name, comment, out_args, is_selective=False):
self.name = name
self.comment = comment
self.out_args = out_args
self.is_selective = is_selective
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
if self.out_args is not None: nodelist.append(("out_args", self.out_args))
return tuple(nodelist)
attr_names = ('is_selective',)
class ComplexTypeDeclarationList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("members[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Constant(Node):
    def __init__(self, value):
self.value = value
def children(self):
return tuple()
attr_names = ('value',)
class Enum(Node):
def __init__(self, name, values, comment=None):
self.name = name
self.values = values
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.values is not None: nodelist.append(("values", self.values))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Enumerator(Node):
def __init__(self, name, value=None, comment=None):
self.name = name
self.value = value
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.value is not None: nodelist.append(("value", self.value))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class EnumeratorList(Node):
def __init__(self, enumerators):
self.enumerators = enumerators
def children(self):
nodelist = []
for i, child in enumerate(self.enumerators or []):
nodelist.append(("enumerators[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class FrancaComment(Node):
def __init__(self, comment):
self.comment = comment
def children(self):
return tuple()
attr_names = ('comment',)
class FrancaDocument(Node):
def __init__(self, package_identifier, imports, child_objects):
self.package_identifier = package_identifier
self.imports = imports
self.child_objects = child_objects
def children(self):
nodelist = []
if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier))
if self.imports is not None: nodelist.append(("imports", self.imports))
if self.child_objects is not None: nodelist.append(("child_objects", self.child_objects))
return tuple(nodelist)
attr_names = ()
class ID(Node):
def __init__(self, id):
self.id = id
def children(self):
return tuple()
attr_names = ('id',)
class ImportIdentifier(Node):
def __init__(self, import_identifier):
self.import_identifier = import_identifier
def children(self):
return tuple()
attr_names = ('import_identifier',)
class ImportStatement(Node):
def __init__(self, import_identifier, filename):
self.import_identifier = import_identifier
self.filename = filename
def children(self):
nodelist = []
if self.import_identifier is not None: nodelist.append(("import_identifier", self.import_identifier))
if self.filename is not None: nodelist.append(("filename", self.filename))
return tuple(nodelist)
attr_names = ()
class ImportStatementList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("imports[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class IntegerConstant(Node):
def __init__(self, value):
self.value = value
def children(self):
return tuple()
attr_names = ('value',)
class Interface(Node):
def __init__(self, name, members, comment=None):
self.name = name
self.members = members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.members is not None: nodelist.append(("members", self.members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Map(Node):
def __init__(self, name, key_type, value_type, comment=None):
self.name = name
self.key_type = key_type
self.value_type = value_type
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.key_type is not None: nodelist.append(("key_type", self.key_type))
if self.value_type is not None: nodelist.append(("value_type", self.value_type))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Method(Node):
def __init__(self, name, comment, body, is_fire_and_forget=False):
self.name = name
self.comment = comment
self.body = body
self.is_fire_and_forget = is_fire_and_forget
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ('is_fire_and_forget',)
class MethodBody(Node):
def __init__(self, in_args, out_args):
self.in_args = in_args
self.out_args = out_args
def children(self):
nodelist = []
if self.in_args is not None: nodelist.append(("in_args", self.in_args))
if self.out_args is not None: nodelist.append(("out_args", self.out_args))
return tuple(nodelist)
attr_names = ()
class MethodArgument(Node):
def __init__(self, type, name, comment=None):
self.type = type
self.name = name
self.comment = comment
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class MethodArgumentList(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
for i, child in enumerate(self.args or []):
nodelist.append(("args[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class MethodOutArguments(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class MethodInArguments(Node):
def __init__(self, args):
self.args = args
def children(self):
nodelist = []
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class PackageStatement(Node):
def __init__(self, package_identifier):
self.package_identifier = package_identifier
def children(self):
nodelist = []
if self.package_identifier is not None: nodelist.append(("package_identifier", self.package_identifier))
return tuple(nodelist)
attr_names = ()
class PackageIdentifier(Node):
def __init__(self, package_identifier):
self.package_identifier = package_identifier
def children(self):
return tuple()
attr_names = ('package_identifier',)
class RootLevelObjectList(Node):
def __init__(self, root_level_objects):
self.members = root_level_objects
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("root_objects[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class String(Node):
def __init__(self, string):
self.string = string
def children(self):
return tuple()
attr_names = ('string',)
class Struct(Node):
def __init__(self, name, struct_members, comment=None):
self.name = name
self.struct_members = struct_members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.struct_members is not None: nodelist.append(("struct_members", self.struct_members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class TypeCollection(Node):
def __init__(self, name, members, comment=None):
self.name = name
self.members = members
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.members is not None: nodelist.append(("members", self.members))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Typedef(Node):
def __init__(self, existing_type, new_type):
self.existing_type = existing_type
self.new_type = new_type
def children(self):
nodelist = []
if self.existing_type is not None: nodelist.append(("existing_type", self.existing_type))
if self.new_type is not None: nodelist.append(("new_type", self.new_type))
return tuple(nodelist)
attr_names = ()
class Typename(Node):
def __init__(self, typename):
self.typename = typename
def children(self):
nodelist = []
if self.typename is not None and isinstance(self.typename, Node): nodelist.append(("typename", self.typename))
return tuple(nodelist)
attr_names = ('typename',)
class Union(Node):
def __init__(self, name, member_list, comment=None):
self.name = name
self.member_list = member_list
self.comment = comment
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.member_list is not None: nodelist.append(("member_list", self.member_list))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class Variable(Node):
def __init__(self, typename, name, comment):
self.typename = typename
self.name = name
self.comment = comment
def children(self):
nodelist = []
if self.typename is not None: nodelist.append(("typename", self.typename))
if self.name is not None: nodelist.append(("name", self.name))
if self.comment is not None: nodelist.append(("comment", self.comment))
return tuple(nodelist)
attr_names = ()
class VariableList(Node):
def __init__(self, members):
self.members = members
def children(self):
nodelist = []
for i, child in enumerate(self.members or []):
nodelist.append(("members[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Version(Node):
def __init__(self, major, minor):
self.major = major
self.minor = minor
def children(self):
nodelist = []
if self.major is not None: nodelist.append(("major", self.major))
if self.minor is not None: nodelist.append(("minor", self.minor))
return tuple(nodelist)
attr_names = ()
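# Illustrative usage: these nodes are normally produced by franca_parser, but a
# tiny hand-built tree shows how children() and show() cooperate. The package
# name below is made up.
if __name__ == '__main__':
    example = FrancaDocument(
        PackageIdentifier('org.example.demo'),
        ImportStatementList([]),
        RootLevelObjectList([]))
    example.show(attrnames=True)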
| mpl-2.0 | 2,878,004,356,479,588,400 | 29.247082 | 118 | 0.589631 | false |
nuobit/odoo-addons | connector_oxigesti/components_custom/binder.py | 1 | 8847 | # -*- coding: utf-8 -*-
# Copyright 2013-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
"""
Binders
=======
Binders are components that know how to find the external ID for an
Odoo ID, how to find the Odoo ID for an external ID and how to
create the binding between them.
"""
import psycopg2
import json
from odoo import fields, models, tools
from odoo.addons.component.core import AbstractComponent
from contextlib import contextmanager
from odoo.addons.connector.exception import (RetryableJobError, )
import odoo
class BinderComposite(AbstractComponent):
""" The same as Binder but allowing composite external keys
"""
_name = 'base.binder.composite'
_inherit = 'base.binder'
_default_binding_field = 'oxigesti_bind_ids'
_external_display_field = 'external_id_display'
_odoo_extra_fields = []
@contextmanager
def _retry_unique_violation(self):
""" Context manager: catch Unique constraint error and retry the
job later.
When we execute several jobs workers concurrently, it happens
that 2 jobs are creating the same record at the same time (binding
record created by :meth:`_export_dependency`), resulting in:
IntegrityError: duplicate key value violates unique
constraint "my_backend_product_product_odoo_uniq"
DETAIL: Key (backend_id, odoo_id)=(1, 4851) already exists.
In that case, we'll retry the import just later.
.. warning:: The unique constraint must be created on the
binding record to prevent 2 bindings to be created
for the same External record.
"""
try:
yield
except psycopg2.IntegrityError as err:
if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise RetryableJobError(
'A database error caused the failure of the job:\n'
'%s\n\n'
'Likely due to 2 concurrent jobs wanting to create '
'the same record. The job will be retried later.' % err)
else:
raise
def _is_binding(self, binding):
try:
binding._fields[self._odoo_field]
except KeyError:
return False
return True
def _find_binding(self, relation, binding_extra_vals={}):
if self._is_binding(relation):
raise Exception("The source object %s must not be a binding" % relation.model._name)
if not set(self._odoo_extra_fields).issubset(set(binding_extra_vals.keys())):
raise Exception("If _odoo_extra_fields are defined %s, "
"you must specify the correpsonding binding_extra_vals %s" % (
self._odoo_extra_fields, binding_extra_vals))
domain = [(self._odoo_field, '=', relation.id),
(self._backend_field, '=', self.backend_record.id)]
for f in self._odoo_extra_fields:
domain.append((f, '=', binding_extra_vals[f]))
binding = self.model.with_context(
active_test=False).search(domain)
if binding:
binding.ensure_one()
return binding
def wrap_binding(self, relation, binding_field=None, binding_extra_vals={}):
if not relation:
return
if binding_field is None:
if not self._default_binding_field:
raise Exception("_default_binding_field defined on synchronizer class is mandatory")
binding_field = self._default_binding_field
# wrap is typically True if the relation is a 'product.product'
# record but the binding model is 'oxigesti.product.product'
wrap = relation._name != self.model._name
if wrap and hasattr(relation, binding_field):
binding = self._find_binding(relation, binding_extra_vals)
if not binding:
# we are working with a unwrapped record (e.g.
# product.template) and the binding does not exist yet.
# Example: I created a product.product and its binding
# oxigesti.product.product, it is exported, but we need to
# create the binding for the template.
_bind_values = {self._odoo_field: relation.id,
self._backend_field: self.backend_record.id}
_bind_values.update(binding_extra_vals)
# If 2 jobs create it at the same time, retry
# one later. A unique constraint (backend_id,
# odoo_id) should exist on the binding model
with self._retry_unique_violation():
binding = (self.model
.with_context(connector_no_export=True)
.sudo()
.create(_bind_values))
# Eager commit to avoid having 2 jobs
# exporting at the same time. The constraint
# will pop if an other job already created
# the same binding. It will be caught and
# raise a RetryableJobError.
if not odoo.tools.config['test_enable']:
self.env.cr.commit() # nowait
else:
# If oxigest_bind_ids does not exist we are typically in a
# "direct" binding (the binding record is the same record).
# If wrap is True, relation is already a binding record.
binding = relation
if not self._is_binding(binding):
raise Exception(
"Expected binding '%s' and found regular model '%s'" % (self.model._name, relation._name))
return binding
def to_internal(self, external_id, unwrap=False):
""" Give the Odoo recordset for an external ID
:param external_id: external ID for which we want
the Odoo ID
:param unwrap: if True, returns the normal record
else return the binding record
:return: a recordset, depending on the value of unwrap,
or an empty recordset if the external_id is not mapped
:rtype: recordset
"""
domain = [(self._backend_field, '=', self.backend_record.id),
(self._external_display_field, '=', json.dumps(external_id))]
bindings = self.model.with_context(active_test=False).search(
domain
)
if not bindings:
if unwrap:
return self.model.browse()[self._odoo_field]
return self.model.browse()
bindings.ensure_one()
if unwrap:
bindings = bindings[self._odoo_field]
return bindings
def to_external(self, binding, wrap=False, wrapped_model=None, binding_extra_vals={}):
""" Give the external ID for an Odoo binding ID
:param binding: Odoo binding for which we want the external id
:param wrap: if True, binding is a normal record, the
method will search the corresponding binding and return
the external id of the binding
:return: external ID of the record
"""
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
if wrap:
if not wrapped_model:
raise Exception("The wrapped model is mandatory if binding is not an object")
binding = self.env[wrapped_model].browse(binding)
else:
binding = self.model.browse(binding)
if wrap:
binding = self._find_binding(binding, binding_extra_vals)
if not binding:
return None
return binding[self._external_field] or None
def bind(self, external_id, binding):
""" Create the link between an external ID and an Odoo ID
:param external_id: external id to bind
:param binding: Odoo record to bind
:type binding: int
"""
# Prevent False, None, or "", but not 0
assert (external_id or external_id is 0) and binding, (
"external_id or binding missing, "
"got: %s, %s" % (external_id, binding)
)
# avoid to trigger the export when we modify the `external_id`
now_fmt = fields.Datetime.now()
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
binding = self.model.browse(binding)
binding.with_context(connector_no_export=True).write({
self._external_field: external_id,
self._sync_date_field: now_fmt,
})
def _get_external_id(self, binding):
return None
| agpl-3.0 | 2,360,778,247,693,015,600 | 38.851351 | 106 | 0.58144 | false |
mpi-sws-rse/datablox | blox/enum_shard__1_0/b_enum_shard.py | 1 | 4344 | """This is a shard that works off a fixed set of values. The shard_field
configuration property should be set to an incoming message property that
can be used to select a shard. Each node's definition should have a property
called shard_field_value. This is used to build a mapping from vlaues of
the shard_field to nodes.
"""
import sys
import os.path
from logging import ERROR, WARN, INFO, DEBUG
import time
import random
from collections import defaultdict
try:
import datablox_framework
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../datablox_framework")))
import datablox_framework
from datablox_framework.block import *
from datablox_framework.shard import *
class EnumShardError(Exception):
pass
class ValueNotInEnum(Exception):
def __init__(self, v):
Exception.__init__(self, "Value '%s' not found in Enum" % v)
self.v = v
class enum_shard(Shard):
@classmethod
def initial_configs(cls, config):
if isinstance(config["node_type"]["args"], list):
#at least have as many arguments as there are nodes
assert(len(config["node_type"]["args"]) >= config["nodes"])
return [config["node_type"]["args"][i] for i in range(config["nodes"])]
else:
return [config["node_type"]["args"] for i in range(config["nodes"])]
def on_load(self, config):
self.nodes = config["nodes"]
self.config = config
self.shard_field = config["shard_field"]
self.add_port("input", Port.PUSH, Port.UNNAMED, [])
self.add_port("input_query", Port.QUERY, Port.UNNAMED, [])
self.field_to_node_mapping = {}
self.message_counts = []
for i in range(config["nodes"]):
node_info = config["node_type"]["args"][i]
if not node_info.has_key("shard_field_value"):
raise EnumShardError("Shard %d missing shard_field_value property" % i)
v = node_info["shard_field_value"]
if self.field_to_node_mapping.has_key(v):
raise EnumShardError("Shard has multiple nodes defined for field value %s" %
v)
self.field_to_node_mapping[v] = i
self.message_counts.append(0)
self.log(INFO, "field to node mapping: %r" % self.field_to_node_mapping)
self.log(INFO, "Enum shard loaded")
def find_node_num(self, row):
val = row[self.shard_field]
if self.field_to_node_mapping.has_key(val):
return self.field_to_node_mapping[val]
else:
raise ValueNotInEnum(val)
def flush_logs(self, logs):
for p_num, log in logs.items():
self.push_node(p_num, log)
def process_log(self, log):
logs = defaultdict(Log)
for row in log.iter_flatten():
try:
p = self.find_node_num(row)
logs[p].append_row(row)
self.message_counts[p] += 1
except KeyError:
#this row does not have shard field - send it to all ports
#useful for sending tokens
#first flush all the pending logs, because this doesn't have the same names
self.flush_logs(logs)
logs = defaultdict(Log)
nl = Log()
nl.append_row(row)
for i in range(self.nodes):
self.push_node(i, nl)
self.message_counts[i] += 1
except ValueNotInEnum, e:
#this row's shard field value not in enum- send it to a random port
#first flush all the pending logs, because this doesn't have the same names
self.flush_logs(logs)
logs = defaultdict(Log)
dest_node = random.randint(0, self.nodes-1)
self.log(WARN,"%s, sending to a random node (%d)" %
(e, dest_node))
nl = Log()
nl.append_row(row)
self.push_node(dest_node, nl)
self.message_counts[dest_node] += 1
self.flush_logs(logs)
def recv_push(self, port, log):
self.process_log(log)
#migration not implemented yet
def can_add_node(self):
return False
def recv_query(self, port, log):
self.process_log(log)
ret = Log()
ret.log["result"] = True
self.return_query_res(port, ret)
def on_shutdown(self):
self.log(INFO, "Total messages processed: %d" % sum(self.message_counts))
for i in range(self.config["nodes"]):
self.log(INFO, " Node %d: %d messages sent" %
(i, self.message_counts[i]))
| apache-2.0 | 2,849,247,604,520,422,000 | 34.032258 | 84 | 0.630064 | false |
alexryndin/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server.py | 1 | 8565 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oozie_server_upgrade
from resource_management.core import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.script import Script
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.functions import compare_versions
from resource_management.libraries.functions import format_stack_version
from resource_management.libraries.functions.security_commons import build_expectations
from resource_management.libraries.functions.security_commons import cached_kinit_executor
from resource_management.libraries.functions.security_commons import get_params_from_filesystem
from resource_management.libraries.functions.security_commons import validate_security_config_properties
from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
from oozie import oozie
from oozie_service import oozie_service
class OozieServer(Script):
def get_component_name(self):
return "oozie-server"
def install(self, env):
self.install_packages(env)
def configure(self, env, upgrade_type=None):
import params
#TODO: needed?
if upgrade_type == "nonrolling" and params.upgrade_direction == Direction.UPGRADE and \
params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
conf_select.select(params.stack_name, "oozie", params.version)
# In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
# oozie, we need to create the symlinks both for server and client.
# This is required as both need to be pointing to new installed oozie version.
# Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
stack_select.select("oozie-client", params.version)
# Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
stack_select.select("oozie-server", params.version)
env.set_params(params)
oozie(is_server=True)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
#TODO remove this when config command will be implemented
self.configure(env)
# preparing the WAR file must run after configure since configure writes out
# oozie-env.sh which is needed to have the right environment directories setup!
if upgrade_type is not None:
oozie_server_upgrade.prepare_warfile();
oozie_service(action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
oozie_service(action='stop', upgrade_type=upgrade_type)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
expectations = {
"oozie-site":
build_expectations('oozie-site',
{
"oozie.authentication.type": "kerberos",
"oozie.service.AuthorizationService.security.enabled": "true",
"oozie.service.HadoopAccessorService.kerberos.enabled": "true"
},
[
"local.realm",
"oozie.authentication.kerberos.principal",
"oozie.authentication.kerberos.keytab",
"oozie.service.HadoopAccessorService.kerberos.principal",
"oozie.service.HadoopAccessorService.keytab.file"
],
None)
}
security_params = get_params_from_filesystem(status_params.conf_dir,
{'oozie-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ('oozie-site' not in security_params
or 'oozie.authentication.kerberos.principal' not in security_params['oozie-site']
or 'oozie.authentication.kerberos.keytab' not in security_params['oozie-site']
or 'oozie.service.HadoopAccessorService.kerberos.principal' not in security_params['oozie-site']
or 'oozie.service.HadoopAccessorService.keytab.file' not in security_params['oozie-site']):
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.oozie_user,
security_params['oozie-site']['oozie.authentication.kerberos.keytab'],
security_params['oozie-site']['oozie.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.oozie_user,
security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def pre_upgrade_restart(self, env, upgrade_type=None):
"""
Performs the tasks surrounding the Oozie startup when a rolling upgrade
is in progress. This includes backing up the configuration, updating
the database, preparing the WAR, and installing the sharelib in HDFS.
:param env:
:return:
"""
import params
env.set_params(params)
# this function should not execute if the version can't be determined or
# is not at least IOP 4.0.0.0
if not params.version or compare_versions(format_stack_version(params.version), '4.0.0.0') < 0:
return
Logger.info("Executing Oozie Server Rolling Upgrade pre-restart")
oozie_server_upgrade.backup_configuration()
conf_select.select(params.stack_name, "oozie", params.version)
stack_select.select("oozie-server", params.version)
#Execute(format("stack-select set oozie-server {version}"))
oozie_server_upgrade.restore_configuration()
#oozie_server_upgrade.prepare_libext_directory()
oozie_server_upgrade.upgrade_oozie()
if __name__ == "__main__":
OozieServer().execute()
| apache-2.0 | 4,240,268,077,202,023,000 | 43.378238 | 120 | 0.669469 | false |
helfertool/helfertool | src/registration/models/shift.py | 1 | 7529 | from django.core.validators import MinValueValidator
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.template.defaultfilters import date as date_f
from django.utils.timezone import localtime
from django.utils.translation import ugettext_lazy as _
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
import math
class Shift(models.Model):
""" A shift of one job.
Columns:
:job: job of this shift
:begin: begin of the shift
:end: end of the shift
:number: number of people
:blocked: shift is blocked, if the job is public
:hidden: shift is not displayed publicly
:name: name of the shift (optional)
"""
class Meta:
ordering = ['job', 'begin', 'end']
job = models.ForeignKey(
'Job',
on_delete=models.CASCADE,
)
name = models.CharField(
max_length=200,
verbose_name=_("Name (optional)"),
default="",
blank=True,
)
begin = models.DateTimeField(
verbose_name=_("Begin"),
)
end = models.DateTimeField(
verbose_name=_("End"),
)
number = models.IntegerField(
default=0,
verbose_name=_("Number of helpers"),
validators=[MinValueValidator(0)],
)
blocked = models.BooleanField(
default=False,
verbose_name=_("The shift is blocked and displayed as full."),
)
hidden = models.BooleanField(
default=False,
verbose_name=_("The shift is not visible."),
)
gifts = models.ManyToManyField(
'gifts.GiftSet',
verbose_name=_("Gifts"),
blank=True,
)
archived_number = models.IntegerField(
default=0,
verbose_name=_("Number of registered helpers for archived event"),
)
def __str__(self):
if self.name:
return "%s, %s, %s" % (self.job.name, self.name,
self.time_with_day())
else:
return "%s, %s" % (self.job.name, self.time_with_day())
def time(self):
""" Returns a string representation of the begin and end time.
The begin contains the date and time, the end only the time.
"""
return "%s, %s - %s" % (date_f(localtime(self.begin), 'DATE_FORMAT'),
date_f(localtime(self.begin), 'TIME_FORMAT'),
date_f(localtime(self.end), 'TIME_FORMAT'))
def time_hours(self):
""" Returns a string representation of the begin and end time.
Only the time is used, the date is not shown.
"""
return "%s - %s" % (date_f(localtime(self.begin), 'TIME_FORMAT'),
date_f(localtime(self.end), 'TIME_FORMAT'))
def time_with_day(self):
""" Returns a string representation of the day.
If the shift is on two days only the name of the first day is returned.
"""
day = date_f(localtime(self.begin), "l")
return "{}, {}".format(day, self.time())
def date(self):
""" Returns the day on which the shifts begins. """
return localtime(self.begin).date()
def num_helpers(self):
"""
Returns the current number of helpers, but 0 if event is archived.
"""
return self.helper_set.count()
def num_helpers_archived(self):
""" Returns the current number of helpers- """
if self.job.event.archived:
return self.archived_number
else:
return self.helper_set.count()
def is_full(self):
""" Check if the shift is full and return a boolean. """
return self.num_helpers() >= self.number
def helpers_percent(self):
""" Calculate the percentage of registered helpers and returns an int.
If the maximal number of helpers for a shift is 0, 0 is returned.
"""
if self.number == 0:
return 0
num = self.num_helpers_archived()
return int(round(float(num) / self.number * 100.0, 0))
def helpers_percent_5percent(self):
"""
Returns the percentage of registered helpers in 5% steps.
So the returned value is between 0 and 20 (including both values).
This is used to generate the CSS class names defined in style.css.
Therefore, inline CSS can be avoided.
"""
percent = self.helpers_percent()
return math.ceil(percent / 5)
def helpers_percent_vacant_5percent(self):
"""
Same as `helpers_percent_5percent`, but for the missing helpers.
"""
return 20 - self.helpers_percent_5percent()
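    # Worked example (illustrative): with number == 20 and 7 registered helpers,
    # helpers_percent() returns 35, helpers_percent_5percent() returns 7 and
    # helpers_percent_vacant_5percent() returns 13, which select the matching
    # CSS classes from style.css.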
@property
def shirt_sizes(self):
# data structure
shirts = OrderedDict()
for size, name in self.job.event.get_shirt_choices():
shirts.update({name: 0})
# collect all sizes, this must be the first shift of the helper
for helper in self.helper_set.all():
if helper.first_shift == self:
tmp = shirts[helper.get_shirt_display()]
shirts.update({helper.get_shirt_display(): tmp+1})
return shirts
def duplicate(self, new_date=None, new_job=None, gift_set_mapping=None):
""" Duplicate a shift. There are multiple possibilities:
* Shift is copied to new day in same job: set new_date
* Shift is copied to new job in same event: set new_job
* Shift is copied to new event: set new_job and gift_set_mapping
"""
new_shift = deepcopy(self)
new_shift.pk = None
new_shift.archived_number = 0
# maybe shift is copied to new job
if new_job:
new_shift.job = new_job
# if shift is copied to new event, move begin and end time according to diff in event dates
if self.job.event != new_job.event:
diff = new_job.event.date - self.job.event.date
new_shift.begin += diff
new_shift.end += diff
# maybe just the date is changed
if new_date:
new_shift.move_date(new_date)
# now save that
new_shift.save()
# and finally set the gifts again
for gift in self.gifts.all():
if gift_set_mapping:
new_shift.gifts.add(gift_set_mapping[gift])
else:
new_shift.gifts.add(gift)
return new_shift
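    # Illustrative calls (the objects named here are made up):
    #   shift.duplicate(new_date=other_day)                  # same job, new day
    #   shift.duplicate(new_job=other_job)                    # new job, same event
    #   shift.duplicate(new_job=new_event_job,
    #                   gift_set_mapping=mapping)             # copy into another event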
def move_date(self, new_date):
# current begin and end in local time
old_begin_localtime = localtime(self.begin)
old_end_localtime = localtime(self.end)
# move date alone without chainging time
diff_days = new_date - old_begin_localtime.date()
new_begin_date = old_begin_localtime.date() + diff_days
new_end_date = old_end_localtime.date() + diff_days
# set time separately (10 am should always be 10 am, also when a time change is between old and new date)
begin_time = old_begin_localtime.time()
end_time = old_end_localtime.time()
self.begin = datetime.combine(new_begin_date, begin_time)
self.end = datetime.combine(new_end_date, end_time)
@receiver(pre_delete, sender=Shift)
def shift_deleted(sender, instance, using, **kwargs):
# m2m_changed does not trigger here, so remote the helpers before the shift is deleted
for helper in instance.helper_set.all():
helper.shifts.remove(instance)
| agpl-3.0 | -1,135,561,369,318,406,400 | 31.175214 | 113 | 0.594501 | false |
mpapierski/hb_balancer | protocol.py | 1 | 6557 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hb_balancer
# High performance load balancer between Helbreath World Servers.
#
# Copyright (C) 2012 Michał Papierski <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import random
import logging
from twisted.internet import reactor
from twisted.protocols.stateful import StatefulProtocol
from twisted.python import log
from packets import Packets
class BaseHelbreathProtocol(StatefulProtocol):
''' Basic Helbreath Protocol '''
def getInitialState(self):
'''
Protocol overview:
[Key unsigned byte] [Size unsigned short] [Data Size-bytes]
'''
return (self.get_key, 1)
def get_key(self, data):
''' Get key '''
self.key, = struct.unpack('<B', data)
return (self.get_data_size, 2)
def get_data_size(self, data):
''' Read data size '''
self.data_size, = struct.unpack('<H', data)
return (self.get_data, self.data_size - 3)
def get_data(self, data):
''' Read encoded data and decode it '''
if self.key > 0:
# Decode
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) ^ (self.key ^ (self.data_size - 3 - i))) - (i ^ self.key)) % 256)
data = ''.join(data)
# Pass decoded data
self.raw_data(data)
return (self.get_key, 1)
def send_message(self, data):
''' Send a Helbreath Packet data '''
key = random.randint(0, 255)
if key > 0:
# Encode
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) + (i ^ key)) ^ (key ^ (len(data) - i))) % 256)
data = ''.join(data)
self.transport.write(struct.pack('<BH', key, len(data) + 3) + data)
def raw_data(self, data):
''' Got packet '''
pass
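# Worked example of the framing above (illustrative): with key 0 the payload is not
# scrambled, so send_message('abc') writes struct.pack('<BH', 0, 6) + 'abc', i.e.
# '\x00\x06\x00abc': one key byte, a little-endian total length of payload plus 3
# header bytes, then the payload itself. For key > 0 every payload byte is shifted
# and XORed exactly as in send_message()/get_data().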
class ProxyHelbreathProtocol(BaseHelbreathProtocol):
''' Proxy Helbreath protocol used for proxying packets '''
def connectionMade(self):
self.factory.success(self)
def login(self, account_name, account_password, world_name):
''' Request a login '''
# Casting to str is made for sure
# world_name could be either str or unicode.
self.send_message(struct.pack('<IH10s10s30s',
Packets.MSGID_REQUEST_LOGIN, # MsgID
0, # MsgType
str(account_name),
str(account_password),
str(world_name)))
def raw_data(self, data):
self.factory.receiver(data)
self.transport.loseConnection()
class HelbreathProtocol(BaseHelbreathProtocol):
def raw_data(self, data):
# Header
msg_id, msg_type = struct.unpack('<IH', data[:6])
# Process packet data
if msg_id == Packets.MSGID_REQUEST_LOGIN:
# Client is requesting login
packet_format = '<10s10s30s'
account_name, account_password, world_name = struct.unpack(
packet_format,
data[6:]
)
self.request_login(
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
world_name.rstrip('\x00')
)
elif msg_id == Packets.MSGID_REQUEST_ENTERGAME:
# Client is trying to enter game
packet_format = '<10s10s10s10si30s120s'
player_name, map_name, account_name, account_password, \
level, world_name, cmd_line = struct.unpack(
packet_format,
data[6:])
self.request_entergame(
msg_type,
player_name.rstrip('\x00'),
map_name.rstrip('\x00'),
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
level,
world_name.rstrip('\x00'),
cmd_line.rstrip('\x00'))
else:
# Abort if a packet is not (yet) known
self.transport.loseConnection()
def request_login(self, account_name, account_password, world_name):
''' Request client login
account_name -- Account name
account_password -- Account password
world_name -- World server name
'''
def world_is_down(failure = None):
''' The requested world is offline '''
self.send_message(struct.pack('<IH',
Packets.MSGID_RESPONSE_LOG,
Packets.DEF_LOGRESMSGTYPE_NOTEXISTINGWORLDSERVER))
reactor.callLater(10, self.transport.loseConnection)
def handle_response(data):
''' Pass data and close the connection nicely '''
self.send_message(data)
reactor.callLater(10, self.transport.loseConnection)
def connection_made(remote):
''' Connection is made. Request a login. '''
log.msg('Remote connection made!')
remote.login(
account_name,
account_password,
remote.factory.world_name
)
# Request connection to a world by its name, pass some callbacks
self.factory.connect_to_world(
world_name = world_name,
receiver = handle_response,
success = connection_made,
failure = world_is_down)
log.msg('Request world %s' % (world_name, ))
def request_entergame(self, msg_type, player_name, map_name, account_name,
account_password, level, world_name, cmd_line):
''' Client wants to enter game. '''
log.msg('Request entergame player(%s) map(%s) account(%s) world(%s)' % (
player_name, map_name, account_name, world_name))
def connection_made(remote):
''' Request enter game, construct exacly the same data.
TODO: Parse the msg_type. '''
log.msg('Requesting enter game...')
remote.send_message(struct.pack('<IH10s10s10s10si30s120s',
Packets.MSGID_REQUEST_ENTERGAME,
msg_type,
player_name,
map_name,
account_name,
account_password,
level,
str(remote.factory.world_name),
cmd_line))
def error_handler(failure = None):
''' Unable to connect to destination world '''
log.err('Enter game error for account(%s) at world(%s)' % (
account_name,
world_name))
self.send_message(struct.pack('<IHB',
Packets.MSGID_RESPONSE_ENTERGAME,
Packets.DEF_ENTERGAMERESTYPE_REJECT,
Packets.DEF_REJECTTYPE_DATADIFFERENCE))
reactor.callLater(10, self.transport.loseConnection)
def response_handler(data):
''' Pass the (modified) data '''
self.send_message(data)
self.factory.connect_to_world(
world_name = world_name,
receiver = response_handler,
success = connection_made,
failure = error_handler
)
| agpl-3.0 | -1,380,034,530,598,753,500 | 28.399103 | 98 | 0.675412 | false |
sameersingh/bibere | scripts/first_pages.py | 1 | 1750 | #!/usr/bin/python3
import argparse
from read_json import *
import tempfile
import shutil
import pypdftk
import os
def get_pdf(source, dest):
shutil.copy(source, dest)
def run(idir, bdir, ofile):
authors, venues, papers = read_all_info(idir)
fpdf_names = []
tmpdirname = tempfile.mkdtemp()
for p in papers:
if p['pubTypeSlot'] == 'Conference' or p['pubTypeSlot'] == 'Journal':
if 'pdfLink' not in p:
print("pdfLink missing:", p['id'])
elif p['pdfLink'].startswith("http"):
print("local link missing:", p['id'])
else:
source = bdir + "/" + p['pdfLink']
i = len(fpdf_names)
dest = "%s/%d.pdf" % (tmpdirname, i)
print("getting %s, putting it %s" % (source, dest))
get_pdf(source, dest)
tdir = "%s/%d/" % (tmpdirname, i)
os.mkdir(tdir)
fpdf_names.append(tdir + "page_01.pdf")
pypdftk.split(dest, tdir)
pypdftk.concat(fpdf_names, out_file=ofile)
shutil.rmtree(tmpdirname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="directory containing the json files for authors/papers", required=True)
parser.add_argument("-b", "--basedir", help="the base directory of where the full PDFs reside.", required=True)
parser.add_argument("-o", "--output", help="output pdf file for the first pages", required=True)
args = parser.parse_args()
print("input: ", args.input)
print("basedir: ", args.basedir)
print("output: ", args.output)
run(args.input, args.basedir, args.output)
| bsd-2-clause | -2,552,111,543,381,166,600 | 36.888889 | 118 | 0.564 | false |
openstack/yaql | yaql/language/contexts.py | 1 | 9928 | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from yaql.language import exceptions
from yaql.language import runner
from yaql.language import specs
from yaql.language import utils
class ContextBase(metaclass=abc.ABCMeta):
def __init__(self, parent_context=None, convention=None):
self._parent_context = parent_context
self._convention = convention
if convention is None and parent_context:
self._convention = parent_context.convention
@property
def parent(self):
return self._parent_context
@abc.abstractmethod
def register_function(self, spec, *args, **kwargs):
pass
@abc.abstractmethod
def get_data(self, name, default=None, ask_parent=True):
return default
def __getitem__(self, name):
return self.get_data(name)
@abc.abstractmethod
def __setitem__(self, name, value):
pass
@abc.abstractmethod
def __delitem__(self, name):
pass
@abc.abstractmethod
def __contains__(self, item):
return False
def __call__(self, name, engine, receiver=utils.NO_VALUE,
data_context=None, use_convention=False,
function_filter=None):
return lambda *args, **kwargs: runner.call(
name, self, args, kwargs, engine, receiver,
data_context, use_convention, function_filter)
@abc.abstractmethod
def get_functions(self, name, predicate=None, use_convention=False):
return [], False
@abc.abstractmethod
def delete_function(self, spec):
pass
def collect_functions(self, name, predicate=None, use_convention=False):
overloads = []
p = self
while p is not None:
context_predicate = None
if predicate:
context_predicate = lambda fd: predicate(fd, p) # noqa: E731
layer_overloads, is_exclusive = p.get_functions(
name, context_predicate, use_convention)
p = None if is_exclusive else p.parent
if layer_overloads:
overloads.append(layer_overloads)
return overloads
def create_child_context(self):
return type(self)(self)
@property
def convention(self):
return self._convention
@abc.abstractmethod
def keys(self):
return {}.keys()
class Context(ContextBase):
def __init__(self, parent_context=None, data=utils.NO_VALUE,
convention=None):
super(Context, self).__init__(parent_context, convention)
self._functions = {}
self._data = {}
self._exclusive_funcs = set()
if data is not utils.NO_VALUE:
self['$'] = data
@staticmethod
def _import_function_definition(fd):
return fd
def register_function(self, spec, *args, **kwargs):
exclusive = kwargs.pop('exclusive', False)
if not isinstance(spec, specs.FunctionDefinition) and callable(spec):
spec = specs.get_function_definition(
spec, *args, convention=self._convention, **kwargs)
spec = self._import_function_definition(spec)
if spec.is_method:
if not spec.is_valid_method():
raise exceptions.InvalidMethodException(spec.name)
self._functions.setdefault(spec.name, set()).add(spec)
if exclusive:
self._exclusive_funcs.add(spec.name)
def delete_function(self, spec):
self._functions.get(spec.name, set()).discard(spec)
self._exclusive_funcs.discard(spec.name)
def get_functions(self, name, predicate=None, use_convention=False):
name = name.rstrip('_')
if use_convention and self._convention is not None:
name = self._convention.convert_function_name(name)
if predicate is None:
predicate = lambda x: True # noqa: E731
return (
set(filter(predicate, self._functions.get(name, set()))),
name in self._exclusive_funcs
)
@staticmethod
def _normalize_name(name):
if not name.startswith('$'):
name = ('$' + name)
if name == '$':
name = '$1'
return name
def __setitem__(self, name, value):
self._data[self._normalize_name(name)] = value
def get_data(self, name, default=None, ask_parent=True):
name = self._normalize_name(name)
if name in self._data:
return self._data[name]
ctx = self.parent
while ask_parent and ctx:
result = ctx.get_data(name, utils.NO_VALUE, False)
if result is utils.NO_VALUE:
ctx = ctx.parent
else:
return result
return default
def __delitem__(self, name):
self._data.pop(self._normalize_name(name))
def __contains__(self, item):
if isinstance(item, specs.FunctionDefinition):
return item in self._functions.get(item.name, [])
if isinstance(item, str):
return self._normalize_name(item) in self._data
return False
def keys(self):
return self._data.keys()
class MultiContext(ContextBase):
def __init__(self, context_list, convention=None):
self._context_list = context_list
if convention is None:
convention = context_list[0].convention
parents = tuple(
filter(lambda t: t, map(lambda t: t.parent, context_list))
)
if not parents:
super(MultiContext, self).__init__(None, convention)
elif len(parents) == 1:
super(MultiContext, self).__init__(parents[0], convention)
else:
super(MultiContext, self).__init__(MultiContext(parents),
convention)
def register_function(self, spec, *args, **kwargs):
self._context_list[0].register_function(spec, *args, **kwargs)
def get_data(self, name, default=None, ask_parent=True):
for context in self._context_list:
result = context.get_data(name, utils.NO_VALUE, False)
if result is not utils.NO_VALUE:
return result
ctx = self.parent
while ask_parent and ctx:
result = ctx.get_data(name, utils.NO_VALUE, False)
if result is utils.NO_VALUE:
ctx = ctx.parent
else:
return result
return default
def __setitem__(self, name, value):
self._context_list[0][name] = value
def __delitem__(self, name):
for context in self._context_list:
del context[name]
def create_child_context(self):
return Context(self)
def keys(self):
prev_keys = set()
for context in self._context_list:
for key in context.keys():
if key not in prev_keys:
prev_keys.add(key)
yield key
def delete_function(self, spec):
for context in self._context_list:
context.delete_function(spec)
def __contains__(self, item):
for context in self._context_list:
if item in context:
return True
return False
def get_functions(self, name, predicate=None, use_convention=False):
result = set()
is_exclusive = False
for context in self._context_list:
funcs, exclusive = context.get_functions(
name, predicate, use_convention)
result.update(funcs)
if exclusive:
is_exclusive = True
return result, is_exclusive
class LinkedContext(ContextBase):
"""Context that is as a proxy to another context but has its own parent."""
def __init__(self, parent_context, linked_context, convention=None):
self.linked_context = linked_context
if linked_context.parent:
super(LinkedContext, self).__init__(
LinkedContext(parent_context, linked_context.parent,
convention), convention)
else:
super(LinkedContext, self).__init__(parent_context, convention)
def register_function(self, spec, *args, **kwargs):
return self.linked_context.register_function(spec, *args, **kwargs)
def keys(self):
return self.linked_context.keys()
def get_data(self, name, default=None, ask_parent=True):
result = self.linked_context.get_data(
name, default=utils.NO_VALUE, ask_parent=False)
if result is utils.NO_VALUE:
if not ask_parent or not self.parent:
return default
return self.parent.get_data(name, default=default, ask_parent=True)
return result
def get_functions(self, name, predicate=None, use_convention=False):
return self.linked_context.get_functions(
name, predicate=predicate, use_convention=use_convention)
def delete_function(self, spec):
return self.linked_context.delete_function(spec)
def __contains__(self, item):
return item in self.linked_context
def __delitem__(self, name):
del self.linked_context[name]
def __setitem__(self, name, value):
self.linked_context[name] = value
def create_child_context(self):
return type(self.linked_context)(self)
| apache-2.0 | 7,695,287,142,699,506,000 | 32.427609 | 79 | 0.599919 | false |
ActiveState/code | recipes/Python/59867_crossplatform_import_hook_endofline/recipe-59867.py | 1 | 1504 | # Import hook for end-of-line conversion,
# by David Goodger ([email protected]).
# Put in your sitecustomize.py, anywhere on sys.path, and you'll be able to
# import Python modules with any of Unix, Mac, or Windows line endings.
import ihooks, imp, py_compile
class MyHooks(ihooks.Hooks):
def load_source(self, name, filename, file=None):
"""Compile source files with any line ending."""
if file:
file.close()
py_compile.compile(filename) # line ending conversion is in here
cfile = open(filename + (__debug__ and 'c' or 'o'), 'rb')
try:
return self.load_compiled(name, filename, cfile)
finally:
cfile.close()
class MyModuleLoader(ihooks.ModuleLoader):
def load_module(self, name, stuff):
"""Special-case package directory imports."""
file, filename, (suff, mode, type) = stuff
path = None
if type == imp.PKG_DIRECTORY:
stuff = self.find_module_in_dir("__init__", filename, 0)
file = stuff[0] # package/__init__.py
path = [filename]
try: # let superclass handle the rest
module = ihooks.ModuleLoader.load_module(self, name, stuff)
finally:
if file:
file.close()
if path:
module.__path__ = path # necessary for pkg.module imports
return module
ihooks.ModuleImporter(MyModuleLoader(MyHooks())).install()
| mit | 6,434,838,695,669,646,000 | 35.682927 | 75 | 0.588431 | false |
RTHMaK/RPGOne | deep_qa-master/deep_qa/layers/recurrence_modes.py | 1 | 1184 | from typing import Any, Dict
from collections import OrderedDict
from keras import backend as K
class FixedRecurrence:
'''
This recurrence class simply performs a fixed number of memory network steps and
returns the memory representation and representation of the background knowledge
generated by the knowledge_selector and knowledge_combiner layers (the simplest
case being a weighted sum).
'''
def __init__(self, memory_network, params: Dict[str, Any]):
self.num_memory_layers = params.pop("num_memory_layers", 1)
self.memory_network = memory_network
def __call__(self, encoded_question, current_memory, encoded_background):
for _ in range(self.num_memory_layers):
current_memory, attended_knowledge = \
self.memory_network.memory_step(encoded_question, current_memory, encoded_background)
return current_memory, attended_knowledge
recurrence_modes = OrderedDict() # pylint: disable=invalid-name
recurrence_modes["fixed"] = FixedRecurrence
if K.backend() == 'tensorflow':
from .adaptive_recurrence import AdaptiveRecurrence
recurrence_modes["adaptive"] = AdaptiveRecurrence
| apache-2.0 | 6,218,812,813,095,521,000 | 39.827586 | 101 | 0.723818 | false |
disler/Kontact | App/Server.py | 1 | 3547 | from flask import Flask, render_template, current_app, Response, request
from server.DBInterface import DBInterface
from server.Validator import Validator
from server.WebUtil import WebUtil
import json
import ast
app = Flask(__name__)
#load database interface
db = DBInterface()
#load validator
validator = Validator.Kontact()
@app.route('/')
def Home():
"""
Landing page for application
"""
return current_app.send_static_file("index.html")
@app.route('/kontacts')
def Get():
"""
Get the list of kontacts
"""
return WebUtil.AsJson(db.Get("tblKontact"))
@app.route('/kontacts/<int:id>')
def GetByID(id):
"""
Get single record by id
"""
#get record by id from the kontact table
oRecord = db.GetByID("tblKontact", id)
#if the record returned is nothing return an empty object
if(oRecord is None):
oRecord = dict({})
return WebUtil.AsJson(oRecord)
@app.route('/kontacts', methods=["POST"])
def Create():
"""
Create a new kontact record
"""
#convert request data to json to be rendered as a python dict
oKontact = WebUtil.ToObject(request.data)
#if our processed data is a dict
if type(oKontact) is dict:
#validate to proper data structure
bValid = validator.Validate(oKontact)
#if valid kontact object is valid add to db
if bValid:
#create kontact obj
db.Create("tblKontact", oKontact)
#return success response
return WebUtil.SuccessResponse()
#kontact object is not valid return failure response
else:
return WebUtil.FailureResponse()
@app.route("/kontacts/<int:id>", methods=["PUT"])
def Update(id):
"""
Update a currently existing kontact record
"""
#Convert request to python structure
oNewKontact = WebUtil.ToObject(request.data)
#get current kontact we're going to update
oPreviousKontact = db.GetByID("tblKontact", id)
#if the kontact we're trying to update exists
if(oPreviousKontact is not None):
#combine the old kontact with the new - new having priority
oMergedKontact = WebUtil.MergeDict(oPreviousKontact, oNewKontact)
#validate the newly merged kontact object
bValid = validator.Validate(oMergedKontact)
#if the kontact object is valid
if bValid:
#update the kontact object
db.Update("tblKontact", id, oMergedKontact)
#return failure response
return WebUtil.SuccessResponse()
#kontact object is not valid
else:
#return failure response
return WebUtil.FailureResponse()
#the kontact we're trying to update does not exists return failure response
else:
return WebUtil.FailureResponse()
@app.route("/kontacts/<int:id>", methods=["DELETE"])
def Delete(id):
"""
Delete a kontact based on it's id'
"""
#get current kontact we're going to delete
oPreviousKontact = db.GetByID("tblKontact", id)
#if the kontact we're trying to delete exists
if(oPreviousKontact is not None):
#delete the kontact
db.Delete("tblKontact", id)
#return success response
return WebUtil.SuccessResponse()
#kontact does not exists return failure response
else:
return WebUtil.FailureResponse()
#launch flask app
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=True, threaded=True)
| mit | -8,879,114,534,368,940,000 | 24.702899 | 79 | 0.643079 | false |
arenadata/ambari | ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py | 1 | 7000 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestMahoutClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "MAHOUT/1.0.0.2.3/package"
STACK_VERSION = "2.3"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
classname = "MahoutServiceCheck",
command = "service_check",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/tmp/sample-mahout-test.txt',
content = 'Test text which will be converted to sequence file.',
mode = 0755,
)
self.maxDiff=None
self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
mode = 0770,
owner = 'ambari-qa',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeoutput',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['delete_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'ambari-qa',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mahoutsmokeinput/sample-mahout-test.txt',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
source = '/tmp/sample-mahout-test.txt',
user = 'hdfs',
dfs_type = '',
owner = 'ambari-qa',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
type = 'file',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
)
self.assertResourceCalled('Execute', 'mahout seqdirectory --input /user/ambari-qa/mahoutsmokeinput/'
'sample-mahout-test.txt --output /user/ambari-qa/mahoutsmokeoutput/ '
'--charset utf-8',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
'MAHOUT_HOME': '/usr/hdp/current/mahout-client'},
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /user/ambari-qa/mahoutsmokeoutput/_SUCCESS',
try_sleep = 6,
tries = 10,
bin_dir = '/usr/hdp/2.2.1.0-2067/hadoop/bin',
user = 'ambari-qa',
conf_dir = '/usr/hdp/2.2.1.0-2067/hadoop/conf',
)
self.assertNoMoreResources()
| apache-2.0 | 1,205,591,747,481,414,000 | 53.6875 | 291 | 0.59 | false |
JMoravec/unkRadnet | fitToCurve/pyeq2/UnitTests/Test_Equations.py | 1 | 5518 | # Version info: $Id: Test_Equations.py 1 2012-01-07 22:20:43Z [email protected] $
import sys, os, unittest, inspect
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
class Test_BioScience2D(unittest.TestCase):
def test_AphidPopulationGrowth(self):
equation = pyeq2.Models_2D.BioScience.AphidPopulationGrowth('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.374 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Engineering2D(unittest.TestCase):
def test_DispersionOptical(self):
equation = pyeq2.Models_2D.Engineering.DispersionOptical('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(1.77E-02 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Exponential2D(unittest.TestCase):
def test_Hocket_Sherby(self):
equation = pyeq2.Models_2D.Exponential.Hocket_Sherby('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(8.30E-03 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_LegendrePolynomial2D(unittest.TestCase):
def test_SecondDegreeLegendrePolynomial(self):
equation = pyeq2.Models_2D.LegendrePolynomial.SecondDegreeLegendrePolynomial('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.0146 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Logarithmic2D(unittest.TestCase):
def test_LinearLogarithmic(self):
equation = pyeq2.Models_2D.Logarithmic.LinearLogarithmic('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(1.20 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Exponential3D(unittest.TestCase):
def test_FullCubicExponential(self):
equation = pyeq2.Models_3D.Exponential.FullCubicExponential('SSQABS')
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.05 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Polyfunctional2D(unittest.TestCase):
def test_Polyfunctional2D(self):
equation = pyeq2.Models_2D.Polyfunctional.UserSelectablePolyfunctional('SSQABS', 'Default', [0,1,3])
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.013 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Polyfunctional3D(unittest.TestCase):
def test_Polyfunctional3D(self):
equation = pyeq2.Models_3D.Polyfunctional.UserSelectablePolyfunctional('SSQREL', 'Default', [[0,0], [1,1], [3,3]])
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(8.2 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Rationals(unittest.TestCase):
def test_Rational2D(self):
equation = pyeq2.Models_2D.Rational.UserSelectableRational('SSQABS', 'Default', [0,1], [2,3])
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.009 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
def test_Rational_WithOffset_2D(self):
equation = pyeq2.Models_2D.Rational.UserSelectableRational('SSQABS', 'Offset', [0,1], [2,3])
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.008 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_Polynomials(unittest.TestCase):
def test_Polynomial2D(self):
equation = pyeq2.Models_2D.Polynomial.UserSelectablePolynomial('SSQABS', 'Default', 2)
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(0.015 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
def test_Polynomial3D(self):
equation = pyeq2.Models_3D.Polynomial.UserSelectablePolynomial('SSQABS', 'Default', 2, 2)
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(equation.exampleData, equation, False)
self.assertTrue(2.92E-04 >= equation.CalculateAllDataFittingTarget(equation.Solve()))
class Test_InstantiationOfAllEquations(unittest.TestCase):
def test_InstantiationOfAllNamedEquations(self): # The test is that no exceptions are raised
for submodule in inspect.getmembers(pyeq2.Models_2D) + inspect.getmembers(pyeq2.Models_3D):
if inspect.ismodule(submodule[1]):
for equationClass in inspect.getmembers(submodule[1]):
if inspect.isclass(equationClass[1]):
equationClass[1]('SSQABS') # non-offset forms
if equationClass[1].autoGenerateOffsetForm == True:
equationClass[1]('SSQABS', 'Offset') # offset forms
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,022,641,982,060,471,300 | 42.448819 | 122 | 0.719645 | false |
frew/simpleproto | scons-local-1.1.0/SCons/Scanner/C.py | 1 | 4739 | """SCons.Scanner.C
This module implements the depenency scanner for C/C++ code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/C.py 3603 2008/10/10 05:46:45 scons"
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.cpp
class SConsCPPScanner(SCons.cpp.PreProcessor):
"""
SCons-specific subclass of the cpp.py module's processing.
We subclass this so that: 1) we can deal with files represented
by Nodes, not strings; 2) we can keep track of the files that are
missing.
"""
def __init__(self, *args, **kw):
apply(SCons.cpp.PreProcessor.__init__, (self,)+args, kw)
self.missing = []
def initialize_result(self, fname):
self.result = SCons.Util.UniqueList([fname])
def finalize_result(self, fname):
return self.result[1:]
def find_include_file(self, t):
keyword, quote, fname = t
result = SCons.Node.FS.find_file(fname, self.searchpath[quote])
if not result:
self.missing.append((fname, self.current_file))
return result
def read_file(self, file):
try:
fp = open(str(file.rfile()))
except EnvironmentError, e:
self.missing.append((file, self.current_file))
return ''
else:
return fp.read()
def dictify_CPPDEFINES(env):
cppdefines = env.get('CPPDEFINES', {})
if cppdefines is None:
return {}
if SCons.Util.is_Sequence(cppdefines):
result = {}
for c in cppdefines:
if SCons.Util.is_Sequence(c):
result[c[0]] = c[1]
else:
result[c] = None
return result
if not SCons.Util.is_Dict(cppdefines):
return {cppdefines : None}
return cppdefines
class SConsCPPScannerWrapper:
"""
The SCons wrapper around a cpp.py scanner.
This is the actual glue between the calling conventions of generic
SCons scanners, and the (subclass of) cpp.py class that knows how
to look for #include lines with reasonably real C-preprocessor-like
evaluation of #if/#ifdef/#else/#elif lines.
"""
def __init__(self, name, variable):
self.name = name
self.path = SCons.Scanner.FindPathDirs(variable)
def __call__(self, node, env, path = ()):
cpp = SConsCPPScanner(current = node.get_dir(),
cpppath = path,
dict = dictify_CPPDEFINES(env))
result = cpp(node)
for included, includer in cpp.missing:
fmt = "No dependency generated for file: %s (included from: %s) -- file not found"
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
fmt % (included, includer))
return result
def recurse_nodes(self, nodes):
return nodes
def select(self, node):
return self
def CScanner():
"""Return a prototype Scanner instance for scanning source files
that use the C pre-processor"""
# Here's how we would (or might) use the CPP scanner code above that
# knows how to evaluate #if/#ifdef/#else/#elif lines when searching
# for #includes. This is commented out for now until we add the
# right configurability to let users pick between the scanners.
#return SConsCPPScannerWrapper("CScanner", "CPPPATH")
cs = SCons.Scanner.ClassicCPP("CScanner",
"$CPPSUFFIXES",
"CPPPATH",
'^[ \t]*#[ \t]*(?:include|import)[ \t]*(<|")([^>"]+)(>|")')
return cs
| bsd-2-clause | -3,936,452,968,050,813,000 | 36.611111 | 94 | 0.638109 | false |
uclouvain/osis_louvain | base/models/learning_unit_year.py | 1 | 24536 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import re
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db import models
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _, ngettext
from base.models import entity_container_year as mdl_entity_container_year
from base.models.academic_year import compute_max_academic_year_adjournment, AcademicYear, \
MAX_ACADEMIC_YEAR_FACULTY, starting_academic_year
from base.models.enums import active_status, learning_container_year_types
from base.models.enums import learning_unit_year_subtypes, internship_subtypes, \
learning_unit_year_session, entity_container_year_link_type, quadrimesters, attribution_procedure
from base.models.enums.learning_container_year_types import COURSE, INTERNSHIP
from base.models.enums.learning_unit_year_periodicity import PERIODICITY_TYPES, ANNUAL, BIENNIAL_EVEN, BIENNIAL_ODD
from base.models.learning_unit import LEARNING_UNIT_ACRONYM_REGEX_ALL, REGEX_BY_SUBTYPE
from osis_common.models.serializable_model import SerializableModel, SerializableModelAdmin
AUTHORIZED_REGEX_CHARS = "$*+.^"
REGEX_ACRONYM_CHARSET = "[A-Z0-9" + AUTHORIZED_REGEX_CHARS + "]+"
MINIMUM_CREDITS = 0.0
MAXIMUM_CREDITS = 500
def academic_year_validator(value):
academic = AcademicYear.objects.get(pk=value)
academic_year_max = compute_max_academic_year_adjournment()
if academic.year > academic_year_max:
raise ValidationError(_('learning_unit_creation_academic_year_max_error').format(academic_year_max))
class LearningUnitYearAdmin(SerializableModelAdmin):
list_display = ('external_id', 'acronym', 'specific_title', 'academic_year', 'credits', 'changed', 'structure',
'status')
list_filter = ('academic_year', 'decimal_scores', 'summary_locked')
search_fields = ['acronym', 'structure__acronym', 'external_id']
actions = [
'resend_messages_to_queue',
'apply_learning_unit_year_postponement'
]
def apply_learning_unit_year_postponement(self, request, queryset):
# Potential circular imports
from base.business.learning_units.automatic_postponement import LearningUnitAutomaticPostponement
from base.views.common import display_success_messages, display_error_messages
result, errors = LearningUnitAutomaticPostponement(queryset.filter(learning_container_year__isnull=False))
count = len(result)
display_success_messages(
request, ngettext(
'%(count)d learning unit has been postponed with success',
'%(count)d learning units have been postponed with success', count
) % {'count': count}
)
if errors:
display_error_messages(request, "{} : {}".format(
_("The following learning units ended with error"),
", ".join([str(error) for error in errors])
))
apply_learning_unit_year_postponement.short_description = _("Apply postponement on learning unit year")
class LearningUnitYearWithContainerManager(models.Manager):
def get_queryset(self):
# FIXME For the moment, the learning_unit_year without container must be hide !
return super().get_queryset().filter(learning_container_year__isnull=False)
class ExtraManagerLearningUnitYear(models.Model):
# This class ensure that the default manager (from serializable model) is not override by this manager
objects_with_container = LearningUnitYearWithContainerManager()
class Meta:
abstract = True
class LearningUnitYear(SerializableModel, ExtraManagerLearningUnitYear):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
academic_year = models.ForeignKey(AcademicYear, verbose_name=_('academic_year'),
validators=[academic_year_validator])
learning_unit = models.ForeignKey('LearningUnit')
learning_container_year = models.ForeignKey('LearningContainerYear', null=True)
changed = models.DateTimeField(null=True, auto_now=True)
acronym = models.CharField(max_length=15, db_index=True, verbose_name=_('code'),
validators=[RegexValidator(LEARNING_UNIT_ACRONYM_REGEX_ALL)])
specific_title = models.CharField(max_length=255, blank=True, null=True,
verbose_name=_('title_proper_to_UE'))
specific_title_english = models.CharField(max_length=250, blank=True, null=True,
verbose_name=_('english_title_proper_to_UE'))
subtype = models.CharField(max_length=50, choices=learning_unit_year_subtypes.LEARNING_UNIT_YEAR_SUBTYPES,
default=learning_unit_year_subtypes.FULL)
credits = models.DecimalField(null=True, max_digits=5, decimal_places=2,
validators=[MinValueValidator(MINIMUM_CREDITS), MaxValueValidator(MAXIMUM_CREDITS)],
verbose_name=_('credits'))
decimal_scores = models.BooleanField(default=False)
structure = models.ForeignKey('Structure', blank=True, null=True)
internship_subtype = models.CharField(max_length=250, blank=True, null=True,
verbose_name=_('internship_subtype'),
choices=internship_subtypes.INTERNSHIP_SUBTYPES)
status = models.BooleanField(default=False, verbose_name=_('active_title'))
session = models.CharField(max_length=50, blank=True, null=True,
choices=learning_unit_year_session.LEARNING_UNIT_YEAR_SESSION,
verbose_name=_('session_title'))
quadrimester = models.CharField(max_length=9, blank=True, null=True, verbose_name=_('quadrimester'),
choices=quadrimesters.LEARNING_UNIT_YEAR_QUADRIMESTERS)
attribution_procedure = models.CharField(max_length=20, blank=True, null=True, verbose_name=_('procedure'),
choices=attribution_procedure.ATTRIBUTION_PROCEDURES)
summary_locked = models.BooleanField(default=False, verbose_name=_("summary_locked"))
professional_integration = models.BooleanField(default=False, verbose_name=_('professional_integration'))
campus = models.ForeignKey('Campus', null=True, verbose_name=_("learning_location"))
language = models.ForeignKey('reference.Language', null=True, verbose_name=_('language'))
periodicity = models.CharField(max_length=20, choices=PERIODICITY_TYPES, default=ANNUAL,
verbose_name=_('periodicity'))
_warnings = None
class Meta:
unique_together = (('learning_unit', 'academic_year'), ('acronym', 'academic_year'))
permissions = (
("can_receive_emails_about_automatic_postponement", "Can receive emails about automatic postponement"),
)
def __str__(self):
return u"%s - %s" % (self.academic_year, self.acronym)
@property
def subdivision(self):
if self.acronym and self.learning_container_year:
return self.acronym.replace(self.learning_container_year.acronym, "")
return None
@property
def parent(self):
if self.subdivision and self.is_partim():
return LearningUnitYear.objects.filter(
subtype=learning_unit_year_subtypes.FULL,
learning_container_year=self.learning_container_year,
).get()
return None
@property
def same_container_learning_unit_years(self):
return LearningUnitYear.objects.filter(
learning_container_year=self.learning_container_year
).order_by('acronym')
@cached_property
def allocation_entity(self):
return self.get_entity(entity_container_year_link_type.ALLOCATION_ENTITY)
@cached_property
def requirement_entity(self):
return self.get_entity(entity_container_year_link_type.REQUIREMENT_ENTITY)
@property
def complete_title(self):
complete_title = self.specific_title
if self.learning_container_year:
complete_title = ' - '.join(filter(None, [self.learning_container_year.common_title, self.specific_title]))
return complete_title
@property
def complete_title_english(self):
complete_title_english = self.specific_title_english
if self.learning_container_year:
complete_title_english = ' - '.join(filter(None, [
self.learning_container_year.common_title_english,
self.specific_title_english,
]))
return complete_title_english
@property
def container_common_title(self):
if self.learning_container_year:
return self.learning_container_year.common_title
return ''
def get_partims_related(self):
if self.is_full() and self.learning_container_year:
return self.learning_container_year.get_partims_related()
return LearningUnitYear.objects.none()
def find_list_group_element_year(self):
return self.child_leaf.filter(child_leaf=self).select_related('parent')
def get_learning_unit_next_year(self):
try:
return self.learning_unit.learningunityear_set.get(academic_year__year=(self.academic_year.year + 1))
except LearningUnitYear.DoesNotExist:
return None
@property
def in_charge(self):
return self.learning_container_year and self.learning_container_year.in_charge
@property
def container_type_verbose(self):
container_type = ''
if self.learning_container_year:
container_type = _(self.learning_container_year.container_type)
if self.learning_container_year.container_type in (COURSE, INTERNSHIP):
container_type += " ({subtype})".format(subtype=_(self.subtype))
return container_type
@property
def status_verbose(self):
return _("active") if self.status else _("inactive")
@property
def internship_subtype_verbose(self):
return _('to_complete') if self.learning_container_year and \
self.learning_container_year.container_type == INTERNSHIP and \
not self.internship_subtype else self.internship_subtype
@property
def get_previous_acronym(self):
return find_lt_learning_unit_year_with_different_acronym(self)
@property
def periodicity_verbose(self):
if self.periodicity:
return _(self.periodicity)
return None
def find_gte_learning_units_year(self):
return LearningUnitYear.objects.filter(learning_unit=self.learning_unit,
academic_year__year__gte=self.academic_year.year) \
.order_by('academic_year__year')
def find_gt_learning_units_year(self):
return LearningUnitYear.objects.filter(learning_unit=self.learning_unit,
academic_year__year__gt=self.academic_year.year) \
.order_by('academic_year__year')
def is_past(self):
return self.academic_year.is_past()
# FIXME move this method to business/perm file
def can_update_by_faculty_manager(self):
if not self.learning_container_year:
return False
starting_year = starting_academic_year().year
year = self.academic_year.year
return starting_year <= year <= starting_year + MAX_ACADEMIC_YEAR_FACULTY
def is_full(self):
return self.subtype == learning_unit_year_subtypes.FULL
def is_partim(self):
return self.subtype == learning_unit_year_subtypes.PARTIM
def get_entity(self, entity_type):
entity = None
# @TODO: Remove this condition when classes will be removed from learning unit year
if self.learning_container_year:
entity_container_yr = mdl_entity_container_year.search(
link_type=entity_type,
learning_container_year=self.learning_container_year,
).get()
entity = entity_container_yr.entity if entity_container_yr else None
return entity
def clean(self):
learning_unit_years = find_gte_year_acronym(self.academic_year, self.acronym)
if getattr(self, 'learning_unit', None):
learning_unit_years = learning_unit_years.exclude(learning_unit=self.learning_unit)
self.clean_acronym(learning_unit_years)
def clean_acronym(self, learning_unit_years):
if self.acronym in learning_unit_years.values_list('acronym', flat=True):
raise ValidationError({'acronym': _('already_existing_acronym')})
if not re.match(REGEX_BY_SUBTYPE[self.subtype], self.acronym):
raise ValidationError({'acronym': _('invalid_acronym')})
@property
def warnings(self):
if self._warnings is None:
self._warnings = []
self._warnings.extend(self._check_credits_is_integer())
self._warnings.extend(self._check_partim_parent_credits())
self._warnings.extend(self._check_internship_subtype())
self._warnings.extend(self._check_partim_parent_status())
self._warnings.extend(self._check_partim_parent_periodicity())
self._warnings.extend(self._check_learning_component_year_warnings())
self._warnings.extend(self._check_learning_container_year_warnings())
self._warnings.extend(self._check_entity_container_year_warnings())
return self._warnings
# TODO: Currently, we should warning user that the credits is not an integer
def _check_credits_is_integer(self):
warnings = []
if self.credits and self.credits % 1 != 0:
warnings.append(_('The credits value should be an integer'))
return warnings
def _check_partim_parent_credits(self):
children = self.get_partims_related()
return [_('The credits value of the partim %(acronym)s is greater or equal than the credits value of the '
'parent learning unit.') % {'acronym': child.acronym}
for child in children if child.credits and child.credits >= self.credits]
def _check_internship_subtype(self):
warnings = []
if getattr(self, 'learning_container_year', None):
if (self.learning_container_year.container_type == learning_container_year_types.INTERNSHIP and
not self.internship_subtype):
warnings.append(_('missing_internship_subtype'))
return warnings
def _check_partim_parent_status(self):
warnings = []
if self.parent:
if not self.parent.status and self.status:
warnings.append(_('This partim is active and the parent is inactive'))
else:
if self.status is False and find_partims_with_active_status(self).exists():
warnings.append(_("The parent is inactive and there is at least one partim active"))
return warnings
def _check_partim_parent_periodicity(self):
warnings = []
if self.parent:
if self.parent.periodicity in [BIENNIAL_EVEN, BIENNIAL_ODD] and self.periodicity != self.parent.periodicity:
warnings.append(_("This partim is %(partim_periodicity)s and the parent is %(parent_periodicty)s")
% {'partim_periodicity': self.periodicity_verbose,
'parent_periodicty': self.parent.periodicity_verbose})
else:
if self.periodicity in [BIENNIAL_EVEN, BIENNIAL_ODD] and \
find_partims_with_different_periodicity(self).exists():
warnings.append(_("The parent is %(parent_periodicty)s and there is at least one partim which is not "
"%(parent_periodicty)s") % {'parent_periodicty': self.periodicity_verbose})
return warnings
def _check_learning_component_year_warnings(self):
_warnings = []
components_queryset = self.learning_container_year.learningcomponentyear_set
all_components = components_queryset.all().order_by('learningunitcomponent__learning_unit_year__acronym')
for learning_component_year in all_components:
_warnings.extend(learning_component_year.warnings)
return _warnings
def _check_learning_container_year_warnings(self):
return self.learning_container_year.warnings
def _check_entity_container_year_warnings(self):
_warnings = []
entity_container_years = mdl_entity_container_year.find_by_learning_container_year(self.learning_container_year)
for entity_container_year in entity_container_years:
_warnings.extend(entity_container_year.warnings)
return _warnings
def is_external(self):
return hasattr(self, "externallearningunityear")
def get_by_id(learning_unit_year_id):
return LearningUnitYear.objects.select_related('learning_container_year__learning_container') \
.get(pk=learning_unit_year_id)
def find_by_acronym(acronym):
return LearningUnitYear.objects.filter(acronym=acronym).select_related('learning_container_year')
def _is_regex(acronym):
return set(AUTHORIZED_REGEX_CHARS).intersection(set(acronym))
def search(academic_year_id=None, acronym=None, learning_container_year_id=None, learning_unit=None,
title=None, subtype=None, status=None, container_type=None, tutor=None,
summary_responsible=None, requirement_entities=None, learning_unit_year_id=None, *args, **kwargs):
queryset = LearningUnitYear.objects_with_container
if learning_unit_year_id:
queryset = queryset.filter(id=learning_unit_year_id)
if academic_year_id:
queryset = queryset.filter(academic_year=academic_year_id)
if acronym:
if _is_regex(acronym):
queryset = queryset.filter(acronym__iregex=r"(" + acronym + ")")
else:
queryset = queryset.filter(acronym__icontains=acronym)
if learning_container_year_id is not None:
if isinstance(learning_container_year_id, list):
queryset = queryset.filter(learning_container_year__in=learning_container_year_id)
elif learning_container_year_id:
queryset = queryset.filter(learning_container_year=learning_container_year_id)
if requirement_entities:
queryset = queryset.filter(
learning_container_year__entitycontaineryear__entity__entityversion__in=requirement_entities,
learning_container_year__entitycontaineryear__type=entity_container_year_link_type.REQUIREMENT_ENTITY)
if learning_unit:
queryset = queryset.filter(learning_unit=learning_unit)
if title:
queryset = queryset. \
filter(Q(specific_title__iregex=title) | Q(learning_container_year__common_title__iregex=title))
if subtype:
queryset = queryset.filter(subtype=subtype)
if status:
queryset = queryset.filter(status=convert_status_bool(status))
if container_type:
queryset = queryset.filter(learning_container_year__container_type=container_type)
if tutor:
for name in tutor.split():
filter_by_first_name = {_build_tutor_filter(name_type='first_name'): name}
filter_by_last_name = {_build_tutor_filter(name_type='last_name'): name}
queryset = queryset.filter(Q(**filter_by_first_name) | Q(**filter_by_last_name)).distinct()
if summary_responsible:
queryset = find_summary_responsible_by_name(queryset, summary_responsible)
return queryset.select_related('learning_container_year', 'academic_year')
def find_summary_responsible_by_name(queryset, name):
for term in name.split():
queryset = queryset.filter(
Q(attribution__tutor__person__first_name__icontains=term) |
Q(attribution__tutor__person__last_name__icontains=term)
)
return queryset.filter(attribution__summary_responsible=True).distinct()
def _build_tutor_filter(name_type):
return '__'.join(['learningunitcomponent', 'learning_component_year', 'attributionchargenew', 'attribution',
'tutor', 'person', name_type, 'iregex'])
def convert_status_bool(status):
if status in (active_status.ACTIVE, active_status.INACTIVE):
boolean = status == active_status.ACTIVE
else:
boolean = status
return boolean
def find_gte_year_acronym(academic_yr, acronym):
return LearningUnitYear.objects.filter(academic_year__year__gte=academic_yr.year,
acronym__iexact=acronym)
def find_lt_year_acronym(academic_yr, acronym):
return LearningUnitYear.objects.filter(academic_year__year__lt=academic_yr.year,
acronym__iexact=acronym).order_by('academic_year')
def check_if_acronym_regex_is_valid(acronym):
return isinstance(acronym, str) and \
not acronym.startswith('*') and \
re.fullmatch(REGEX_ACRONYM_CHARSET, acronym.upper()) is not None
def find_max_credits_of_related_partims(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().aggregate(max_credits=models.Max("credits"))["max_credits"]
def find_partims_with_active_status(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().filter(status=True)
def find_partims_with_different_periodicity(a_learning_unit_year):
return a_learning_unit_year.get_partims_related().exclude(periodicity=a_learning_unit_year.periodicity)
def find_by_learning_unit(a_learning_unit):
return search(learning_unit=a_learning_unit)
def find_by_entities(entities):
return LearningUnitYear.objects.filter(learning_container_year__entitycontaineryear__entity__in=entities)
def find_latest_by_learning_unit(a_learning_unit):
return search(learning_unit=a_learning_unit).order_by('academic_year').last()
def find_lt_learning_unit_year_with_different_acronym(a_learning_unit_yr):
return LearningUnitYear.objects.filter(learning_unit__id=a_learning_unit_yr.learning_unit.id,
academic_year__year__lt=a_learning_unit_yr.academic_year.year,
proposallearningunit__isnull=True) \
.order_by('-academic_year') \
.exclude(acronym__iexact=a_learning_unit_yr.acronym).first()
def find_learning_unit_years_by_academic_year_tutor_attributions(academic_year, tutor):
""" In this function, only learning unit year with containers is visible! [no classes] """
qs = LearningUnitYear.objects_with_container.filter(
academic_year=academic_year,
attribution__tutor=tutor,
).distinct().order_by('academic_year__year', 'acronym')
return qs
def toggle_summary_locked(learning_unit_year_id):
luy = LearningUnitYear.objects.get(pk=learning_unit_year_id)
luy.summary_locked = not luy.summary_locked
luy.save()
return luy
| agpl-3.0 | -1,512,369,831,425,935,600 | 43.853748 | 120 | 0.663746 | false |
openworm/Blender2NeuroML | src/Entity/Entity.py | 1 | 21651 | '''
Created on 03.06.2011
@author: Sergey Khayrulin
'''
from __future__ import absolute_import
from Entity.Vertex import Vertex
from Entity.Face import Face
from Entity.Slice import Slice, AlternateSlice
from Entity.Helper import *
import pprint
import math
class Entity(object):
'''
    Main class which processes mesh data exported from Blender or from a WRL (formatted) file.
'''
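    # A minimal usage sketch (the parsed_* names are placeholders for whatever
    # parser feeds this class; only the Entity calls below are real):
    #
    #   entity = Entity()
    #   for coords in parsed_vertex_lines:    # each item: [x, y, z]
    #       entity.add_vertex(coords)
    #   for indices in parsed_face_lines:     # each item: 4 vertex indices
    #       entity.add_face(indices)
    #   entity.findCenterOfSoma()
    #   entity.find_point()
    #   branches = entity.getAllBrunches()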
def __init__(self):
'''
Constructor
'''
self.vertices = []
self.faces = Faces()
self.resulting_points = []
self.checked_points = []
self.neuronInfo = ''
def clean_all(self):
self.faces.clean_all()
def add_vertex(self, coordinates):
'''
        Add a vertex to the point collection. Takes a sequence of three
        coordinates, builds a Vertex and appends it to self.vertices.
'''
try:
if len(coordinates) != 3:
                raise ParserException('Vertex requires exactly 3 coordinates, got %d' % len(coordinates))
point = Vertex(float(coordinates[0]),float(coordinates[1]),float(coordinates[2]))
self.vertices.append(point)
except ParserException as ex:
            print('Incorrect vertex data: %s' % (coordinates,))
raise ex
def add_face(self, points_arr):
'''
        Add a face to the faces collection. Takes a sequence of indices
        into the vertex collection (four per face).
'''
try:
if len(points_arr) < 4:
                raise ParserException('Face must reference at least 4 points, got %d' % len(points_arr))
face = Face(self.vertices[int(points_arr[0])],self.vertices[int(points_arr[1])],self.vertices[int(points_arr[2])],self.vertices[int(points_arr[3])])
face.order = [int(points_arr[0]),int(points_arr[1]),int(points_arr[2]),int(points_arr[3])]
self.faces[face.order] = face
#print("add_face %s" % face.order)
#self.faces.append(face)
except ParserException as ex:
print('Error:%s'%ex)
print(points_arr)
raise ex
def findCenterOfSoma(self, use_method2 = False):
'''
        Find the starting point for the main algorithm; the first point
        should lie inside the soma, which is the biggest segment of the cell.
'''
iter = 0
temp_points = []
slices = []
for p in range(len(self.vertices)):
temp_points.append(HelpPoint(p,0))
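        # Fallback path: when the face ordering does not follow the pattern the
        # normal slicing code expects, Slice(use_method2=True) estimates the soma
        # centre and the set of soma points directly.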
if use_method2:
startSlice = Slice(temp_points,self.faces, use_method2 = True, vertices = self.vertices)
point_on_perimeter = self.vertices[startSlice[0].point]
self.checked_points += startSlice.extra_dict['points_in_soma']
self.start_center_point = startSlice.extra_dict['center_pt']
self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter)
self.starting_slice = startSlice
return
slice = Slice(temp_points,self.faces)
slices.append(slice)
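        # Peel off one cross-section (slice) at a time: drop its points from the
        # pool and build the next slice until no points are left.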
while len(slice) != 0:
temp_points = list(filter(lambda p: not slice.__contains__(p), temp_points))
slice = None
slice = Slice(temp_points,self.faces)
if len(slice) != 0:
slices.append(slice)
#if not (iter % 10):
# print('slice %d iter %d' % (len(temp_points), iter))
#slice.printSlice()
#print slice.getPerimetr(self.vertices)
iter += 1
# find slice with longest line segments
perimiter_coll = sorted(slices,key=lambda slice:slice.getPerimetr(self.vertices), reverse=True)
startSlice = Slice(perimiter_coll[0],self.faces)
#print("findCenterOfSoma while loop done %d %d" % (iter, len(temp_points)))
try:
self.start_center_point = self.__getCenterPoint(startSlice, minimal = True)
except IndexError:
print("no center point startSlice %d perimiter_coll %d"
% (len(startSlice), len(perimiter_coll[0])))
for face in self.faces.keys():
print("face order %s" % face)
# the coordinates aren't organized in a pattern that the normal
# code in Slice can understand, so we use an alternate method
return self.findCenterOfSoma(use_method2 = True)
if not use_method2:
point_on_perimeter = self.vertices[perimiter_coll[0][0].point]
self.start_center_point.diametr = 2 * self.start_center_point.len_between_point(point_on_perimeter)
def getAllBrunches(self):
'''
        Return a dictionary of key=>value pairs: the key is the name of a
        neurite and the value is a sorted sequence of indices into the
        resulting_points collection, for instance 'axon' => [1, 2, 4].
'''
brunches_temp = {}
result_coll = {}
i = 0
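        # Roots are resulting points attached directly to the soma (parent index 0,
        # excluding the soma point itself); each root starts a separate branch.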
roots = [self.resulting_points.index(p) for p in self.resulting_points \
if p.parentPoint == 0 and self.resulting_points.index(p) != 0]
for root in roots:
brunches_temp[root] = []
for p in self.resulting_points:
parent = p.getRoot(self.resulting_points)
if parent == root:
brunches_temp[root].append(self.resulting_points.index(p))
        # Iterate over the branches, longest first: the longest branch is treated
        # as the axon, the remaining ones become dendrites.
        # dict.items() with a single-argument key function works under both
        # Python 2 and Python 3.
        for k1, value in sorted(brunches_temp.items(), key=lambda kv: (len(kv[1]), kv[0]), reverse=True):
if i == 0:
for j in value:
self.resulting_points[j].isAxon = True
result_coll['axon'] = value
else:
for j in value:
if self.resulting_points[j].cable != 2:
self.resulting_points[j].isDendrite = True
self.resulting_points[j].cable = 3
result_coll['dendrite' + str(i)] = value
i += 1
return result_coll
def use_alt_slice(self):
return hasattr(self, 'starting_slice')
def create_slice(self, coll, allow_checked = False):
if self.use_alt_slice():
if not allow_checked:
coll = filter(lambda p: not self.checked_points.__contains__(p.point), coll)
slice = AlternateSlice(coll,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], None, allow_checked)
else:
slice = Slice(coll,self.faces)
return slice
def branching(self, slice):
if not self.use_alt_slice():
return False
for p in range(len(slice)):
if len(self.starting_slice.extra_dict['adjacentPoints'][slice[p].point]) == 5:
return True
return False
def find_point(self,center_point=Vertex(),iteration=0,
parentPoint=0, isNeurite=False,
isBrunchStart=False, _slice=None):
'''
        Main function: finds the axon, dendrites and neurites
'''
vector_len = []
print("enter find_point iteration %d isBrunchStart %d" % (iteration, isBrunchStart))
if iteration == 0: center_point = self.start_center_point
if isNeurite:
res_point = Result_Point(center_point,parentPoint,2,isBrunchStart)
res_point.isNeurite = True
self.resulting_points.append(res_point)
elif iteration != 0:
self.resulting_points.append(Result_Point(center_point,parentPoint,1,isBrunchStart))
elif iteration == 0:
self.resulting_points.append(Result_Point(center_point,parentPoint,0,isBrunchStart))
current_point = len(self.resulting_points) - 1
for p in range(len(self.vertices)):
vector_len.append(HelpPoint(p,self.vertices[p].len_between_point(center_point)))
vector_len = sorted(vector_len,key=lambda p:p.lenght)
tmp_list = []
if iteration != 0:
'''
            If iteration != 0 we should find the next 4 or more vertices (6 or 8 if we hit a place of branching)
'''
if _slice is not None:
slice = _slice
else:
slice = self.create_slice(vector_len)
adjacentPoints = []
use_v5 = iteration >= 3 and self.branching(slice) # with 5 adjacent points
for p in range(4):
if use_v5 and not isBrunchStart:
c = slice[p].point
tmp_list.append(c)
adjacentPoints.append(HelpPoint(c, self.vertices[c].len_between_point(center_point)))
if use_v5 and isBrunchStart:
#print("use_v5 br %d p %d" % (len(slice), p))
coll = self.__find_adjacent_vertices5(slice[p].point)
elif p != 3:
coll = self.__find_adjacent_vertices(slice[p].point, slice[p+1].point)
else:
coll = self.__find_adjacent_vertices(slice[p].point, slice[0].point)
#print("%d-%d has %d adj v" % (slice[p].point, slice[(p+1)%4].point, len(coll)))
for c in coll:
helpPoint = HelpPoint(c,self.vertices[c].len_between_point(center_point))
#print("%3d %3d is checked? %d" % (p, c, self.checked_points.__contains__(c)))
if not adjacentPoints.__contains__(helpPoint):
if not self.checked_points.__contains__(c):
adjacentPoints.append(helpPoint)
tmp_list.append(c)
print("got %d adjacentPoints %s" % (len(adjacentPoints), tmp_list))
if len(adjacentPoints) == 0: return
'''
            If we find 8 adjacent vertices it means that we are inside a branching segment
'''
if len(adjacentPoints) > 4 and not (use_v5 and isBrunchStart):
if self.__more4AdjacentPointCase(adjacentPoints, slice, isBrunchStart,iteration, current_point, center_point):
return
del vector_len[:]
vector_len = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point))
for p in adjacentPoints if not self.checked_points.__contains__(p.point)]
vector_len = sorted(vector_len,key=lambda p:p.lenght)
if self.use_alt_slice():
vector_len = filter(lambda p: not self.checked_points.__contains__(p.point), vector_len)
if iteration == 0:
adj_dict = self.starting_slice.extra_dict['adjacentPoints']
else:
adj_dict = None
slice = AlternateSlice(vector_len,self.faces, self.vertices, self.checked_points, self.vertices[self.starting_slice[0].point], adj_dict)
else:
slice = Slice(vector_len,self.faces)
lenOfSlice = len(slice)
print("lenOfSlice %d iter %d %d" % (lenOfSlice, iteration, len(vector_len)))
if lenOfSlice == 0:
slice = vector_len
if len(slice) < 4:
return
new_center_point = self.__getCenterPoint(slice)
iteration += 1
if lenOfSlice != 0:
self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=isNeurite,isBrunchStart=False, _slice=slice)
else:
if isNeurite:
res_point = Result_Point(new_center_point,current_point,2,False)
res_point.isNeurite = True
self.resulting_points.append(res_point)
elif iteration != 0:
self.resulting_points.append(Result_Point(new_center_point,current_point,1,False))
if iteration == 1:
self.__checkDendrite(slice, center_point, vector_len,current_point)
def __getCenterPoint(self, slice, minimal = False):
'''
        Get the center point (center of mass) of the input collection slice (usually 4 points)
'''
x=y=z=0
n_points = 4
if len(slice) < 4:
print("Bad slice len %d" % len(slice))
if minimal and len(slice) > 0:
n_points = len(slice)
else:
raise IndexError
for p in range(n_points):
x += self.vertices[slice[p].point].x
y += self.vertices[slice[p].point].y
z += self.vertices[slice[p].point].z
if not self.checked_points.__contains__(slice[p].point):
self.checked_points.append(slice[p].point)
center_point = Vertex(x/n_points,y/n_points,z/n_points)
center_point.diametr = 2 * center_point.len_between_point(self.vertices[slice[0].point])
if isinstance(slice, Slice):
slice.printSlice()
else:
print(slice)
return center_point
def __find_adjacent_vertices(self, num_p1,num_p2):
'''
        Find the vertices adjacent to both of the two given points
'''
adjacentVertices = []
for key,f in self.faces.items():
if f.order.__contains__(num_p1) and f.order.__contains__(num_p2):
for p in f.order:
if p != num_p1 and p != num_p2:
adjacentVertices.append(p)
return adjacentVertices
def __find_adjacent_vertices5(self, num_p1):
'''
        Find the vertices adjacent to the given point
'''
adjacentVertices = []
for key,f in self.faces.items():
if f.order.__contains__(num_p1):
for p in f.order:
if p != num_p1 and not (p in adjacentVertices):
near_old_point = False
for r_pt in self.resulting_points:
dist = r_pt.point.len_between_point(self.vertices[p])
if dist < r_pt.point.diametr:
near_old_point = True
break
if not near_old_point:
adjacentVertices.append(p)
return adjacentVertices
def __fillUpBrachesCollection(self, adjacentPoints, slice):
'''
Fill branches collection
'''
branchesCollection = []
for i in range(4):
for p1 in adjacentPoints:
for p2 in adjacentPoints:
if p1 == p2:
continue
s = self.create_slice([slice[i], slice[(i + 1) % 4], p1, p2],
allow_checked = True)
if (len(s) == 4):
if not branchesCollection.__contains__(s):
branchesCollection.append(s)
if len(self.create_slice(adjacentPoints)) != 0:
branchesCollection.append(self.create_slice(adjacentPoints))
return branchesCollection
def __more4AdjacentPointCase(self, adjacentPoints, slice, isBrunch,iteration, current_point, center_point):
'''
        Handle the case where the algorithm finds more than 4 adjacent points
'''
branchesCollection = self.__fillUpBrachesCollection(adjacentPoints, slice)
if len(branchesCollection) >= 2 :
center_points = {}
thirdBrunchCollection = []
for branch in branchesCollection:
branch_center_point = self.__getCenterPoint(branch)
center_points[branch_center_point] = branch
print("%d center_points" % (len(center_points.keys())))
for branch_center_point,branch in center_points.items():
old_num_r_points = len(self.resulting_points)
print("start branch %d %d %d %d size %d %3d resulting_points"
% (branch[0].point, branch[1].point, branch[2].point, branch[3].point, len(branch), len(self.resulting_points)))
self.find_point(branch_center_point,iteration,current_point,True,True, _slice=branch)
print("finish branch %d %3d resulting_points" % (branch[0].point, len(self.resulting_points)))
if self.use_alt_slice() and len(self.resulting_points) == old_num_r_points + 1:
del self.resulting_points[-1]
print("undo branches of length 1")
if len(adjacentPoints) > 6:
thirdBrunchCollection.extend(branch)
thirdBrunchPoints = [HelpPoint(p.point,self.vertices[p.point].len_between_point(center_point)) \
for p in thirdBrunchCollection if not slice.__contains__(p)]
slice_t = self.create_slice(thirdBrunchPoints)
if len(slice_t) == 4:
third_brunch_center_point = self.__getCenterPoint(slice_t)
self.find_point(third_brunch_center_point,iteration, current_point,True,True, _slice=slice_t)
return True
elif len(branchesCollection) == 0 or (len(branchesCollection) == 1 and not isBrunch):
sortedadjacentPoints = sorted(adjacentPoints,key=lambda p:p.lenght)
first_slice = self.create_slice(sortedadjacentPoints)
second_slice = self.create_slice(filter(lambda p: first_slice.__contains__(p) == False, sortedadjacentPoints))
perimeter_1 = first_slice.getPerimetr(self.vertices)
perimeter_2 = second_slice.getPerimetr(self.vertices)
if perimeter_1 > perimeter_2 and perimeter_2 != 0:
new_center_point = self.__getCenterPoint(second_slice)
self.find_point(new_center_point,iteration, current_point,False,False, _slice=second_slice)
return True
elif perimeter_1 < perimeter_2 or perimeter_2 == 0:
if perimeter_1 == 0:
if len(branchesCollection) == 1:
first_slice = branchesCollection[0]
else:
first_slice.getFaceFromColl(adjacentPoints,self.faces)
new_center_point = self.__getCenterPoint(first_slice)
self.find_point(new_center_point,iteration, current_point,isBrunch,False, _slice=first_slice)
else:
new_center_point = self.__getCenterPoint(first_slice)
self.find_point(new_center_point,iteration, current_point,False,False, _slice=first_slice)
return True
elif len(branchesCollection) == 1 and isBrunch:
slice = branchesCollection[0]
if len(slice) == 0:
slice = slice.getFaceFromColl(adjacentPoints,self.faces)
try:
new_center_point = self.__getCenterPoint(slice)
except IndexError:
print("Warning: __getCenterPoint failed, slice len %d, %d adjacentPoints"
% (len(slice), len(adjacentPoints)))
slice.printSlice()
return False
self.find_point(new_center_point,iteration, parentPoint=current_point,isNeurite=True,isBrunchStart=False, _slice=slice)
return True
return False
def __checkDendrite(self, slice, center_point, vector_len, current_point):
'''
Private Method.
        Check whether the soma has other output processes;
        if it does, run find_point for each of them.
'''
iteration = 1
vector_len = filter(lambda p: slice.__contains__(p) == False
and self.checked_points.__contains__(p.point) == False, vector_len)
vector_len = sorted(vector_len,key=lambda p:p.lenght)
for i in range(5):
slice2 = self.create_slice(vector_len)
if (len(slice2) == 4 and
int(slice.getPerimetr(self.vertices) / slice2.getPerimetr(self.vertices)) <= 1 and
int(slice2.getPerimetr(self.vertices) / slice.getPerimetr(self.vertices)) <= 1):
new_center_point = self.__getCenterPoint(slice2)
iteration += 1
self.find_point(new_center_point,iteration,parentPoint=current_point,isNeurite=False,isBrunchStart=False, _slice=slice2)
vector_len = filter(lambda p: slice2.__contains__(p) == False
and self.checked_points.__contains__(p.point) == False, vector_len)
vector_len = sorted(vector_len, key=lambda p:p.lenght)
#
# check_unused_coordinates might be of some use in checking for
# sections of a neuron that were omitted due to flaws in the code
#
def check_unused_coordinates(self):
for key,f in self.faces.items():
unused = True
for p in f.order:
if p in self.checked_points:
unused = False
break
if unused:
print("unused face %s" % f.order)
| mit | -6,809,170,652,540,044,000 | 47.095238 | 160 | 0.549813 | false |
cansik/pyunicon | pyunicon/Cocoa/CocoaMouse.py | 1 | 2057 | from Quartz.CoreGraphics import CGEventCreateMouseEvent
from Quartz.CoreGraphics import CGEventPost
from Quartz.CoreGraphics import kCGEventMouseMoved
from Quartz.CoreGraphics import kCGEventLeftMouseDown
from Quartz.CoreGraphics import kCGEventLeftMouseUp
from Quartz.CoreGraphics import kCGEventRightMouseDown
from Quartz.CoreGraphics import kCGEventRightMouseUp
from Quartz.CoreGraphics import kCGMouseButtonLeft
from Quartz.CoreGraphics import kCGHIDEventTap
from Quartz.CoreGraphics import CGEventCreate
from Quartz.CoreGraphics import CGEventGetLocation
from Quartz.CoreGraphics import CGWarpMouseCursorPosition
from pyunicon.util import UCMouseKey
__author__ = 'cansik'
class CocoaMouse(object):
def __init__(self):
pass
def __mouse_event(self, type, x, y):
mouse_event = CGEventCreateMouseEvent(None, type, (x, y), kCGMouseButtonLeft)
CGEventPost(kCGHIDEventTap, mouse_event)
def move(self, x, y):
self.__mouse_event(kCGEventMouseMoved, x, y)
CGWarpMouseCursorPosition((x, y))
# todo: fix race condition (get position is not accurate)
def get_position(self):
mouse_event = CGEventCreate(None)
pos = CGEventGetLocation(mouse_event)
return pos.x, pos.y
def press(self, mouse_key):
x, y = self.get_position()
if mouse_key is UCMouseKey.UC_MOUSE_LEFT:
self.__mouse_event(kCGEventLeftMouseDown, x, y)
elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE:
print("mouse middle not supported on OSX!")
elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT:
self.__mouse_event(kCGEventRightMouseDown, x, y)
def release(self, mouse_key):
x, y = self.get_position()
if mouse_key is UCMouseKey.UC_MOUSE_LEFT:
self.__mouse_event(kCGEventLeftMouseUp, x, y)
elif mouse_key is UCMouseKey.UC_MOUSE_MIDDLE:
print("mouse middle not supported on OSX!")
elif mouse_key is UCMouseKey.UC_MOUSE_RIGHT:
self.__mouse_event(kCGEventRightMouseUp, x, y)
| mit | 2,062,059,134,703,579,100 | 37.092593 | 85 | 0.712202 | false |
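
A brief usage sketch for the CocoaMouse class above (illustrative only, not part of the original module). The package import path is assumed from the record's file path; UCMouseKey is the same enum the module itself imports.

from pyunicon.Cocoa.CocoaMouse import CocoaMouse
from pyunicon.util import UCMouseKey

mouse = CocoaMouse()
mouse.move(200, 300)                      # warp the cursor to (200, 300)
x, y = mouse.get_position()               # may lag slightly, see the race-condition note above
mouse.press(UCMouseKey.UC_MOUSE_LEFT)     # left button down at the current position
mouse.release(UCMouseKey.UC_MOUSE_LEFT)   # left button up -> completes a click
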
AhmedHani/Neural-Networks-for-ML | Implementations/simple_word2vec/cbow.py | 1 | 2983 | import tensorflow as tf
class CBOW(object):
def __init__(self, args):
self.__args = args
self.__ngram_size = args.ngram_size
self.__input_size = self.__ngram_size - 1
self.__vocab_size = args.vocab_size + 1
self.__embedding_dim = args.embedding_dim
self.__learning_rate = args.learning_rate
self.__activation_function = args.activation_function
self.__optimizer = args.optimizer
self.__loss_function = args.loss_function
def init_session(self, restore=False):
self.__session = tf.Session()
if restore:
self.__saver = tf.train.Saver()
self.__saver.restore(self.__session, self.__args.model)
def build(self):
self.__input = tf.placeholder(tf.float32, [None, self.__input_size * self.__vocab_size])
self.__output = tf.placeholder(tf.float32, [None, self.__vocab_size])
self.__input_to_hidden_weights = tf.get_variable("ih_w", shape=[self.__input_size * self.__vocab_size, self.__embedding_dim],
initializer=tf.contrib.layers.xavier_initializer())
self.__input_to_hidden_bias = tf.Variable(tf.ones(self.__embedding_dim))
self.__hidden_to_output_weights = tf.get_variable("ho_w", shape=[self.__embedding_dim, self.__vocab_size], initializer=tf.contrib.layers.xavier_initializer())
self.__hidden_to_output_bias = tf.Variable(tf.ones([self.__vocab_size]))
if self.__optimizer.lower() == "sgd":
self.__optimizer = tf.train.GradientDescentOptimizer(self.__learning_rate)
elif self.__optimizer.lower() == "adam":
self.__optimizer = tf.train.AdamOptimizer(self.__learning_rate)
self.__embedding_layer = tf.matmul(self.__input, self.__input_to_hidden_weights) + self.__input_to_hidden_bias
if self.__activation_function.lower() == "tanh":
self.__embedding_layer = tf.nn.tanh(self.__embedding_layer)
elif self.__activation_function.lower() == "relu":
self.__embedding_layer = tf.nn.relu(self.__embedding_layer)
self.__output_layer = tf.matmul(self.__embedding_layer, self.__hidden_to_output_weights) + self.__hidden_to_output_bias
self.__output_layer = tf.nn.softmax(self.__output_layer)
if self.__loss_function.lower() == "mse":
self.__cost_function = 0.5 * tf.reduce_sum(tf.square(self.__output_layer - self.__output))
elif self.__loss_function.lower() == "ce":
self.__cost_function = -tf.reduce_mean((self.__output * tf.log(self.__output_layer)) + ((1 - self.__output) * tf.log(1 - self.__output_layer)))
self.__train = self.__optimizer.minimize(self.__cost_function)
def run(self, x_input, y_output):
self.__session.run(tf.global_variables_initializer())
error = self.__session.run(self.__cost_function, feed_dict={self.__input: x_input, self.__output: y_output})
return error | gpl-3.0 | -121,940,656,376,534,380 | 49.576271 | 166 | 0.612471 | false |
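
A hedged construction sketch for the CBOW class above (not part of the original file). It assumes the TF1-style graph/session API used by the class; the args namespace is a hypothetical stand-in that mirrors the attributes the class reads, and the batch shapes follow the placeholders defined in build().

from types import SimpleNamespace
import numpy as np

args = SimpleNamespace(ngram_size=3, vocab_size=5000, embedding_dim=128,
                       learning_rate=0.01, activation_function='tanh',
                       optimizer='adam', loss_function='ce', model=None)

model = CBOW(args)
model.build()         # placeholders, weights, cost and train ops
model.init_session()  # plain tf.Session(); no checkpoint restore

# one-hot context windows: [batch, (ngram_size - 1) * (vocab_size + 1)]
x_batch = np.zeros((8, 2 * 5001), dtype=np.float32)
# one-hot target words: [batch, vocab_size + 1]
y_batch = np.zeros((8, 5001), dtype=np.float32)
error = model.run(x_batch, y_batch)
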
alexsiri7/RoboScrum | stories/views.py | 1 | 2140 | from django.shortcuts import render_to_response, get_object_or_404
from django.template import Context, loader
from stories.models import Story, Sprint
from django.http import HttpResponse
from django.views.generic import DetailView, ListView
class SprintView(DetailView):
days = ["", "","Mon", "", "", "", "Tue", "", "", "", "Wed", "", "", "", "Thu", "", "Fri"]
model = Sprint
def get_context_data(self, **kwargs):
context = super(SprintView, self).get_context_data(**kwargs)
if self.object.is_finished:
context['burndown'] = self.burndown()
else:
context['burndown_schema'] = self.burndown_schema()
return context
def burndown(self):
total = self.object.original_commitment()
burn = map(lambda (i,e): (self.days[i], total-total*i/4, total*1.2-total*i/4*1.2, total*0.8-total*i/4*0.8,total-e),enumerate(self.object.burnup()))
return burn
def burndown_schema(self):
total = self.object.original_commitment()
burn = map(lambda (i,e): (
self.days[i],
total-total*i/17,
total*1.2-total*i/17*1.2,
total*0.8-total*i/17*0.8)
,enumerate(range(17)))
return burn
class SprintListView(ListView):
queryset = Sprint.objects.all().order_by('-start_date')
def get_context_data(self, **kwargs):
context = super(SprintListView, self).get_context_data(**kwargs)
context['TVI'] = self.getTVI()
context['Points'] = self.getPoints()
context['Pct'] = self.getPct()
return context
def getTVI(self):
return map(lambda s: (s.number, s.targeted_value_increase()), self.object_list.order_by('start_date').filter(is_finished=True).all())
def getPoints(self):
return map(lambda s: (s.number, s.work_capacity()*100/s.member_dedication, s.velocity()*100/s.member_dedication, s.original_commitment()*100/s.member_dedication),
self.object_list.order_by('start_date').filter(is_finished=True).all())
def getPct(self):
return map(lambda s: (s.number, s.focus_factor(), s.accuracy_of_estimation(), s.accuracy_of_commit()),
self.object_list.order_by('start_date').filter(is_finished=True).all())
| gpl-3.0 | 218,280,061,264,985,020 | 43.583333 | 171 | 0.659346 | false |
Knio/dominate | dominate/dom_tag.py | 1 | 12996 | __license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
# pylint: disable=bad-indentation, bad-whitespace, missing-docstring
import copy
import numbers
from collections import defaultdict, namedtuple
from functools import wraps
import threading
try:
# Python 3
from collections.abc import Callable
except ImportError:
# Python 2.7
from collections import Callable
try:
basestring = basestring
except NameError: # py3
basestring = str
unicode = str
try:
import greenlet
except ImportError:
greenlet = None
def _get_thread_context():
context = [threading.current_thread()]
if greenlet:
context.append(greenlet.getcurrent())
return hash(tuple(context))
class dom_tag(object):
is_single = False # Tag does not require matching end tag (ex. <hr/>)
is_pretty = True # Text inside the tag should be left as-is (ex. <pre>)
# otherwise, text will be escaped() and whitespace may be
# modified
is_inline = False
def __new__(_cls, *args, **kwargs):
'''
    Check if bare tag is being used as a decorator
(called with a single function arg).
decorate the function and return
'''
if len(args) == 1 and isinstance(args[0], Callable) \
and not isinstance(args[0], dom_tag) and not kwargs:
wrapped = args[0]
@wraps(wrapped)
def f(*args, **kwargs):
with _cls() as _tag:
return wrapped(*args, **kwargs) or _tag
return f
return object.__new__(_cls)
def __init__(self, *args, **kwargs):
'''
Creates a new tag. Child tags should be passed as arguments and attributes
should be passed as keyword arguments.
There is a non-rendering attribute which controls how the tag renders:
* `__inline` - Boolean value. If True renders all children tags on the same
line.
'''
self.attributes = {}
self.children = []
self.parent = None
self.document = None
# Does not insert newlines on all children if True (recursive attribute)
self.is_inline = kwargs.pop('__inline', self.is_inline)
self.is_pretty = kwargs.pop('__pretty', self.is_pretty)
#Add child elements
if args:
self.add(*args)
for attr, value in kwargs.items():
self.set_attribute(*type(self).clean_pair(attr, value))
self._ctx = None
self._add_to_ctx()
# context manager
frame = namedtuple('frame', ['tag', 'items', 'used'])
# stack of frames
_with_contexts = defaultdict(list)
def _add_to_ctx(self):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
self._ctx = stack[-1]
stack[-1].items.append(self)
def __enter__(self):
stack = dom_tag._with_contexts[_get_thread_context()]
stack.append(dom_tag.frame(self, [], set()))
return self
def __exit__(self, type, value, traceback):
thread_id = _get_thread_context()
stack = dom_tag._with_contexts[thread_id]
frame = stack.pop()
for item in frame.items:
if item in frame.used: continue
self.add(item)
if not stack:
del dom_tag._with_contexts[thread_id]
def __call__(self, func):
'''
tag instance is being used as a decorator.
wrap func to make a copy of this tag
'''
# remove decorator from its context so it doesn't
# get added in where it was defined
if self._ctx:
self._ctx.used.add(self)
@wraps(func)
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
return f
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
__setitem__ = set_attribute
def delete_attribute(self, key):
if isinstance(key, int):
del self.children[key:key+1]
else:
del self.attributes[key]
__delitem__ = delete_attribute
def setdocument(self, doc):
'''
Creates a reference to the parent document to allow for partial-tree
validation.
'''
# assume that a document is correct in the subtree
if self.document != doc:
self.document = doc
for i in self.children:
if not isinstance(i, dom_tag): return
i.setdocument(doc)
def add(self, *args):
'''
Add new child tags.
'''
for obj in args:
if isinstance(obj, numbers.Number):
# Convert to string so we fall into next if block
obj = str(obj)
if isinstance(obj, basestring):
obj = escape(obj)
self.children.append(obj)
elif isinstance(obj, dom_tag):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
stack[-1].used.add(obj)
self.children.append(obj)
obj.parent = self
obj.setdocument(self.document)
elif isinstance(obj, dict):
for attr, value in obj.items():
self.set_attribute(*dom_tag.clean_pair(attr, value))
elif hasattr(obj, '__iter__'):
for subobj in obj:
self.add(subobj)
else: # wtf is it?
raise ValueError('%r not a tag or string.' % obj)
if len(args) == 1:
return args[0]
return args
def add_raw_string(self, s):
self.children.append(s)
def remove(self, obj):
self.children.remove(obj)
def clear(self):
for i in self.children:
if isinstance(i, dom_tag) and i.parent is self:
i.parent = None
self.children = []
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
# Stupid workaround since we can not use dom_tag in the method declaration
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results
def __getitem__(self, key):
'''
Returns the stored value of the specified attribute or child
(if it exists).
'''
if isinstance(key, int):
# Children are accessed using integers
try:
return object.__getattribute__(self, 'children')[key]
except KeyError:
raise IndexError('Child with index "%s" does not exist.' % key)
elif isinstance(key, basestring):
# Attributes are accessed using strings
try:
return object.__getattribute__(self, 'attributes')[key]
except KeyError:
raise AttributeError('Attribute "%s" does not exist.' % key)
else:
raise TypeError('Only integer and string types are valid for accessing '
'child tags and attributes, respectively.')
__getattr__ = __getitem__
def __len__(self):
'''
Number of child elements.
'''
return len(self.children)
def __bool__(self):
'''
Hack for "if x" and __len__
'''
return True
__nonzero__ = __bool__
def __iter__(self):
'''
Iterates over child elements.
'''
return self.children.__iter__()
def __contains__(self, item):
'''
Checks recursively if item is in children tree.
Accepts both a string and a class.
'''
return bool(self.get(item))
def __iadd__(self, obj):
'''
Reflexive binary addition simply adds tag as a child.
'''
self.add(obj)
return self
# String and unicode representations are the same as render()
def __unicode__(self):
return self.render()
__str__ = __unicode__
def render(self, indent=' ', pretty=True, xhtml=False):
data = self._render([], 0, indent, pretty, xhtml)
return u''.join(data)
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
pretty = pretty and self.is_pretty
name = getattr(self, 'tagname', type(self).__name__)
# Workaround for python keywords and standard classes/methods
# (del, object, input)
if name[-1] == '_':
name = name[:-1]
# open tag
sb.append('<')
sb.append(name)
for attribute, value in sorted(self.attributes.items()):
if value is not False: # False values must be omitted completely
sb.append(' %s="%s"' % (attribute, escape(unicode(value), True)))
sb.append(' />' if self.is_single and xhtml else '>')
if not self.is_single:
inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * indent_level)
# close tag
sb.append('</')
sb.append(name)
sb.append('>')
return sb
def _render_children(self, sb, indent_level, indent_str, pretty, xhtml):
inline = True
for child in self.children:
if isinstance(child, dom_tag):
if pretty and not child.is_inline:
inline = False
sb.append('\n')
sb.append(indent_str * indent_level)
child._render(sb, indent_level, indent_str, pretty, xhtml)
else:
sb.append(unicode(child))
return inline
def __repr__(self):
name = '%s.%s' % (self.__module__, type(self).__name__)
attributes_len = len(self.attributes)
attributes = '%s attribute' % attributes_len
if attributes_len != 1: attributes += 's'
children_len = len(self.children)
children = '%s child' % children_len
if children_len != 1: children += 'ren'
return '<%s at %x: %s, %s>' % (name, id(self), attributes, children)
@staticmethod
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')])
if attribute in set(['http_equiv']) or special_prefix:
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
@classmethod
def clean_pair(cls, attribute, value):
'''
This will call `clean_attribute` on the attribute and also allows for the
creation of boolean attributes.
Ex. input(selected=True) is equivalent to input(selected="selected")
'''
attribute = cls.clean_attribute(attribute)
# Check for boolean attributes
# (i.e. selected=True becomes selected="selected")
if value is True:
value = attribute
# Ignore `if value is False`: this is filtered out in render()
return (attribute, value)
_get_current_none = object()
def get_current(default=_get_current_none):
'''
get the current tag being used as a with context or decorated function.
if no context is active, raises ValueError, or returns the default, if provided
'''
h = _get_thread_context()
ctx = dom_tag._with_contexts.get(h, None)
if ctx:
return ctx[-1].tag
if default is _get_current_none:
raise ValueError('no current context')
return default
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
c = get_current()
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
c.set_attribute(*dom_tag.clean_pair(attr, value))
# escape() is used in render
from .util import escape
| lgpl-3.0 | -2,593,975,155,927,345,700 | 25.740741 | 85 | 0.6255 | false |
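
A short sketch of how the context-manager and decorator protocols implemented above are normally exercised, using tag subclasses from dominate.tags (illustrative; not part of dom_tag.py).

from dominate.tags import div, ul, li, p

@ul(cls='menu')                # dom_tag.__call__: a tag instance decorates a function
def menu(items):
    for item in items:
        li(item)               # tags created here land in the active context frame

with div(id='page') as page:   # __enter__ pushes a frame, __exit__ adopts its items
    p('welcome')
    menu(['home', 'about'])    # a copy of the decorated ul is added to `page`

print(page.render())
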
TuSimple/simpledet | config/RepPoints/reppoints_moment_r50v1_fpn_1x.py | 1 | 6559 | from models.RepPoints.builder import RepPoints as Detector
from models.retinanet.builder import MSRAResNet50V1FPN as Backbone
from models.RepPoints.builder import RepPointsNeck as Neck
from models.RepPoints.builder import RepPointsHead as Head
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0)
normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
# normalizer = NormalizeParam.normalizer
normalizer = normalizer_factory(type="fixbn")
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class HeadParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
class point_generate:
num_points = 9
scale = 4
stride = (8, 16, 32, 64, 128)
# transform = "minmax"
transform = "moment"
class head:
conv_channel = 256
point_conv_channel = 256
mean = None
std = None
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
class point_target:
target_scale = 4
num_pos = 1
class bbox_target:
pos_iou_thr = 0.5
neg_iou_thr = 0.5
min_pos_iou = 0.0
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class regress_target:
class_agnostic = None
mean = None
std = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
head = Head(HeadParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet-v1-50"
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["gn"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
begin_epoch = 0
end_epoch = 6
lr_iter = [60000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3
iter = 500
class TestParam:
min_det_score = 0.05 # filter appended boxes
max_det_per_image = 100
def process_roidb(x):
return x
def process_output(x, y):
return x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = (122.7717, 115.9465, 102.9801) # RGB order
std = (1.0, 1.0, 1.0)
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.retinanet.input import Norm2DImage
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric as cls_metric
import core.detection_metric as box_metric
cls_acc_metric = cls_metric.FGAccMetric(
"FGAcc",
["cls_loss_output", "point_refine_labels_output"],
[]
)
box_init_l1_metric = box_metric.L1(
"InitL1",
["pts_init_loss_output", "points_init_labels_output"],
[]
)
box_refine_l1_metric = box_metric.L1(
"RefineL1",
["pts_refine_loss_output", "point_refine_labels_output"],
[]
)
metric_list = [cls_acc_metric, box_init_l1_metric, box_refine_l1_metric]
return General, KvstoreParam, HeadParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| apache-2.0 | 1,111,768,911,099,931,600 | 27.393939 | 89 | 0.560604 | false |
openmicroscopy/omero-marshal | omero_marshal/encode/encoders/mask.py | 1 | 1127 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle.
# If the file is missing please request a copy by contacting
# [email protected].
#
from ... import SCHEMA_VERSION
from .shape import ShapeEncoder
from omero.model import MaskI
class Mask201501Encoder(ShapeEncoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/ROI/2015-01#Mask'
def encode(self, obj):
v = super(Mask201501Encoder, self).encode(obj)
self.set_if_not_none(v, 'X', obj.x)
self.set_if_not_none(v, 'Y', obj.y)
self.set_if_not_none(v, 'Width', obj.width)
self.set_if_not_none(v, 'Height', obj.height)
return v
class Mask201606Encoder(Mask201501Encoder):
TYPE = 'http://www.openmicroscopy.org/Schemas/OME/2016-06#Mask'
if SCHEMA_VERSION == '2015-01':
encoder = (MaskI, Mask201501Encoder)
elif SCHEMA_VERSION == '2016-06':
encoder = (MaskI, Mask201606Encoder)
MaskEncoder = encoder[1]
| gpl-2.0 | 9,218,345,415,002,380,000 | 27.897436 | 76 | 0.692103 | false |
jseabold/statsmodels | statsmodels/tsa/vector_ar/output.py | 5 | 6945 | from statsmodels.compat.python import lzip
from io import StringIO
import numpy as np
from statsmodels.iolib import SimpleTable
mat = np.array
_default_table_fmt = dict(
empty_cell = '',
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='=',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
class VARSummary(object):
default_fmt = dict(
#data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"],
#data_fmts = ["%#10.4g","%#10.4g","%#10.4g","%#6.4g"],
data_fmts = ["%#15.6F","%#15.6F","%#15.3F","%#14.3F"],
empty_cell = '',
#colwidths = 10,
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='=',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'r',
data_aligns = 'r',
stubs_align = 'l',
fmt = 'txt'
)
part1_fmt = dict(
default_fmt,
data_fmts = ["%s"],
colwidths = 15,
colsep=' ',
table_dec_below='',
header_dec_below=None,
)
part2_fmt = dict(
default_fmt,
data_fmts = ["%#12.6g","%#12.6g","%#10.4g","%#5.4g"],
colwidths = None,
colsep=' ',
table_dec_above='-',
table_dec_below='-',
header_dec_below=None,
)
def __init__(self, estimator):
self.model = estimator
self.summary = self.make()
def __repr__(self):
return self.summary
def make(self, endog_names=None, exog_names=None):
"""
Summary of VAR model
"""
buf = StringIO()
buf.write(self._header_table() + '\n')
buf.write(self._stats_table() + '\n')
buf.write(self._coef_table() + '\n')
buf.write(self._resid_info() + '\n')
return buf.getvalue()
def _header_table(self):
import time
model = self.model
t = time.localtime()
# TODO: change when we allow coef restrictions
# ncoefs = len(model.beta)
# Header information
part1title = "Summary of Regression Results"
part1data = [[model._model_type],
["OLS"], #TODO: change when fit methods change
[time.strftime("%a, %d, %b, %Y", t)],
[time.strftime("%H:%M:%S", t)]]
part1header = None
part1stubs = ('Model:',
'Method:',
'Date:',
'Time:')
part1 = SimpleTable(part1data, part1header, part1stubs,
title=part1title, txt_fmt=self.part1_fmt)
return str(part1)
def _stats_table(self):
# TODO: do we want individual statistics or should users just
# use results if wanted?
# Handle overall fit statistics
model = self.model
part2Lstubs = ('No. of Equations:',
'Nobs:',
'Log likelihood:',
'AIC:')
part2Rstubs = ('BIC:',
'HQIC:',
'FPE:',
'Det(Omega_mle):')
part2Ldata = [[model.neqs], [model.nobs], [model.llf], [model.aic]]
part2Rdata = [[model.bic], [model.hqic], [model.fpe], [model.detomega]]
part2Lheader = None
part2L = SimpleTable(part2Ldata, part2Lheader, part2Lstubs,
txt_fmt = self.part2_fmt)
part2R = SimpleTable(part2Rdata, part2Lheader, part2Rstubs,
txt_fmt = self.part2_fmt)
part2L.extend_right(part2R)
return str(part2L)
def _coef_table(self):
model = self.model
k = model.neqs
Xnames = self.model.exog_names
data = lzip(model.params.T.ravel(),
model.stderr.T.ravel(),
model.tvalues.T.ravel(),
model.pvalues.T.ravel())
header = ('coefficient','std. error','t-stat','prob')
buf = StringIO()
dim = k * model.k_ar + model.k_trend + model.k_exog_user
for i in range(k):
section = "Results for equation %s" % model.names[i]
buf.write(section + '\n')
table = SimpleTable(data[dim * i : dim * (i + 1)], header,
Xnames, title=None, txt_fmt = self.default_fmt)
buf.write(str(table) + '\n')
if i < k - 1:
buf.write('\n')
return buf.getvalue()
def _resid_info(self):
buf = StringIO()
names = self.model.names
buf.write("Correlation matrix of residuals" + '\n')
buf.write(pprint_matrix(self.model.resid_corr, names, names) + '\n')
return buf.getvalue()
def normality_summary(results):
title = "Normality skew/kurtosis Chi^2-test"
null_hyp = 'H_0: data generated by normally-distributed process'
return hypothesis_test_table(results, title, null_hyp)
def hypothesis_test_table(results, title, null_hyp):
fmt = dict(_default_table_fmt,
data_fmts=["%#15.6F","%#15.6F","%#15.3F", "%s"])
buf = StringIO()
table = SimpleTable([[results['statistic'],
results['crit_value'],
results['pvalue'],
str(results['df'])]],
['Test statistic', 'Critical Value', 'p-value',
'df'], [''], title=None, txt_fmt=fmt)
buf.write(title + '\n')
buf.write(str(table) + '\n')
buf.write(null_hyp + '\n')
buf.write("Conclusion: %s H_0" % results['conclusion'])
buf.write(" at %.2f%% significance level" % (results['signif'] * 100))
return buf.getvalue()
def pprint_matrix(values, rlabels, clabels, col_space=None):
buf = StringIO()
T, K = len(rlabels), len(clabels)
if col_space is None:
min_space = 10
col_space = [max(len(str(c)) + 2, min_space) for c in clabels]
else:
col_space = (col_space,) * K
row_space = max([len(str(x)) for x in rlabels]) + 2
head = _pfixed('', row_space)
for j, h in enumerate(clabels):
head += _pfixed(h, col_space[j])
buf.write(head + '\n')
for i, rlab in enumerate(rlabels):
line = ('%s' % rlab).ljust(row_space)
for j in range(K):
line += _pfixed(values[i,j], col_space[j])
buf.write(line + '\n')
return buf.getvalue()
def _pfixed(s, space, nanRep=None, float_format=None):
if isinstance(s, float):
if float_format:
formatted = float_format(s)
else:
formatted = "%#8.6F" % s
return formatted.rjust(space)
else:
return ('%s' % s)[:space].rjust(space)
| bsd-3-clause | -7,572,108,318,767,246,000 | 27.004032 | 79 | 0.500216 | false |
stefanklug/django-lazysignup | lazysignup/migrations/0001_initial.py | 1 | 4128 | # flake8: noqa
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LazyUser'
db.create_table('lazysignup_lazyuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('lazysignup', ['LazyUser'])
def backwards(self, orm):
# Deleting model 'LazyUser'
db.delete_table('lazysignup_lazyuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lazysignup.lazyuser': {
'Meta': {'object_name': 'LazyUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['lazysignup']
| bsd-3-clause | 1,776,426,415,339,057,000 | 57.971429 | 182 | 0.556928 | false |
dmccloskey/ddt_python | ddt_python/ddt_container_biPlotAndValidation.py | 1 | 20231 | from .ddt_container import ddt_container
from .ddt_tile import ddt_tile
from .ddt_tile_html import ddt_tile_html
class ddt_container_biPlotAndValidation(ddt_container):
def make_biPlotAndValidation(self,
data1,data2,
data1_keys,data1_nestkeys,data1_keymap,
data2_keys,data2_nestkeys,data2_keymap,
):
'''Make a biPlot and model validation plot
INPUT:
data1
data2
data1_keys
data1_nestkeys
data1_keymap
data2_keys
data2_nestkeys
data2_keymap
'''
cnt = 0;
        #form 1: biplot
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 1: biplot
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot',
'tiletype':'svg',
'tileid':"tile1",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"}
);
svg.make_svgparameters(
svgparameters={
"svgtype":'scatterlineplot2d_01',
"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"component",
"svgy1axislabel":"variance explained",
'svgformtileid':'filtermenu1',}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile1",[cnt,cnt]);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
cnt+=1;
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation filter menu',
'tiletype':'html',
'tileid':"filtermenu2",
'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform2',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit2','text':'submit'},
"formresetbuttonidtext":{'id':'reset2','text':'reset'},
"formupdatebuttonidtext":{'id':'update12','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu2",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu2",
"filtermenuhtmlid":"filtermenuform2",
"filtermenusubmitbuttonid":"submit2",
"filtermenuresetbuttonid":"reset2",
"filtermenuupdatebuttonid":"update2"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row2",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'verticalbarschart2d_01',
"svgkeymap":[data2_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,"svgy1axislabel":"Value",
"svgfilters":None,
'svgformtileid':'filtermenu2',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Cross Validation',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row3",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data2_keymap],
"tabletype":'responsivetable_01',
'tableid':'table2',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[cnt]);
# add data 2
self.add_data(
data2,
data2_keys,
data2_nestkeys
);
# increment the data counter
cnt+=1;
def make_biPlot(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
):
'''Make a biPlot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
cnt = 0;
        #form 1: biplot
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 1: biplot
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Bi Plot',
'tiletype':'svg',
'tileid':"tile1",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"}
);
svg.make_svgparameters(
svgparameters={
"svgtype":'scatterlineplot2d_01',
"svgkeymap":[data1_keymap,data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,
"svgx1axislabel":"component",
"svgy1axislabel":"variance explained",
'svgformtileid':'filtermenu1',}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile1",[cnt,cnt]);
#table 1: Bi plot
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Bi plot',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row3",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[cnt]);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
cnt+=1;
def make_hyperparameter(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
'''Make a hyperparameter bar plot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Cross Validation',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'verticalbarschart2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,"svgheight":350,"svgy1axislabel":"Value",
"svgfilters":None,
'svgformtileid':'filtermenu1',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[data_cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Cross Validation',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[data_cnt]);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
data_cnt+=1;
def make_impfeat(self,
data1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
        '''Make an important feature bar plot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Important feature filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
#svg 2: validation
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':'Important features',
'tiletype':'svg',
'tileid':"tile2",
'rowid':"row1",
'colid':"col2",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-8"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'horizontalbarschart2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg2',
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 250 },
"svgwidth":450,"svgheight":900,
"svgx1axislabel":"impfeat_value",
"svgy1axislabel":"component_name",
'svgformtileid':'filtermenu1',
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap("tile2",[data_cnt]);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Important features',
'tiletype':'table',
'tileid':"tile3",
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tile3",[data_cnt]);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# increment the data counter
data_cnt+=1;
def make_SPlot(self,
data1,data_dict1,
data1_keys,data1_nestkeys,data1_keymap,
data_cnt=0,
):
        '''Make an S-Plot
INPUT:
data1
data1_keys
data1_nestkeys
data1_keymap
'''
#form 2: validation
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'S-Plot filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-4"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[data_cnt]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
# add data 1
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
#svg 2: validation
svg = ddt_tile();
for i in range(int(max(data_dict1.keys()))):
axis = i+1;
svgid = 'svg'+str(axis);
colid = 'col'+str(axis+1);
tileid = 'tile'+str(axis);
svg.make_tileparameters(
tileparameters={
'tileheader':'S-Plot',
'tiletype':'svg',
'tileid':tileid,
'rowid':"row1",
'colid':colid,
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
svg.make_svgparameters(
svgparameters={
"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
                    'svgid':svgid,
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":400,"svgheight":350,
"svgx1axislabel":"loadings" + str(axis),
"svgy1axislabel":"correlations" + str(axis),
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(tileid,[axis]);
self.add_data(
data_dict1[axis],
data1_keys,
data1_nestkeys
);
#table 2: validation
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'S-Plot',
'tiletype':'table',
'tileid':'tile'+str(axis+1),
'rowid':"row2",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"}
);
crosstable.make_tableparameters(
tableparameters = {
"tablekeymap":[data1_keymap],
"tabletype":'responsivetable_01',
'tableid':'table1',
"tablefilters":None,
"tableheaders":None,
"tableclass":"table table-condensed table-hover"}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap('tile'+str(axis+1),[data_cnt]);
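# --- Hedged usage sketch (added) --------------------------------------------
# The enclosing report/dashboard class is not shown in this fragment; the
# variable name `dashboard` and the argument values below are placeholders.
#
#   dashboard.make_impfeat(
#       data1=impfeat_rows,                       # list of dicts of feature scores
#       data1_keys=['component_name', 'impfeat_value'],
#       data1_nestkeys=['component_name'],
#       data1_keymap={'xdata': 'impfeat_value', 'ydata': 'component_name'},
#   )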
| mit | -6,382,075,242,696,304,000 | 32.329489 | 82 | 0.497059 | false |
TeamSWAP/swap | src/raid_server.py | 1 | 5127 | #
# Copyright 2013 TeamSWAP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import socket
from time import sleep, time
from select import select
import fuzion
import log_parser
import log_analyzer
import net
from logging import prnt
from const import pkt
from const import *
from bytestream import ByteStream
class RaidServer(threading.Thread):
def __init__(self, sock):
threading.Thread.__init__(self)
self.centralSock = sock
self.node = None
self.stoppedEvent = threading.Event()
self.clientList = []
self.lastRaidUpdateSent = 0
self.lastRaidUpdatePoke = 0
def run(self):
prnt("RaidServer: Booting up...")
self.port = net.node.bind("swap:raid")
self.centralSock.setblocking(False)
while not self.stoppedEvent.isSet():
now = time()
# Central server
r, w, e = select([self.centralSock], [self.centralSock], [], 0)
if r:
data = self.centralSock.recv(1024)
stream = ByteStream(data)
packetType = stream.readByte()
if self.port.connectionPending():
conn = self.port.accept()
self.clientList.append({ 'conn': conn, 'playerInfo': None })
for client in self.clientList:
conn = client['conn']
if conn.recvPending():
data = conn.recv()
if data == None:
playerName = client['playerInfo']['name'] if client['playerInfo'] else "<NoInfo>"
prnt("Client (%s) left raid, reason=%s"%(playerName, fuzion.formatError(conn.closedReason)))
self.lastRaidUpdatePoke = time()
self.clientList.remove(client)
continue
packetType = data.readByte()
if packetType == pkt.PLAYER_UPDATE:
self.processPlayerUpdate(client, data)
if now - self.lastRaidUpdateSent > 2 and now - self.lastRaidUpdatePoke < 5:
self.sendRaidUpdate()
self.lastRaidUpdateSent = now
sleep(0.1)
self.port.close()
for client in self.clientList:
conn = client['conn']
conn.close()
self.centralSock.close()
prnt("RaidServer: Shutting down...")
def stop(self):
self.stoppedEvent.set()
def processPlayerUpdate(self, client, stream):
name = stream.readString()
totalDamage = stream.readInt()
totalDamageTaken = stream.readInt()
avgDps = stream.readFloat()
totalHealing = stream.readInt()
totalHealingReceived = stream.readInt()
avgHps = stream.readFloat()
totalThreat = stream.readInt()
tfbOrb = stream.readByte()
prnt("RaidServer: Got player update from %s!"%name)
conn = client['conn']
connType = 'T'
if conn.loopback:
connType = 'L'
elif conn.relay:
connType = 'R'
client['playerInfo'] = {
'name': name,
'connType': connType,
'totalDamage': totalDamage,
'totalDamageTaken': totalDamageTaken,
'avgDps': avgDps,
'totalHealing': totalHealing,
'totalHealingReceived': totalHealingReceived,
'avgHps': avgHps,
'totalThreat': totalThreat,
'tfbOrb': tfbOrb
}
self.lastRaidUpdatePoke = time()
def sendRaidUpdate(self):
prnt("RaidServer: Sending raid update...")
stream = fuzion.ByteStream()
stream.writeByte(pkt.RAID_UPDATE)
playerList = []
for client in self.clientList:
playerInfo = client['playerInfo']
if playerInfo == None:
continue
playerList.append(playerInfo)
stream.writeByte(len(playerList))
for player in playerList:
stream.writeString(player['name'])
stream.writeString(player['connType'])
stream.writeInt(player['totalDamage'])
stream.writeInt(player['totalDamageTaken'])
stream.writeFloat(player['avgDps'])
stream.writeInt(player['totalHealing'])
stream.writeInt(player['totalHealingReceived'])
stream.writeFloat(player['avgHps'])
stream.writeInt(player['totalThreat'])
stream.writeByte(player['tfbOrb'])
for client in self.clientList:
conn = client['conn']
conn.send(stream)
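# --- Hedged usage sketch (added; not part of the original module) -----------
# The central-server address below is purely illustrative; in the real
# application the socket is presumably created by the surrounding code.
#
#   central = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   central.connect(("127.0.0.1", 7669))   # hypothetical host/port
#   server = RaidServer(central)
#   server.start()          # run() binds "swap:raid" and services clients
#   ...
#   server.stop()           # sets stoppedEvent so run() cleans up and exits
#   server.join()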
| apache-2.0 | -184,038,147,123,107,870 | 31.656051 | 116 | 0.584357 | false |
nuncjo/Delver | examples.py | 1 | 6037 | # -*- coding:utf-8 -*-
import os
import psycopg2
from pprint import pprint
from delver import Crawler
def scraping_movies_table():
c = Crawler()
c.logging = True
c.useragent = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
c.open("http://www.boxofficemojo.com/daily/")
pprint(c.tables())
def user_login():
c = Crawler()
c.useragent = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/60.0.3112.90 Safari/537.36"
)
c.random_timeout = (0, 5)
c.open('http://testing-ground.scraping.pro/login')
forms = c.forms()
if forms:
login_form = forms[0]
login_form.fields = {
'usr': 'admin',
'pwd': '12345'
}
c.submit(login_form)
success_check = c.submit_check(
login_form,
phrase='WELCOME :)',
status_codes=[200]
)
print(success_check)
class OnePunchManDownloader:
"""Downloads One Punch Man free manga chapers to local directories.
Uses one main thread for scraper with random timeout.
Uses 20 threads just for image downloads.
"""
def __init__(self):
self._target_directory = 'one_punch_man'
self._start_url = "http://m.mangafox.me/manga/onepunch_man_one/"
self.crawler = Crawler()
self.crawler.random_timeout = (0, 5)
self.crawler.useragent = "Googlebot-Image/1.0"
def run(self):
self.crawler.open(self._start_url)
for link in self.crawler.links(filters={'text': 'Ch '}, match='IN'):
self.download_images(link)
def download_images(self, link):
target_path = '{}/{}'.format(self._target_directory, link.split('/')[-2])
full_chapter_url = link.replace('/manga/', '/roll_manga/')
self.crawler.open(full_chapter_url)
images = self.crawler.xpath("//img[@class='reader-page']/@data-original")
os.makedirs(target_path, exist_ok=True)
self.crawler.download_files(target_path, files=images, workers=20)
def one_punch_downloader():
downloader = OnePunchManDownloader()
downloader.run()
class WithConnection:
def __init__(self, params):
self._connection = psycopg2.connect(**params)
self._connection.autocommit = True
self._cursor = self._connection.cursor()
def table_exists(self, table_name):
self._cursor.execute('''
select exists(
select * from information_schema.tables where table_name='{}'
)
'''.format(table_name))
return self._cursor.fetchone()[0]
def scrape_page(crawler):
""" Scrapes rows from tables with promotions.
:param crawler: <delver.crawler.Crawler object>
:return: generator with page of rows
"""
titles = crawler.xpath("//div/span[@class='title']/text()")
discounts = crawler.xpath("//div[contains(@class, 'search_discount')]/span/text()")
final_prices = crawler.xpath("//div[contains(@class, 'discounted')]//text()[2]").strip()
yield [{
'title': row[0],
'discount': row[1],
'price': row[2]
} for row in zip(titles, discounts, final_prices)]
class SteamPromotionsScraper:
""" Scraper which can be iterated through
Usage example::
>>> promotions_scraper = SteamPromotionsScraper()
>>> for page in promotions_scraper:
... pprint(page)
"""
def __init__(self):
self.crawler = Crawler()
self.crawler.logging = True
self.crawler.useragent = \
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
self.crawler.random_timeout = (0, 5)
def scrape_by_page(self):
self.crawler.open('http://store.steampowered.com/search/?specials=1')
yield from scrape_page(self.crawler)
while self.crawler.links(filters={
'class': 'pagebtn',
'text': '>'
}):
self.crawler.open(self.crawler.current_results[0])
yield from scrape_page(self.crawler)
def __iter__(self):
return self.scrape_by_page()
class SteamPromotionsScraperDB(WithConnection):
"""Example with saving data to postgresql database
Usage example::
>>> promotions_scraper_db = SteamPromotionsScraperDB({
... 'dbname': "test",
... 'user': "testuser",
... 'password': "test"
... })
        >>> promotions_scraper_db.save_to_db()
"""
def __init__(self, params):
super().__init__(params)
self.crawler = Crawler()
self.crawler.logging = True
self.crawler.useragent = \
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
self.crawler.random_timeout = (0, 5)
def scrape_by_page(self):
self.crawler.open('http://store.steampowered.com/search/?specials=1')
yield from scrape_page(self.crawler)
while self.crawler.links(filters={
'class': 'pagebtn',
'text': '>'
}):
self.crawler.open(self.crawler.current_results[0])
yield from scrape_page(self.crawler)
def save_to_db(self):
if not self.table_exists('promotions'):
self._cursor.execute(
'''
CREATE TABLE promotions (
id serial PRIMARY KEY,
title varchar(255),
discount varchar(4),
price varchar(10)
);
'''
)
for page in self.scrape_by_page():
for row in page:
self._cursor.execute(
'''
INSERT INTO promotions(title, discount, price)
VALUES(%s, %s, %s)
''',
(row.get('title'), row.get('discount'), row.get('price'))
)
pprint(row)
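# --- Hedged demo driver (added; not part of the original examples) ----------
# Runs a couple of the examples defined above when executed directly; network
# access and the availability of the scraped sites are assumptions here.
if __name__ == '__main__':
    scraping_movies_table()
    user_login()
    for page in SteamPromotionsScraper():
        pprint(page)
        break  # only the first page of promotions in this sketch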
| mit | 3,424,693,276,046,666,000 | 31.283422 | 92 | 0.558556 | false |
beeftornado/sentry | src/sentry/stacktraces/processing.py | 1 | 20204 | from __future__ import absolute_import
import six
import logging
from datetime import datetime
from django.utils import timezone
from collections import namedtuple, OrderedDict
import sentry_sdk
from sentry.models import Project, Release
from sentry.utils.cache import cache
from sentry.utils.hashlib import hash_values
from sentry.utils.safe import get_path, safe_execute
from sentry.stacktraces.functions import set_in_app, trim_function_name
logger = logging.getLogger(__name__)
StacktraceInfo = namedtuple(
"StacktraceInfo", ["stacktrace", "container", "platforms", "is_exception"]
)
StacktraceInfo.__hash__ = lambda x: id(x)
StacktraceInfo.__eq__ = lambda a, b: a is b
StacktraceInfo.__ne__ = lambda a, b: a is not b
class ProcessableFrame(object):
def __init__(self, frame, idx, processor, stacktrace_info, processable_frames):
self.frame = frame
self.idx = idx
self.processor = processor
self.stacktrace_info = stacktrace_info
self.data = None
self.cache_key = None
self.cache_value = None
self.processable_frames = processable_frames
def __repr__(self):
return "<ProcessableFrame %r #%r at %r>" % (
self.frame.get("function") or "unknown",
self.idx,
self.frame.get("instruction_addr"),
)
def __contains__(self, key):
return key in self.frame
def __getitem__(self, key):
return self.frame[key]
def get(self, key, default=None):
return self.frame.get(key, default)
def close(self):
# manually break circular references
self.closed = True
self.processable_frames = None
self.stacktrace_info = None
self.processor = None
@property
def previous_frame(self):
last_idx = len(self.processable_frames) - self.idx - 1 - 1
if last_idx < 0:
return
return self.processable_frames[last_idx]
def set_cache_value(self, value):
if self.cache_key is not None:
cache.set(self.cache_key, value, 3600)
return True
return False
def set_cache_key_from_values(self, values):
if values is None:
self.cache_key = None
return
h = hash_values(values, seed=self.processor.__class__.__name__)
self.cache_key = rv = "pf:%s" % h
return rv
class StacktraceProcessingTask(object):
def __init__(self, processable_stacktraces, processors):
self.processable_stacktraces = processable_stacktraces
self.processors = processors
def close(self):
for frame in self.iter_processable_frames():
frame.close()
def iter_processors(self):
return iter(self.processors)
def iter_processable_stacktraces(self):
return six.iteritems(self.processable_stacktraces)
def iter_processable_frames(self, processor=None):
for _, frames in self.iter_processable_stacktraces():
for frame in frames:
if processor is None or frame.processor == processor:
yield frame
class StacktraceProcessor(object):
def __init__(self, data, stacktrace_infos, project=None):
self.data = data
self.stacktrace_infos = stacktrace_infos
if project is None:
project = Project.objects.get_from_cache(id=data["project"])
self.project = project
def close(self):
pass
def get_release(self, create=False):
"""Convenient helper to return the release for the current data
and optionally creates the release if it's missing. In case there
is no release info it will return `None`.
"""
release = self.data.get("release")
if not release:
return None
if not create:
return Release.get(project=self.project, version=self.data["release"])
timestamp = self.data.get("timestamp")
if timestamp is not None:
date = datetime.fromtimestamp(timestamp).replace(tzinfo=timezone.utc)
else:
date = None
return Release.get_or_create(
project=self.project, version=self.data["release"], date_added=date
)
def handles_frame(self, frame, stacktrace_info):
"""Returns true if this processor can handle this frame. This is the
earliest check and operates on a raw frame and stacktrace info. If
this returns `True` a processable frame is created.
"""
return False
def preprocess_frame(self, processable_frame):
"""After a processable frame has been created this method is invoked
to give the processor a chance to store additional data to the frame
if wanted. In particular a cache key can be set here.
"""
pass
def process_exception(self, exception):
"""Processes an exception."""
return False
def process_frame(self, processable_frame, processing_task):
"""Processes the processable frame and returns a tuple of three
lists: ``(frames, raw_frames, errors)`` where frames is the list of
processed frames, raw_frames is the list of raw unprocessed frames
(which however can also be modified if needed) as well as a list of
optional errors. Each one of the items can be `None` in which case
the original input frame is assumed.
"""
def preprocess_step(self, processing_task):
"""After frames are preprocessed but before frame processing kicks in
the preprocessing step is run. This already has access to the cache
values on the frames.
"""
return False
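# Illustrative sketch (added; not part of Sentry): a minimal processor showing
# how the hooks above fit together. The class name and the "example" platform
# value are invented for illustration only.
class ExampleNoopProcessor(StacktraceProcessor):
    def handles_frame(self, frame, stacktrace_info):
        # claim only frames from our hypothetical platform
        return frame.get("platform") == "example"
    def preprocess_frame(self, processable_frame):
        # key the cache on the values that determine the processing result
        processable_frame.set_cache_key_from_values([processable_frame.get("function")])
    def process_frame(self, processable_frame, processing_task):
        # (processed_frames, raw_frames, errors); all None keeps the original frame
        return None, None, None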
def find_stacktraces_in_data(data, include_raw=False, with_exceptions=False):
"""Finds all stracktraces in a given data blob and returns it
together with some meta information.
If `include_raw` is True, then also raw stacktraces are included. If
`with_exceptions` is set to `True` then stacktraces of the exception
are always included and the `is_exception` flag is set on that stack
info object.
"""
rv = []
def _report_stack(stacktrace, container, is_exception=False):
if not is_exception and (not stacktrace or not get_path(stacktrace, "frames", filter=True)):
return
platforms = set(
frame.get("platform") or data.get("platform")
for frame in get_path(stacktrace, "frames", filter=True, default=())
)
rv.append(
StacktraceInfo(
stacktrace=stacktrace,
container=container,
platforms=platforms,
is_exception=is_exception,
)
)
for exc in get_path(data, "exception", "values", filter=True, default=()):
_report_stack(exc.get("stacktrace"), exc, is_exception=with_exceptions)
_report_stack(data.get("stacktrace"), None)
for thread in get_path(data, "threads", "values", filter=True, default=()):
_report_stack(thread.get("stacktrace"), thread)
if include_raw:
for info in rv[:]:
if info.container is not None:
_report_stack(info.container.get("raw_stacktrace"), info.container)
return rv
def _has_system_frames(frames):
"""
    Determines whether the stacktrace mixes in-app and system frames, i.e. it
    contains at least one in_app=false frame but is not made up exclusively of them.
"""
system_frames = 0
for frame in frames:
if not frame.get("in_app"):
system_frames += 1
return bool(system_frames) and len(frames) != system_frames
def _normalize_in_app(stacktrace, platform=None, sdk_info=None):
"""
Ensures consistent values of in_app across a stacktrace.
"""
has_system_frames = _has_system_frames(stacktrace)
for frame in stacktrace:
# If all frames are in_app, flip all of them. This is expected by the UI
if not has_system_frames:
set_in_app(frame, False)
# Default to false in all cases where processors or grouping enhancers
# have not yet set in_app.
elif frame.get("in_app") is None:
set_in_app(frame, False)
def normalize_stacktraces_for_grouping(data, grouping_config=None):
"""
Applies grouping enhancement rules and ensure in_app is set on all frames.
This also trims functions if necessary.
"""
stacktraces = []
for stacktrace_info in find_stacktraces_in_data(data, include_raw=True):
frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
if frames:
stacktraces.append(frames)
if not stacktraces:
return
platform = data.get("platform")
# Put the trimmed function names into the frames. We only do this if
# the trimming produces a different function than the function we have
# otherwise stored in `function` to not make the payload larger
# unnecessarily.
for frames in stacktraces:
for frame in frames:
# Restore the original in_app value before the first grouping
# enhancers have been run. This allows to re-apply grouping
# enhancers on the original frame data.
orig_in_app = get_path(frame, "data", "orig_in_app")
if orig_in_app is not None:
frame["in_app"] = None if orig_in_app == -1 else bool(orig_in_app)
if frame.get("raw_function") is not None:
continue
raw_func = frame.get("function")
if not raw_func:
continue
function_name = trim_function_name(raw_func, frame.get("platform") or platform)
if function_name != raw_func:
frame["raw_function"] = raw_func
frame["function"] = function_name
# If a grouping config is available, run grouping enhancers
if grouping_config is not None:
for frames in stacktraces:
grouping_config.enhancements.apply_modifications_to_frame(frames, platform)
# normalize in-app
for stacktrace in stacktraces:
_normalize_in_app(stacktrace, platform=platform)
def should_process_for_stacktraces(data):
from sentry.plugins.base import plugins
infos = find_stacktraces_in_data(data, with_exceptions=True)
platforms = set()
for info in infos:
platforms.update(info.platforms or ())
for plugin in plugins.all(version=2):
processors = safe_execute(
plugin.get_stacktrace_processors,
data=data,
stacktrace_infos=infos,
platforms=platforms,
_with_transaction=False,
)
if processors:
return True
return False
def get_processors_for_stacktraces(data, infos):
from sentry.plugins.base import plugins
platforms = set()
for info in infos:
platforms.update(info.platforms or ())
processors = []
for plugin in plugins.all(version=2):
processors.extend(
safe_execute(
plugin.get_stacktrace_processors,
data=data,
stacktrace_infos=infos,
platforms=platforms,
_with_transaction=False,
)
or ()
)
if processors:
project = Project.objects.get_from_cache(id=data["project"])
processors = [x(data, infos, project) for x in processors]
return processors
def get_processable_frames(stacktrace_info, processors):
"""Returns thin wrappers around the frames in a stacktrace associated
with the processor for it.
"""
frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
frame_count = len(frames)
rv = []
for idx, frame in enumerate(frames):
processor = next((p for p in processors if p.handles_frame(frame, stacktrace_info)), None)
if processor is not None:
rv.append(
ProcessableFrame(frame, frame_count - idx - 1, processor, stacktrace_info, rv)
)
return rv
def process_single_stacktrace(processing_task, stacktrace_info, processable_frames):
# TODO: associate errors with the frames and processing issues
changed_raw = False
changed_processed = False
raw_frames = []
processed_frames = []
all_errors = []
bare_frames = get_path(stacktrace_info.stacktrace, "frames", filter=True, default=())
frame_count = len(bare_frames)
processable_frames = {frame.idx: frame for frame in processable_frames}
for i, bare_frame in enumerate(bare_frames):
idx = frame_count - i - 1
rv = None
if idx in processable_frames:
processable_frame = processable_frames[idx]
assert processable_frame.frame is bare_frame
try:
rv = processable_frame.processor.process_frame(processable_frame, processing_task)
except Exception:
logger.exception("Failed to process frame")
expand_processed, expand_raw, errors = rv or (None, None, None)
if expand_processed is not None:
processed_frames.extend(expand_processed)
changed_processed = True
elif expand_raw: # is not empty
processed_frames.extend(expand_raw)
changed_processed = True
else:
processed_frames.append(bare_frame)
if expand_raw is not None:
raw_frames.extend(expand_raw)
changed_raw = True
else:
raw_frames.append(bare_frame)
all_errors.extend(errors or ())
return (
processed_frames if changed_processed else None,
raw_frames if changed_raw else None,
all_errors,
)
def get_crash_frame_from_event_data(data, frame_filter=None):
"""
Return the highest (closest to the crash) in-app frame in the top stacktrace
which doesn't fail the given filter test.
If no such frame is available, return the highest non-in-app frame which
otherwise meets the same criteria.
Return None if any of the following are true:
- there are no frames
- all frames fail the given filter test
- we're unable to find any frames nested in either event.exception or
event.stacktrace, and there's anything other than exactly one thread
in the data
"""
frames = get_path(data, "exception", "values", -1, "stacktrace", "frames") or get_path(
data, "stacktrace", "frames"
)
if not frames:
threads = get_path(data, "threads", "values")
if threads and len(threads) == 1:
frames = get_path(threads, 0, "stacktrace", "frames")
default = None
for frame in reversed(frames or ()):
if frame is None:
continue
if frame_filter is not None:
if not frame_filter(frame):
continue
if frame.get("in_app"):
return frame
if default is None:
default = frame
if default:
return default
def lookup_frame_cache(keys):
rv = {}
for key in keys:
rv[key] = cache.get(key)
return rv
def get_stacktrace_processing_task(infos, processors):
"""Returns a list of all tasks for the processors. This can skip over
processors that seem to not handle any frames.
"""
by_processor = {}
to_lookup = {}
# by_stacktrace_info requires stable sorting as it is used in
# StacktraceProcessingTask.iter_processable_stacktraces. This is important
# to guarantee reproducible symbolicator requests.
by_stacktrace_info = OrderedDict()
for info in infos:
processable_frames = get_processable_frames(info, processors)
for processable_frame in processable_frames:
processable_frame.processor.preprocess_frame(processable_frame)
by_processor.setdefault(processable_frame.processor, []).append(processable_frame)
by_stacktrace_info.setdefault(processable_frame.stacktrace_info, []).append(
processable_frame
)
if processable_frame.cache_key is not None:
to_lookup[processable_frame.cache_key] = processable_frame
frame_cache = lookup_frame_cache(to_lookup)
for cache_key, processable_frame in six.iteritems(to_lookup):
processable_frame.cache_value = frame_cache.get(cache_key)
return StacktraceProcessingTask(
processable_stacktraces=by_stacktrace_info, processors=by_processor
)
def dedup_errors(errors):
# This operation scales bad but we do not expect that many items to
# end up in rv, so that should be okay enough to do.
rv = []
for error in errors:
if error not in rv:
rv.append(error)
return rv
def process_stacktraces(data, make_processors=None, set_raw_stacktrace=True):
infos = find_stacktraces_in_data(data, with_exceptions=True)
if make_processors is None:
processors = get_processors_for_stacktraces(data, infos)
else:
processors = make_processors(data, infos)
# Early out if we have no processors. We don't want to record a timer
# in that case.
if not processors:
return
changed = False
# Build a new processing task
processing_task = get_stacktrace_processing_task(infos, processors)
try:
# Preprocess step
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.preprocess_step"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.preprocess_step(processing_task):
changed = True
span.set_data("data_changed", True)
# Process all stacktraces
for stacktrace_info, processable_frames in processing_task.iter_processable_stacktraces():
# Let the stacktrace processors touch the exception
if stacktrace_info.is_exception and stacktrace_info.container:
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_exception"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.process_exception(stacktrace_info.container):
changed = True
span.set_data("data_changed", True)
# If the stacktrace is empty we skip it for processing
if not stacktrace_info.stacktrace:
continue
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_single_stacktrace"
) as span:
new_frames, new_raw_frames, errors = process_single_stacktrace(
processing_task, stacktrace_info, processable_frames
)
if new_frames is not None:
stacktrace_info.stacktrace["frames"] = new_frames
changed = True
span.set_data("data_changed", True)
if (
set_raw_stacktrace
and new_raw_frames is not None
and stacktrace_info.container is not None
):
stacktrace_info.container["raw_stacktrace"] = dict(
stacktrace_info.stacktrace, frames=new_raw_frames
)
changed = True
if errors:
data.setdefault("errors", []).extend(dedup_errors(errors))
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
except Exception:
logger.exception("stacktraces.processing.crash")
data.setdefault("_metrics", {})["flag.processing.fatal"] = True
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
finally:
for processor in processors:
processor.close()
processing_task.close()
if changed:
return data
| bsd-3-clause | 501,741,680,478,238,800 | 34.076389 | 100 | 0.620719 | false |
matthiaskrgr/cppcheck | tools/extracttests.py | 1 | 10280 | #!/usr/bin/env python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2017 Cppcheck team.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Extract test cases information from Cppcheck test
file
"""
import os
import sys
import re
class Extract:
"""
Read Cppcheck test file and create data
representation
"""
# array that stores all the test cases
nodes = []
def parseFile(self, filename):
"""
parse test file and add info to the nodes
variable
"""
name = '[0-9a-zA-Z_]+'
string = '\\"(.+)\\"'
testclass = None
functionName = None
code = None
fin = open(filename, 'r')
for line in fin:
# testclass starts
res = re.match('class (' + name + ')', line)
if res is not None:
testclass = res.group(1)
# end of testclass
if re.match('};', line) is not None:
testclass = None
# function start
res = re.match('\\s+void (' + name + ')\\(\\)', line)
if res is not None:
functionName = res.group(1)
elif re.match('\\s+}', line) is not None:
functionName = None
if functionName is None:
continue
# check
            res = re.match('\\s+check.*\\(' + string, line)
if res is not None:
code = res.group(1)
# code..
if code is not None:
res = re.match('\\s+' + string, line)
if res is not None:
code = code + res.group(1)
# assert
res = re.match('\\s+ASSERT_EQUALS\\(\\"([^"]*)\\",', line)
if res is not None and code is not None:
node = {'testclass': testclass,
'functionName': functionName,
'code': code,
'expected': res.group(1)}
self.nodes.append(node)
code = None
# close test file
fin.close()
def strtoxml(s):
"""Convert string to xml/html format"""
    return s.replace('&', '&amp;').replace('"', '&quot;').replace('<', '&lt;').replace('>', '&gt;')
def trimname(name):
"""Trim test name. Trailing underscore and digits are removed"""
while name[-1].isdigit():
name = name[:-1]
if name[-1] == '_':
name = name[:-1]
return name
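# Example (added comment): trimname('nullpointer1') and trimname('nullpointer_1')
# both return 'nullpointer', so numbered test functions are grouped onto one
# report page per check name.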
def writeHtmlFile(nodes, functionName, filename, errorsOnly):
"""Write html file for a function name"""
fout = open(filename, 'w')
fout.write('<html>\n')
fout.write('<head>\n')
fout.write(' <style type="text/css">\n')
fout.write(' body { font-size: 0.8em }\n')
fout.write(
' th { background-color: #A3C159; text-transform: uppercase }\n')
fout.write(' td { background-color: white; vertical-align: text-top }\n')
fout.write(' pre { background-color: #EEEEEE }\n')
fout.write(' </style>\n')
fout.write('</head>\n')
fout.write('<body>\n')
fout.write('<a href="index.htm">Home</a> -- ')
if errorsOnly:
fout.write('<a href="all-' + functionName + '.htm">All test cases</a>')
else:
fout.write(
'<a href="errors-' + functionName + '.htm">Error test cases</a>')
fout.write('<br><br>')
testclass = None
num = 0
for node in nodes:
if errorsOnly and node['expected'] == '':
continue
if trimname(node['functionName']) == functionName:
num = num + 1
if not testclass:
testclass = node['testclass']
fout.write(
'<h1>' + node['testclass'] + '::' + functionName + '</h1>')
fout.write('<table border="0" cellspacing="0">\n')
fout.write(
' <tr><th>Nr</th><th>Code</th><th>Expected</th></tr>\n')
fout.write(' <tr><td>' + str(num) + '</td>')
fout.write('<td><pre>' + strtoxml(
node['code']).replace('\\n', '\n') + '</pre></td>')
fout.write(
'<td>' + strtoxml(node['expected']).replace('\\n', '<br>') + '</td>')
fout.write('</tr>\n')
if testclass is not None:
fout.write('</table>\n')
fout.write('</body></html>\n')
fout.close()
if len(sys.argv) <= 1 or '--help' in sys.argv:
print('Extract test cases from test file')
print(
'Syntax: extracttests.py [--html=folder] [--xml] [--code=folder] [--onlyTP] path/testfile.cpp')
sys.exit(0)
# parse command line
xml = False
filename = None
htmldir = None
codedir = None
onlyTP = None
for arg in sys.argv[1:]:
if arg == '--xml':
xml = True
elif arg == '--onlyTP':
onlyTP = True
elif arg.startswith('--html='):
htmldir = arg[7:]
elif arg.startswith('--code='):
codedir = arg[7:]
elif arg.endswith('.cpp'):
filename = arg
else:
print('Invalid option: ' + arg)
sys.exit(1)
# extract test cases
if filename is not None:
# parse test file
e = Extract()
e.parseFile(filename)
# generate output
if xml:
print('<?xml version="1.0"?>')
print('<tree>')
count = 0
for node in e.nodes:
s = ' <node'
s += ' function="' + node['functionName'] + '"'
s += ' code="' + strtoxml(node['code']) + '"'
s += ' expected="' + strtoxml(node['expected']) + '"'
s += '/>'
print(s)
print('</tree>')
elif htmldir is not None:
if not htmldir.endswith('/'):
htmldir += '/'
if not os.path.exists(htmldir):
os.mkdir(htmldir)
findex = open(htmldir + 'index.htm', 'w')
findex.write('<html>\n')
findex.write('<head>\n')
findex.write(' <style type="text/css">\n')
findex.write(' table { font-size: 0.8em }\n')
findex.write(
' th { background-color: #A3C159; text-transform: uppercase }\n')
findex.write(
' td { background-color: #F0FFE0; vertical-align: text-top }\n')
findex.write(' A:link { text-decoration: none }\n')
findex.write(' A:visited { text-decoration: none }\n')
findex.write(' A:active { text-decoration: none }\n')
findex.write(' A:hover { text-decoration: underline; color: blue }\n')
findex.write(' </style>\n')
findex.write('</head>\n')
findex.write('<body>\n')
findex.write('<h1>' + filename + '</h1>\n')
functionNames = []
for node in e.nodes:
functionname = trimname(node['functionName'])
if functionname not in functionNames:
functionNames.append(functionname)
functionNames.sort()
findex.write('<table border="0" cellspacing="0">\n')
findex.write(' <tr><th>Name</th><th>Errors</th><th>All</th></tr>\n')
for functionname in functionNames:
findex.write(' <tr><td>' + functionname + '</td>')
numall = 0
numerr = 0
for node in e.nodes:
if trimname(node['functionName']) == functionname:
numall = numall + 1
if node['expected'] != '':
numerr = numerr + 1
if numerr == 0:
findex.write('<td><div align="right">0</div></td>')
else:
findex.write('<td><a href="errors-' + functionname +
'.htm"><div align="right">' + str(numerr) + '</div></a></td>')
findex.write('<td><a href="all-' + functionname +
'.htm"><div align="right">' + str(numall) + '</div></a></td>')
findex.write('</tr>\n')
findex.write('</table>\n')
findex.write('</body></html>')
findex.close()
# create files for each functionName
for functionName in functionNames:
writeHtmlFile(e.nodes,
functionName,
htmldir + 'errors-' + functionName + '.htm',
True)
writeHtmlFile(e.nodes,
functionName,
htmldir + 'all-' + functionName + '.htm',
False)
elif codedir:
testnum = 0
if not codedir.endswith('/'):
codedir = codedir + '/'
if not os.path.exists(codedir):
os.mkdir(codedir)
errors = open(codedir + 'errors.txt', 'w')
for node in e.nodes:
if onlyTP and node['expected'] == '':
continue
testnum = testnum + 1
functionName = node['functionName']
code = node['code']
code = code.replace('\\n', '\n')
code = code.replace('\\"', '"')
expected = node['expected']
filename = '0000' + str(testnum) + '-'
filename = filename[-4:]
filename += functionName + '.cpp'
# source code
fout = open(codedir + filename, 'w')
fout.write(code)
fout.close()
# write 'expected' to errors.txt
if expected != '':
expected = expected.replace('\\n', '\n')
expected = expected.replace('\\"', '"')
expected = re.sub(
'\\[test.cp?p?:', '[' + filename + ':', expected)
errors.write(expected)
errors.close()
else:
for node in e.nodes:
print(node['functionName'])
| gpl-3.0 | 4,796,322,087,070,515,000 | 31.125 | 103 | 0.501556 | false |
MTLeeLab/RESA | resa_util.py | 1 | 10051 | ###
# Copyright 2016 Miler T. Lee, University of Pittburgh
# This file is part of the RESA Suite
#
# RESA Suite is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# RESA Suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with RESA Suite. If not, see <http://www.gnu.org/licenses/>.
#
#
# resa_util.py: Utilities for processing RESA data
##
import bz2
import gzip
import json
import re
import sys
import subprocess
def initialize_loci(utr_bed12_file, utr_fasta_file, test = False):
"""
Given the UTRs listed in the bed12 file and
their corresponding sequences in the fasta_file,
creates a dict of loci[key] = (chr, strand, exon_list, seq)
"""
seqs = dict(read_fasta(utr_fasta_file))
loci = {}
f = open(utr_bed12_file)
for line in f:
fields = line.strip().split()
chrom = fields[0]
start = int(fields[1])
strand = fields[5]
feat_id = fields[3]
block_sizes = fields[10].strip(',').split(',')
block_starts = fields[11].strip(',').split(',')
exons = []
for i, (bsize, bstart) in enumerate(zip(block_sizes, block_starts)):
gstart = start + int(bstart)
gend = gstart + int(bsize)
exons.append((gstart, gend))
loci[feat_id] = (chrom, strand, tuple(exons), seqs[fields[3]].upper())
if test:
break
f.close()
return loci
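# Hedged usage sketch (added): the file names below are placeholders.
#   loci = initialize_loci('utrs.bed12', 'utrs.fa')
#   chrom, strand, exons, seq = loci['some_utr_id']
#   # exons is a tuple of (genomic_start, genomic_end) pairs; seq is upper-case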
###
# UTILITIES
###
nt_mutations = {'C': 'T', 'G': 'A'}
anti_strand_str = {'-': '+', '+': '-'}
###string.maketrans('acgturyACGTURY', 'tgcaayrTGCAAYR')
DNA_TRANS = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@TBGDEFCHIJKLMNOPQYSAAVWXRZ[\\]^_`tbgdefchijklmnopqysaavwxrz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
def rc(sequence, reverse = True):
"""
Reverse complement a DNA sequence, preserving case
"""
result = sequence.translate(DNA_TRANS)
if reverse:
return result[::-1]
else:
return result
def bam_entry_is_reverse(samflag):
"""
Flag is passed in as an integer. Determines
whether the 0x10 bit is set (16 in base 10),
which indicates reverse complemented sequence.
This is done using a binary operator &
"""
return samflag & 16 == 16
def seq_mask(seq, chars = ['A', 'G']):
"""
Replaces specified characters with N
"""
for char in chars:
seq = seq.replace(char, 'N')
return seq
def load_chr_seqs(genome_fa):
"""
Loads all chromosome sequences into a dict
"""
chr_dict = dict(read_fasta(genome_fa))
return chr_dict
def load_chr_seq(chr_id, chr_dict, genome_fa):
"""
Loads the chromosome sequence into memory if it's not
already there
"""
if chr_id not in chr_dict:
fasta_file = genome_fa % chr_id
chr_dict[chr_id] = read_fasta(fasta_file)[0][1]
return chr_dict[chr_id]
def decode_cigar(cigar):
"""
Parses the cigar string into integers and letters
"""
return re.findall('(\d+)([MNDISHPX=])', cigar)
def cigar_span_(cigar):
"""
Interprets the cigar string as the number of genomic
positions consumed
"""
span = 0
cigar_ops = decode_cigar(cigar)
for nts, op in cigar_ops:
nts = int(nts)
if op != 'I':
span += nts
return span
def cigar_span(cigar):
return sum(int(x) for x in re.findall('(\d+)[MNDSHPX=]', cigar)) #no I
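# Worked example (added comment): decode_cigar('10M2I5M') yields
# [('10', 'M'), ('2', 'I'), ('5', 'M')] and cigar_span('10M2I5M') == 15,
# since inserted bases (I) do not consume genomic positions.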
def tx_indexing(exons, minus = False, inverse = False):
"""
Returns a dict of genomic coordinates -> tx coordinates
(or the inverse if inverse = True)
Exons are zero indexed.
"""
positions = []
for s, e in exons:
positions += [i for i in range(s, e)]
if minus:
positions.reverse()
if inverse:
return {i:x for i, x in enumerate(positions)}
else:
return {x:i for i, x in enumerate(positions)}
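# Worked example (added comment): exons [(100, 103), (200, 202)] give
# tx_indexing(exons) == {100: 0, 101: 1, 102: 2, 200: 3, 201: 4}; with
# minus=True the transcript runs the other way, so 201 -> 0, ..., 100 -> 4.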
def pretty_str(x, fields = False):
"""
Handles tuples or lists
"""
def joined_string(x, sep=','):
return sep.join(list([str(y) for y in x]))
if isinstance(x, str):
return x
elif isinstance(x, float):
if abs(x) < 0.001:
return '%.1E' % x
else:
return '%.3f' % x
elif isinstance(x, tuple) or isinstance(x, list):
if fields:
return joined_string(x, '\t')
elif not x:
return '.'
elif isinstance(x[0], tuple) or isinstance(x[0], list):
return ';'.join([joined_string(y) for y in x])
else:
return joined_string(x)
else:
return str(x)
#######################
# FASTA file processing
#######################
def read_fasta(filename):
"""
Returns the contents of a fasta file in a list of (id, sequence)
tuples. Empty list returned if there are no fasta sequences in the file
"""
a = fasta_reader(filename)
seqs = []
while a.has_next():
seqs.append(next(a))
return seqs
class fasta_reader:
"""
Lightweight class for incrementally reading fasta files.
Supports reading directly from properly named
gzipped (.gz or .z) or bzip2ed (.bz2) files.
"""
file = None
nextheader=''
def __init__(self, filename):
try:
if filename.endswith('.gz') or filename.endswith('.z'):
self.file = gzip.open(filename, 'rb')
elif filename.endswith('.bz2'):
self.file = bz2.BZ2File(filename, 'rb')
else:
self.file = open(filename, 'r')
# fast forward to the first entry
while 1:
line = self.file.readline()
if line == '':
self.close()
return
elif line[0] == '>':
self.nextheader = line[1:].rstrip()
return
except IOError:
#print('No such file', filename)
raise
def has_next(self):
"""
Returns true if there are still fasta entries
"""
return len(self.nextheader) > 0
def __next__(self):
"""
Returns an (id, sequence) tuple, or () if file is finished
"""
#if global nextheader is empty, return empty
#otherwise, the header is the nextheader
try:
identifier = self.nextheader
total = []
while 1:
line = self.file.readline()
if line == '' or line[0] == '>': #EOF, end of entry
break
total.append(line.rstrip())
sequence = ''.join(total)
if len(line) > 0:
self.nextheader = line[1:].rstrip()
else:
self.nextheader = ''
self.close()
return (identifier, sequence)
except:
self.nextheader=''
self.close()
return ()
def close(self):
"""
Close the fasta file
"""
self.file.close()
def write_fasta(filename, id_or_list, seq='', width=60, gzip_compress = False):
"""
Writes a fasta file with the sequence(s)
version 1: write_fasta(myfilename, 'seq1_id', 'AAAAA')
version 2: write_fasta(myfilename, [('seq1_id', 'AAAAA'),
('seq2_id', BBBBB)])
"""
a = fasta_writer(filename, width=width, gzip_compress = gzip_compress)
a.write(id_or_list, seq)
a.close()
class fasta_writer:
"""
Rudimentary fasta file writer
Supports writing out to a gzipped file. If the passed in filename
does not end with .gz or .z, .gz is appended.
"""
file = None
width = 0
def __init__(self, filename, width=60, gzip_compress = False):
self.width = width
try:
if gzip_compress:
if not filename.endswith('.gz') and not filename.endswith('.z'):
filename += '.gz'
self.file = gzip.open(filename, 'wb')
else:
self.file = open(filename, 'w')
except IOError:
print('Can\'t open file.')
def write(self, id, seq=''):
"""
Supports an id and a sequence, an (id, seq) tuple, or
a list of sequence tuples
"""
if type(id) == type([]):
list(map(self.writeone, id))
else:
self.writeone(id, seq)
def writeone(self, id, seq=''):
"""
Internal method.
"""
if type(id) == type((0,0)):
seq = id[1]
id = id[0]
line_width = self.width
if self.width == 0:
line_width = len(seq)
self.file.write(">" + id + "\n")
i = 0
while i < len(seq):
self.file.write(seq[i:i+line_width] + "\n")
i+=line_width
def close(self):
"""
Closes the fasta file.
"""
self.file.close()
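# Hedged round-trip sketch (added; paths are placeholders):
#   write_fasta('out.fa.gz', [('seq1', 'ACGT'), ('seq2', 'GGCC')], gzip_compress=True)
#   read_fasta('out.fa.gz')   # -> [('seq1', 'ACGT'), ('seq2', 'GGCC')]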
| gpl-3.0 | 4,324,001,643,226,316,000 | 26.3125 | 749 | 0.553477 | false |
openstack/sahara-dashboard | sahara_dashboard/content/data_processing/clusters/cluster_templates/views.py | 1 | 7433 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from sahara_dashboard.api import sahara as saharaclient
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.tables as ct_tables
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.tabs as _tabs
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.copy as copy_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.create as create_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.workflows.edit as edit_flow
import sahara_dashboard.content.data_processing.clusters. \
cluster_templates.forms.import_forms as import_forms
class ClusterTemplateDetailsView(tabs.TabView):
tab_group_class = _tabs.ClusterTemplateDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ template.name|default:template.id }}"
@memoized.memoized_method
def get_object(self):
ct_id = self.kwargs["template_id"]
try:
return saharaclient.cluster_template_get(self.request, ct_id)
except Exception:
msg = _('Unable to retrieve details for '
'cluster template "%s".') % ct_id
redirect = self.get_redirect_url()
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ClusterTemplateDetailsView, self)\
.get_context_data(**kwargs)
cluster_template = self.get_object()
context['template'] = cluster_template
context['url'] = self.get_redirect_url()
context['actions'] = self._get_actions(cluster_template)
return context
def _get_actions(self, cluster_template):
table = ct_tables.ClusterTemplatesTable(self.request)
return table.render_row_actions(cluster_template)
@staticmethod
def get_redirect_url():
return reverse("horizon:project:data_processing."
"clusters:index")
class CreateClusterTemplateView(workflows.WorkflowView):
workflow_class = create_flow.CreateClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":create-cluster-template")
classes = ("ajax-modal",)
template_name = "cluster_templates/create.html"
page_title = _("Create Cluster Template")
class ConfigureClusterTemplateView(workflows.WorkflowView):
workflow_class = create_flow.ConfigureClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
page_title = _("Configure Cluster Template")
class CopyClusterTemplateView(workflows.WorkflowView):
workflow_class = copy_flow.CopyClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
page_title = _("Copy Cluster Template")
def get_context_data(self, **kwargs):
context = super(CopyClusterTemplateView, self)\
.get_context_data(**kwargs)
context["template_id"] = kwargs["template_id"]
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
template_id = self.kwargs['template_id']
try:
template = saharaclient.cluster_template_get(self.request,
template_id)
except Exception:
template = {}
exceptions.handle(self.request,
_("Unable to fetch cluster template."))
self._object = template
return self._object
def get_initial(self):
initial = super(CopyClusterTemplateView, self).get_initial()
initial['template_id'] = self.kwargs['template_id']
return initial
class EditClusterTemplateView(CopyClusterTemplateView):
workflow_class = edit_flow.EditClusterTemplate
success_url = ("horizon:project:data_processing.clusters"
":index")
template_name = "cluster_templates/configure.html"
class ImportClusterTemplateFileView(forms.ModalFormView):
template_name = "cluster_templates/import.html"
form_class = import_forms.ImportClusterTemplateFileForm
submit_label = _("Next")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-file")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-name")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateFileView, self).get_form_kwargs()
kwargs['next_view'] = ImportClusterTemplateNameView
return kwargs
class ImportClusterTemplateNameView(forms.ModalFormView):
template_name = "cluster_templates/import.html"
form_class = import_forms.ImportClusterTemplateNameForm
submit_label = _("Next")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-name")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-nodegroups")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateNameView, self).get_form_kwargs()
kwargs['next_view'] = ImportClusterTemplateNodegroupsView
if 'template_upload' in self.kwargs:
kwargs['template_upload'] = self.kwargs['template_upload']
return kwargs
class ImportClusterTemplateNodegroupsView(forms.ModalFormView):
template_name = "cluster_templates/import_nodegroups.html"
# template_name = "some_random_stuff.html"
form_class = import_forms.ImportClusterTemplateNodegroupsForm
submit_label = _("Import")
submit_url = reverse_lazy("horizon:project:data_processing."
"clusters:import-cluster-template-nodegroups")
success_url = reverse_lazy("horizon:project:data_processing."
"clusters:index")
page_title = _("Import Cluster Template")
def get_form_kwargs(self):
kwargs = super(ImportClusterTemplateNodegroupsView,
self).get_form_kwargs()
if 'template_upload' in self.kwargs:
kwargs['template_upload'] = self.kwargs['template_upload']
return kwargs
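# Added note (not in the original file): the three Import* views above form a
# small wizard. The file-upload view hands off to ImportClusterTemplateNameView
# via the next_view kwarg, which in turn hands off to
# ImportClusterTemplateNodegroupsView, forwarding the uploaded template through
# the "template_upload" kwarg at each step.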
| apache-2.0 | 4,539,590,981,925,018,600 | 40.066298 | 77 | 0.675636 | false |
caterinaurban/Lyra | src/lyra/tests/code_jam/pancake_flipper/pancakes_fyodr.py | 1 | 1842 | def pow(a: int, b: int) -> int:
power: int = 1
for i in range(b):
power = power * a
return power
def check(memos: Dict[(Tuple[(int, int, int, int)], int)], i: int, s: int, c: int, k: int) -> int:
if (s == 0):
return 0
elif ((i, s, c, k) not in memos):
memos[(i, s, c, k)]: int = (- 1)
flip: int = 0
for j in range(k):
flip += pow(2, j)
flip: int = flip * pow(2, i)
new_s: int = (s ^ flip)
best: int = (- 1)
for j in range((c - (k - 1))):
            maybe: int = check(memos, j, new_s, c, k)
if (maybe == 0):
best: int = maybe
break
elif (maybe == (- 1)):
pass
elif ((best == (- 1)) or (maybe < best)):
best: int = maybe
if (best == (- 1)):
memos[(i, s, c, k)]: int = best
else:
memos[(i, s, c, k)]: int = (best + 1)
return memos[(i, s, c, k)]
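# Added note: s encodes the pancake row as a bitmask, with bit i set when
# pancake i shows '-'. Flipping k adjacent pancakes starting at offset i
# XORs s with (2**k - 1) << i, which is exactly the `flip` value built above,
# and s == 0 means every pancake is happy-side up.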
T: int = int(input().strip())
lines: List[str] = []
memos: Dict[(Tuple[(int, int, int, int)], int)] = {
}
for t in range(1, (T + 1)):
line: List[str] = input().strip().split()
cakes: str = line[0]
k: int = int(line[1])
s: int = 0
for i in range(len(cakes)):
c: str = cakes[i]
if (c == '-'):
s += pow(2, i)
best: int = (- 1)
for i in range((len(cakes) - (k - 1))):
maybe: int = check(memos, i, s, len(cakes), k)
if (maybe == 0):
best: int = maybe
break
if (maybe == (- 1)):
pass
elif ((maybe < best) or (best == (- 1))):
best: int = maybe
if (best == (- 1)):
lines.append('Case #' + str(t) + ': ' + 'IMPOSSIBLE')
else:
lines.append('Case #' + str(t) + ': ' + str(best))
print(lines[(- 1)])
| mpl-2.0 | -7,948,510,953,474,577,000 | 28.709677 | 98 | 0.410966 | false |
RedHatInsights/insights-core | insights/parsers/corosync_cmapctl.py | 1 | 2063 | """
CorosyncCmapctl - Command ``corosync-cmapctl [params]``
=======================================================
This module parses the output of the ``corosync-cmapctl [params]`` command.
"""
from insights import parser, CommandParser
from insights.parsers import SkipException, ParseException
from insights.specs import Specs
@parser(Specs.corosync_cmapctl)
class CorosyncCmapctl(CommandParser, dict):
"""
Class for parsing the `/usr/sbin/corosync-cmapctl [params]` command.
All lines are stored in the dictionary with the left part of the equal
sign witout parenthese info as the key and the right part of equal sign
as the value.
Typical output of the command is::
config.totemconfig_reload_in_progress (u8) = 0
internal_configuration.service.0.name (str) = corosync_cmap
internal_configuration.service.0.ver (u32) = 0
internal_configuration.service.1.name (str) = corosync_cfg
internal_configuration.service.1.ver (u32) = 0
internal_configuration.service.2.name (str) = corosync_cpg
internal_configuration.service.2.ver (u32) = 0
Examples:
>>> type(corosync)
<class 'insights.parsers.corosync_cmapctl.CorosyncCmapctl'>
>>> 'internal_configuration.service.0.name' in corosync
True
>>> corosync['internal_configuration.service.0.name']
'corosync_cmap'
Raises:
SkipException: When there is no content
ParseException: When there is no "=" in the content
"""
def __init__(self, context):
super(CorosyncCmapctl, self).__init__(context, extra_bad_lines=['corosync-cmapctl: invalid option'])
def parse_content(self, content):
if not content:
raise SkipException
for line in content:
if '=' not in line:
raise ParseException("Can not parse line %s" % line)
key, value = [item.strip() for item in line.split('=')]
key_without_parenthese = key.split()[0]
self[key_without_parenthese] = value
| apache-2.0 | 1,929,703,601,921,233,200 | 36.509091 | 108 | 0.644207 | false |
ksteinfe/decodes | src/decodes/core/dc_interval.py | 1 | 10356 | from decodes.core import *
import math, random
class Interval(object):
"""
an interval class
"""
def __init__(self, a=0,b=1):
""" Interval Constructor.
:param a: First number of the interval.
:type a: float
:param b: Second number of the interval.
:type a: float
:result: Interval Object.
:rtype: Interval
"""
self.a = float(a)
self.b = float(b)
def __truediv__(self,divs): return self.__div__(divs)
def __div__(self, divs):
""" Overloads the division **(/)** operator. Calls Interval.divide(divs).
:param divs: Number of divisions.
:type divs: int
        :result: List of values dividing the interval.
:rtype: list
"""
return self.divide(divs)
def __floordiv__(self, other):
""" Overloads the integer division **(//)** operator. Calls Interval.subinterval(other).
        :param other: Number of subintervals.
:type other: int
:result: list of subintervals
:rtype: list
"""
return self.subinterval(other)
def __add__(self, val):
""" Overloads the addition **(+)** operator.
:param val: Value to add to the interval.
:type val: float
:result: New interval.
:rtype: Interval
"""
return Interval(self.a + val, self.b + val)
def __sub__(self, val):
""" Overloads the subtraction **(-)** operator.
:param val: Value to subtract from the interval.
:type val: float
:result: New interval.
:rtype: Interval
"""
return Interval(self.a - val, self.b - val)
def __contains__(self, number):
""" Overloads the containment **(in)** operator
:param number: Number whose containment must be determined.
:type number: float
:result: Boolean result of containment.
:rtype: bool
"""
ival = self.order()
return (ival.a <= number) and (ival.b >= number)
def __eq__(self, other):
""" Overloads the equal **(==)** operator.
:param other: Interval to be compared.
:type other: Interval
:result: Boolean result of comparison
:rtype: bool
"""
return all([self.a==other.a,self.b==other.b])
def __hash__(self):
return hash( (self.a, self.b) )
@property
def list(self):
""" Returns a list of the interval's start and end values.
:result: List of interval's components
:rtype: list
"""
return [self.a, self.b]
@property
def is_ordered(self):
""" Returns True if the start value of the interval is smaller than the end value.
:result: Boolean value
:rtype: bool
"""
return True if self.a < self.b else False
@property
def length(self):
"""| Returns the absolute value of length of the interval.
| For a signed representation, use delta.
:result: Absolute value of length of an interval.
:rtype: int
"""
length = self.b - self.a
if length > 0: return length
else: return length *-1
@property
def delta(self):
"""| Returns the signed delta of the interval, calculated as b-a
| For an unsigned representation, use length.
:result: Delta of an interval.
:rtype: float
"""
return float(self.b - self.a)
@property
def mid(self):
"""Returns the midpoint value of the interval.
"""
return self.eval(0.5)
def overlaps(self,other):
"""
"""
return other.a in self or other.b in self or self.a in other or self.b in other
def order(self):
""" Returns a copy of this interval with ordered values, such that a < b
:result: Ordered copy of Interval object.
:rtype: Interval
"""
if self.is_ordered: return Interval(self.a, self.b)
else: return Interval(self.b, self.a)
def invert(self):
"""| Returns a copy of this interval with swapped values.
| Such that this.a = new.b and this.b = new.a
:result: Interval object with swapped values.
:rtype: Interval
"""
return Interval(self.b, self.a)
def divide(self, divs=10, include_last=False):
"""| Divides this interval into a list of values equally spaced between a and b.
| Unless include_last is set to True, returned list will not include Interval.b: the first value returned is Interval.a and the last is Interval.b-(Interval.delta/divs)
:param divs: Number of interval divisions.
:type divs: int
:param include_last: Boolean value.
:type include_last: bool
:result: List of numbers in which a list is divided.
:rtype: list
"""
step = self.delta/float(divs)
if include_last : divs += 1
return [self.a+step*n for n in range(divs)]
def subinterval(self, divs):
""" Divides an interval into a list of equal size subintervals(interval objects).
:param divs: Number of subintervals.
:type divs: int
:result: List of subintervals (interval objects).
:rtype: list
"""
return [Interval(n,n+self.delta/float(divs)) for n in self.divide(divs)]
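    # Illustrative sketch (added; not in the original source): divide() yields values
    # while subinterval() yields Interval objects, e.g. for Interval(0,10):
    #   Interval(0,10).divide(2)      -> [0.0, 5.0]
    #   Interval(0,10).subinterval(2) -> [ival[0.0,5.0], ival[5.0,10.0]]
    # The // operator is shorthand for subinterval() via __floordiv__.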
def rand_interval(self, divs):
""" Divides an interval into a list of randomly sized subintervals(interval objects).
:param divs: Number of subintervals.
:type divs: int
:result: List of subintervals (interval objects).
:rtype: list
"""
        if divs < 1 : return [Interval(self.a, self.b)]
result = []
r_list = [self.a,self.b]
r_list.extend(self.eval(random.random()) for k in range(divs-1))
r_list.sort()
return [Interval(r_list[n],r_list[n+1]) for n in range(divs)]
def deval(self, number):
"""| Returns a parameter corresponding to the position of the given number within this interval.
| Effectively, the opposite of eval().
:param number: Number to find the parameter of.
:type number: float
:result: Parameter.
:rtype: float
::
print Interval(10,20).deval(12)
>>0.2
print Interval(10,20).deval(25)
>>1.5
"""
if self.delta == 0 : raise ZeroDivisionError("This interval cannot be devaluated because the delta is zero")
return (number-self.a) / self.delta
def eval(self, t,limited=False):
"""| Evaluates a given parameter within this interval.
| For example, given an Interval(0->2*math.pi): eval(0.5) == math.pi
| Optionally, you may limit the resulting output to this interval
:param t: Number to evaluate.
:type t: float
:result: Evaluated number.
:rtype: float
::
print Interval(10,20).eval(0.2)
>>12.0
            print Interval(10,20).eval(1.5)
>>25.0
"""
ret = self.delta * t + self.a
if not limited : return ret
return self.limit_val(ret)
def limit_val(self, n):
""" Limits a given value to the min and max of this Interval.
:param n: the number to be limited by the Interval.
:type n: float
:result: a number between the min and max of this Interval (inclusive).
:rtype: float
"""
if n < self.a : return self.a
if n > self.b : return self.b
return n
def __repr__(self): return "ival[{0},{1}]".format(self.a,self.b)
def remap_to(self,val,target_interval=None,limited=False):
return Interval.remap(val,self,target_interval,limited)
@staticmethod
def remap(val, source_interval, target_interval=None, limited=False):
""" Translates a number from its position within the source interval to its relative position in the target interval. Optionally, you may limit the resulting output to the target interval.
:param val: Number to remap.
:type val: float
:param source_interval: Source interval.
:type source_interval: interval
:param target_interval: Target interval
:type target_interval: interval
:param limited: flag that limits result to target interval
:type limited: bool
:result: The given number remapped to the target interval.
:rtype: float
"""
if target_interval is None: target_interval = Interval(0,1)
t = source_interval.deval(val)
return target_interval.eval(t,limited)
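    # Illustrative sketch (added; not in the original source): remap de-evaluates the
    # value in the source interval and evaluates the resulting parameter in the target:
    #   Interval.remap(12, Interval(10,20), Interval(0,100))      -> 20.0
    #   Interval.remap(25, Interval(10,20), Interval(0,1), True)  -> 1.0 (limited to the target)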
@staticmethod
def encompass(values = [0],nudge=False):
""" Returns an interval defined by the minimum and maximum of a list of values.
:param values: A list of numbers.
:type values: list
:result: An Interval from the min and max of a list of values.
:rtype: Interval
"""
from .dc_base import EPSILON
if nudge: return Interval(min(values)-EPSILON, max(values)+EPSILON)
a, b = min(values), max(values)
if a == b : return False
return Interval(a,b)
@staticmethod
def twopi():
""" Creates an interval from 0->2PI
:result: Interval from 0 to 2PI.
:rtype: Interval
"""
return Interval(0,math.pi*2)
@staticmethod
def pi():
""" Creates an interval from 0->PI
:result: Interval from 0 to 2PI.
:rtype: Interval
"""
return Interval(0,math.pi)
| gpl-3.0 | 6,334,377,875,742,004,000 | 31.772152 | 197 | 0.539784 | false |
bnbowman/BifoAlgo | src/Chapter2/Sec24_LeaderCycleSeq.py | 1 | 3081 | #! /usr/bin/env python3
from collections import Counter
from operator import itemgetter
def cyclo_seq( spectrum_file, spectrum_table_file ):
N, spectrum = parse_spectrum_file( spectrum_file )
spectrum_table = parse_spectrum_table( spectrum_table_file )
aa_weights = set(spectrum_table.values())
peptides = list(find_possible_peptides( spectrum, aa_weights, N ))
max_peptides = find_max_peptides( peptides, spectrum )
return set(['-'.join([str(w) for w in p]) for p in max_peptides])
def find_possible_peptides( spectrum, weights, N ):
peptides = [ [0] ]
true_weight = max(spectrum)
while peptides:
peptides = expand_peptides( peptides, weights )
peptides = [p for p in peptides if sum(p) <= max(spectrum)]
for p in peptides:
if sum( p ) != true_weight:
continue
yield p
del p
peptides = cut_peptides( peptides, spectrum, N )
def expand_peptides( peptides, weights ):
new_peptides = []
for peptide in peptides:
for weight in weights:
if peptide == [0]:
copy = []
else:
copy = peptide[:]
copy.append( weight )
new_peptides.append( copy )
return new_peptides
def cut_peptides( peptides, spectrum, N ):
if len(peptides) <= N:
return peptides
scores = {}
for peptide in peptides:
sub_peptides = find_subpeptides( peptide )
weights = [sum(p) for p in sub_peptides]
peptide_str = '-'.join( [str(p) for p in peptide] )
scores[peptide_str] = sum([1 for w in weights if w in spectrum])
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
min_score = sorted_scores[N][1]
peptides = [p for p, s in scores.items() if s >= min_score]
peptides = [[int(n) for n in p.split('-')] for p in peptides]
return peptides
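# Worked sketch (added; not in the original source): cut_peptides keeps the leaderboard's
# top N scores *with ties*. With N = 2 and scores {A: 5, B: 4, C: 4, D: 1}, the cutoff is
# sorted_scores[N][1] == 4, so A, B and C all survive while D is dropped.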
def find_max_peptides( peptides, spectrum ):
scores = {}
for peptide in peptides:
sub_peptides = find_subpeptides( peptide )
weights = [sum(p) for p in sub_peptides]
peptide_str = '-'.join( [str(p) for p in peptide] )
scores[peptide_str] = sum([1 for w in weights if w in spectrum])
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
max_score = sorted_scores[0][1]
peptides = [p for p, s in scores.items() if s == max_score]
peptides = [[int(n) for n in p.split('-')] for p in peptides]
return peptides
def find_subpeptides( peptide ):
subpeptides = [ peptide ]
for j in range(1, len(peptide)):
for i in range(len(peptide)-j+1):
subpeptides.append( peptide[i:i+j] )
return subpeptides
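# Illustrative sketch (added; not in the original source): find_subpeptides returns the
# full peptide plus every contiguous (non-wrapping) fragment, e.g.
#   find_subpeptides([1, 2, 3]) -> [[1, 2, 3], [1], [2], [3], [1, 2], [2, 3]]
# Scoring then counts how many of those fragment weights appear in the experimental spectrum.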
def parse_spectrum_file( spectrum_file ):
inputs = []
with open(spectrum_file) as handle:
for line in handle:
inputs += [int(w) for w in line.strip().split()]
return inputs[0], inputs[1:]
def parse_spectrum_table( spectrum_table_file ):
table = {}
with open( spectrum_table_file ) as handle:
for line in handle:
aa, size = line.strip().split()
try:
size = int(size)
table[aa] = size
except:
raise ValueError
return table
if __name__ == '__main__':
import sys
spectrum_file = sys.argv[1]
spectrum_table_file = sys.argv[2]
results = cyclo_seq( spectrum_file, spectrum_table_file )
print(' '.join(results))
| gpl-2.0 | -5,203,654,426,318,608,000 | 29.81 | 72 | 0.674132 | false |
taigaio/taiga-back | taiga/projects/attachments/permissions.py | 1 | 5169 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm,
AllowAny, PermissionComponent)
class IsAttachmentOwnerPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
if obj and obj.owner and request.user.is_authenticated:
return request.user == obj.owner
return False
class CommentAttachmentPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
if obj.from_comment:
return True
return False
class EpicAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_epics') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_epic') | (CommentAttachmentPerm() & HasProjectPerm('comment_epic'))
update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_epic') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class UserStoryAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_us') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_us') | (CommentAttachmentPerm() & HasProjectPerm('comment_us'))
update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_us') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class TaskAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_tasks') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_task') | (CommentAttachmentPerm() & HasProjectPerm('comment_task'))
update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_task') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class IssueAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_issues') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_issue') | (CommentAttachmentPerm() & HasProjectPerm('comment_issue'))
update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_issue') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class WikiAttachmentPermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_wiki_pages') | IsAttachmentOwnerPerm()
create_perms = HasProjectPerm('modify_wiki_page') | (CommentAttachmentPerm() & HasProjectPerm('comment_wiki_page'))
update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
partial_update_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
destroy_perms = HasProjectPerm('modify_wiki_page') | IsAttachmentOwnerPerm()
list_perms = AllowAny()
class RawAttachmentPerm(PermissionComponent):
def check_permissions(self, request, view, obj=None):
is_owner = IsAttachmentOwnerPerm().check_permissions(request, view, obj)
if obj.content_type.app_label == "epics" and obj.content_type.model == "epic":
return EpicAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "userstories" and obj.content_type.model == "userstory":
return UserStoryAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "tasks" and obj.content_type.model == "task":
return TaskAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "issues" and obj.content_type.model == "issue":
return IssueAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
elif obj.content_type.app_label == "wiki" and obj.content_type.model == "wikipage":
return WikiAttachmentPermission(request, view).check_permissions('retrieve', obj) or is_owner
return False
class RawAttachmentPermission(TaigaResourcePermission):
retrieve_perms = RawAttachmentPerm()
| agpl-3.0 | -7,189,436,673,565,517,000 | 52.28866 | 119 | 0.734185 | false |
cysuncn/python | spark/crm/PROC_O_LNA_XDXT_CUSTOMER_RELATIVE.py | 1 | 5008 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_RELATIVE').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#Dates needed for processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
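# Worked example (added for clarity; the date is hypothetical): with etl_date = '20160315'
#   V_DT     = '20160315'   (ETL date)
#   V_DT_LD  = '20160314'   (previous day)
#   V_DT_FMD = '20160301'   (first day of the month)
#   V_DT_LMD = '20160229'   (last day of the previous month; 2016 is a leap year)
#   V_DT10   = '2016-03-15' (10-character date)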
V_STEP = 0
O_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_RELATIVE/*')
O_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("O_CI_XDXT_CUSTOMER_RELATIVE")
#Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.RELATIVEID AS RELATIVEID
,A.RELATIONSHIP AS RELATIONSHIP
,A.CUSTOMERNAME AS CUSTOMERNAME
,A.CERTTYPE AS CERTTYPE
,A.CERTID AS CERTID
,A.FICTITIOUSPERSON AS FICTITIOUSPERSON
,A.CURRENCYTYPE AS CURRENCYTYPE
,A.INVESTMENTSUM AS INVESTMENTSUM
,A.OUGHTSUM AS OUGHTSUM
,A.INVESTMENTPROP AS INVESTMENTPROP
,A.INVESTDATE AS INVESTDATE
,A.STOCKCERTNO AS STOCKCERTNO
,A.DUTY AS DUTY
,A.TELEPHONE AS TELEPHONE
,A.EFFECT AS EFFECT
,A.WHETHEN1 AS WHETHEN1
,A.WHETHEN2 AS WHETHEN2
,A.WHETHEN3 AS WHETHEN3
,A.WHETHEN4 AS WHETHEN4
,A.WHETHEN5 AS WHETHEN5
,A.DESCRIBE AS DESCRIBE
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.SEX AS SEX
,A.BIRTHDAY AS BIRTHDAY
,A.SINO AS SINO
,A.FAMILYADD AS FAMILYADD
,A.FAMILYZIP AS FAMILYZIP
,A.EDUEXPERIENCE AS EDUEXPERIENCE
,A.INVESTYIELD AS INVESTYIELD
,A.HOLDDATE AS HOLDDATE
,A.ENGAGETERM AS ENGAGETERM
,A.HOLDSTOCK AS HOLDSTOCK
,A.LOANCARDNO AS LOANCARDNO
,A.EFFSTATUS AS EFFSTATUS
,A.CUSTOMERTYPE AS CUSTOMERTYPE
,A.INVESINITIALSUM AS INVESINITIALSUM
,A.ACCOUNTSUM AS ACCOUNTSUM
,A.FAIRSUM AS FAIRSUM
,A.DIATHESIS AS DIATHESIS
,A.ABILITY AS ABILITY
,A.INNOVATION AS INNOVATION
,A.CHARACTER AS CHARACTER
,A.COMPETITION AS COMPETITION
,A.STRATEGY AS STRATEGY
,A.RISE AS RISE
,A.POSSESS AS POSSESS
,A.EYESHOT AS EYESHOT
,A.FORESIGHT AS FORESIGHT
,A.STATUS AS STATUS
,A.INDUSTRY AS INDUSTRY
,A.PROSECUTION AS PROSECUTION
,A.FIRSTINVESTSUM AS FIRSTINVESTSUM
,A.FIRSTINVESTDATE AS FIRSTINVESTDATE
,A.LASTINVESTSUM AS LASTINVESTSUM
,A.LASTINVESTDATE AS LASTINVESTDATE
,A.DEADLINE AS DEADLINE
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
      FROM O_CI_XDXT_CUSTOMER_RELATIVE A                               --customer related-party information
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_RELATIVE = sqlContext.sql(sql)
F_CI_XDXT_CUSTOMER_RELATIVE.registerTempTable("F_CI_XDXT_CUSTOMER_RELATIVE")
dfn="F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT+".parquet"
F_CI_XDXT_CUSTOMER_RELATIVE.cache()
nrows = F_CI_XDXT_CUSTOMER_RELATIVE.count()
F_CI_XDXT_CUSTOMER_RELATIVE.write.save(path=hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_CUSTOMER_RELATIVE.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_RELATIVE/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_RELATIVE lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | 2,939,206,429,795,156,000 | 41.551724 | 179 | 0.545989 | false |
saffsd/updatedir | src/updatedir/__init__.py | 1 | 4396 | import logging
import os
import urlparse
logger = logging.getLogger(__name__)
def updatetree(source, dest, overwrite=False):
parsed_url = urlparse.urlparse(dest)
logger.debug(parsed_url)
if parsed_url.scheme == '':
import shutil
if overwrite and os.path.exists(parsed_url.path):
logger.debug("Deleting existing '%s'", parsed_url.path)
shutil.rmtree(parsed_url.path)
logger.debug("Local copy '%s' -> '%s'", source, parsed_url.path)
shutil.copytree(source, parsed_url.path)
else:
dest = parsed_url.path
def visit(arg, dirname, names):
logger.debug("Visit '%s'", dirname)
abs_dir = os.path.normpath(os.path.join(dest, os.path.relpath(dirname, source)))
logger.debug("abs_dir '%s'", abs_dir)
for name in names:
src = os.path.join(dirname, name)
dst = os.path.join(abs_dir, name)
logger.debug("Processing '%s'", src)
if os.path.isdir(src):
if not os.path.isdir(dst):
logger.debug("mkdir '%s'", dst)
os.mkdir(dst)
else:
if os.path.exists(dst):
if overwrite:
logger.debug("overwrite '%s' -> '%s'", src, dst)
shutil.copyfile(src,dst)
else:
logger.debug("will not overwrite '%s'", dst)
else:
logger.debug("copy '%s' -> '%s'", src, dst)
shutil.copyfile(src,dst)
# TODO: mkdir -p behaviour
if not os.path.exists(dest):
os.mkdir(dest)
os.path.walk(source, visit, None)
elif parsed_url.scheme == 'ssh':
import paramiko
import getpass
# Work out host details
host = parsed_url.hostname
port = parsed_url.port if parsed_url.port else 22
transport = paramiko.Transport((host,port))
# Connect the transport
username = parsed_url.username if parsed_url.username else getpass.getuser()
logger.debug("Using username '%s'", username)
if parsed_url.password:
logger.debug("Using password")
transport.connect(username = username, password = parsed_url.password)
# TODO allow the keyfile to be configured in .hydratrc
elif os.path.exists(os.path.expanduser('~/.ssh/id_rsa')):
logger.debug("Using private RSA key")
privatekeyfile = os.path.expanduser('~/.ssh/id_rsa')
mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
transport.connect(username = username, pkey = mykey)
elif os.path.exists(os.path.expanduser('~/.ssh/id_dsa')):
logger.debug("Using private DSS key")
privatekeyfile = os.path.expanduser('~/.ssh/id_dsa')
mykey = paramiko.DSSKey.from_private_key_file(privatekeyfile)
transport.connect(username = username, pkey = mykey)
else:
raise ValueError, "Cannot connect transport: Unable to authenticate"
logger.debug("Transport Connected")
# Start the sftp client
sftp = paramiko.SFTPClient.from_transport(transport)
def visit(arg, dirname, names):
logger.debug("Visit '%s'", dirname)
abs_dir = sftp.normalize(os.path.relpath(dirname, source))
logger.debug("abs_dir '%s'", abs_dir)
for name in names:
src = os.path.join(dirname, name)
dst = os.path.join(abs_dir, name)
logger.debug("Processing '%s'", src)
if os.path.isdir(src):
try:
sftp.stat(dst)
except IOError:
sftp.mkdir(dst)
else:
try:
sftp.stat(dst)
if overwrite:
logger.debug("overwrite '%s'", dst)
sftp.put(src, dst)
except IOError:
sftp.put(src, dst)
head = str(parsed_url.path)
tails = []
done = False
# Roll back the path until we find one that exists
while not done:
try:
sftp.stat(head)
done = True
except IOError:
head, tail = os.path.split(head)
tails.append(tail)
# Now create all the missing paths that don't exist
for tail in reversed(tails):
head = os.path.join(head, tail)
sftp.mkdir(head)
sftp.chdir(parsed_url.path)
os.path.walk(source, visit, None)
else:
raise ValueError, "Don't know how to use scheme '%s'" % parsed_url.scheme
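# Usage sketch (added for clarity; the paths and host below are hypothetical):
#   updatetree('/tmp/report', '/srv/backups/report')                   # plain local copy
#   updatetree('/tmp/report', 'ssh://deploy@example.org/var/www/out')  # copy over SFTP
# For the ssh scheme, authentication falls back to ~/.ssh/id_rsa or ~/.ssh/id_dsa when
# the URL carries no password.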
def main():
import sys
logging.basicConfig(level = logging.DEBUG)
updatetree(sys.argv[1], sys.argv[2], overwrite=False)
| gpl-3.0 | -6,502,143,634,632,490,000 | 33.077519 | 88 | 0.605778 | false |
jelly/calibre | src/calibre/gui2/actions/show_quickview.py | 2 | 7899 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QAction
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.quickview import Quickview
from calibre.gui2 import error_dialog, gprefs
from calibre.gui2.widgets import LayoutButton
class QuickviewButton(LayoutButton): # {{{
def __init__(self, gui, quickview_manager):
self.qv = quickview_manager
qaction = quickview_manager.qaction
LayoutButton.__init__(self, I('quickview.png'), _('Quickview'),
parent=gui, shortcut=qaction.shortcut().toString())
self.toggled.connect(self.update_state)
self.action_toggle = qaction
self.action_toggle.triggered.connect(self.toggle)
self.action_toggle.changed.connect(self.update_shortcut)
def update_state(self, checked):
if checked:
self.set_state_to_hide()
self.qv._show_quickview()
else:
self.set_state_to_show()
self.qv._hide_quickview()
def save_state(self):
gprefs['quickview visible'] = bool(self.isChecked())
def restore_state(self):
if gprefs.get('quickview visible', False):
self.toggle()
# }}}
current_qv_action_pi = None
def set_quickview_action_plugin(pi):
global current_qv_action_pi
current_qv_action_pi = pi
def get_quickview_action_plugin():
return current_qv_action_pi
class ShowQuickviewAction(InterfaceAction):
name = 'Quickview'
action_spec = (_('Quickview'), 'quickview.png', None, None)
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
current_instance = None
def genesis(self):
self.gui.keyboard.register_shortcut('Toggle Quickview', _('Toggle Quickview'),
description=_('Open/close the Quickview panel/window'),
default_keys=('Q',), action=self.qaction,
group=self.action_spec[0])
self.focus_action = QAction(self.gui)
self.gui.addAction(self.focus_action)
self.gui.keyboard.register_shortcut('Focus To Quickview', _('Focus to Quickview'),
description=_('Move the focus to the Quickview panel/window'),
default_keys=('Shift+Q',), action=self.focus_action,
group=self.action_spec[0])
self.focus_action.triggered.connect(self.focus_quickview)
self.focus_bl_action = QAction(self.gui)
self.gui.addAction(self.focus_bl_action)
self.gui.keyboard.register_shortcut('Focus from Quickview',
_('Focus from Quickview to the book list'),
description=_('Move the focus from Quickview to the book list'),
default_keys=('Shift+Alt+Q',), action=self.focus_bl_action,
group=self.action_spec[0])
self.focus_bl_action.triggered.connect(self.focus_booklist)
self.focus_refresh_action = QAction(self.gui)
self.gui.addAction(self.focus_refresh_action)
self.gui.keyboard.register_shortcut('Refresh from Quickview',
_('Refresh Quickview'),
description=_('Refresh the information shown in the Quickview pane'),
action=self.focus_refresh_action,
group=self.action_spec[0])
self.focus_refresh_action.triggered.connect(self.refill_quickview)
self.search_action = QAction(self.gui)
self.gui.addAction(self.search_action)
self.gui.keyboard.register_shortcut('Search from Quickview', _('Search from Quickview'),
description=_('Search for the currently selected Quickview item'),
default_keys=('Shift+S',), action=self.search_action,
group=self.action_spec[0])
self.search_action.triggered.connect(self.search_quickview)
self.search_action.changed.connect(self.set_search_shortcut)
self.menuless_qaction.changed.connect(self.set_search_shortcut)
self.qv_button = QuickviewButton(self.gui, self)
def initialization_complete(self):
set_quickview_action_plugin(self)
def _hide_quickview(self):
'''
This is called only from the QV button toggle
'''
if self.current_instance:
if not self.current_instance.is_closed:
self.current_instance._reject()
self.current_instance = None
def _show_quickview(self, *args):
'''
This is called only from the QV button toggle
'''
if self.current_instance:
if not self.current_instance.is_closed:
self.current_instance._reject()
self.current_instance = None
if self.gui.current_view() is not self.gui.library_view:
error_dialog(self.gui, _('No quickview available'),
_('Quickview is not available for books '
'on the device.')).exec_()
return
self.qv_button.set_state_to_hide()
index = self.gui.library_view.currentIndex()
self.current_instance = Quickview(self.gui, index)
self.current_instance.reopen_after_dock_change.connect(self.open_quickview)
self.set_search_shortcut()
self.current_instance.show()
self.current_instance.quickview_closed.connect(self.qv_button.set_state_to_show)
def set_search_shortcut(self):
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.addAction(self.focus_bl_action)
self.current_instance.set_shortcuts(self.search_action.shortcut().toString(),
self.menuless_qaction.shortcut().toString())
def open_quickview(self):
'''
QV moved from/to dock. Close and reopen the pane/window.
Also called when QV is closed and the user asks to move the focus
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
self.current_instance = None
self.qaction.triggered.emit()
def refill_quickview(self):
'''
Called when the columns shown in the QV pane might have changed.
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.refill()
def refresh_quickview(self, idx):
'''
Called when the data shown in the QV pane might have changed.
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.refresh(idx)
def change_quickview_column(self, idx):
'''
Called from the column header context menu to change the QV query column
'''
self.focus_quickview()
self.current_instance.slave(idx)
def library_changed(self, db):
'''
If QV is open, close it then reopen it so the columns are correct
'''
if self.current_instance and not self.current_instance.is_closed:
self.current_instance.reject()
self.qaction.triggered.emit()
def focus_quickview(self):
'''
Used to move the focus to the QV books table. Open QV if needed
'''
if not self.current_instance or self.current_instance.is_closed:
self.open_quickview()
else:
self.current_instance.set_focus()
def focus_booklist(self):
self.gui.activateWindow()
self.gui.library_view.setFocus()
def search_quickview(self):
if not self.current_instance or self.current_instance.is_closed:
return
self.current_instance.do_search()
| gpl-3.0 | -8,621,730,550,062,112,000 | 37.531707 | 96 | 0.620965 | false |
Pal3love/otRebuilder | Package/otRebuilder/Dep/ufoLib/test/test_filenames.py | 1 | 3845 | from __future__ import unicode_literals
import unittest
from ufoLib.filenames import userNameToFileName, handleClash1, handleClash2
class TestFilenames(unittest.TestCase):
def test_userNameToFileName(self):
self.assertEqual(userNameToFileName("a"), "a")
self.assertEqual(userNameToFileName("A"), "A_")
self.assertEqual(userNameToFileName("AE"), "A_E_")
self.assertEqual(userNameToFileName("Ae"), "A_e")
self.assertEqual(userNameToFileName("ae"), "ae")
self.assertEqual(userNameToFileName("aE"), "aE_")
self.assertEqual(userNameToFileName("a.alt"), "a.alt")
self.assertEqual(userNameToFileName("A.alt"), "A_.alt")
self.assertEqual(userNameToFileName("A.Alt"), "A_.A_lt")
self.assertEqual(userNameToFileName("A.aLt"), "A_.aL_t")
self.assertEqual(userNameToFileName("A.alT"), "A_.alT_")
self.assertEqual(userNameToFileName("T_H"), "T__H_")
self.assertEqual(userNameToFileName("T_h"), "T__h")
self.assertEqual(userNameToFileName("t_h"), "t_h")
self.assertEqual(userNameToFileName("F_F_I"), "F__F__I_")
self.assertEqual(userNameToFileName("f_f_i"), "f_f_i")
self.assertEqual(userNameToFileName("Aacute_V.swash"),
"A_acute_V_.swash")
self.assertEqual(userNameToFileName(".notdef"), "_notdef")
self.assertEqual(userNameToFileName("con"), "_con")
self.assertEqual(userNameToFileName("CON"), "C_O_N_")
self.assertEqual(userNameToFileName("con.alt"), "_con.alt")
self.assertEqual(userNameToFileName("alt.con"), "alt._con")
def test_userNameToFileName_ValueError(self):
with self.assertRaises(ValueError):
userNameToFileName(b"a")
with self.assertRaises(ValueError):
userNameToFileName({"a"})
with self.assertRaises(ValueError):
userNameToFileName(("a",))
with self.assertRaises(ValueError):
userNameToFileName(["a"])
with self.assertRaises(ValueError):
userNameToFileName(["a"])
with self.assertRaises(ValueError):
userNameToFileName(b"\xd8\x00")
def test_handleClash1(self):
prefix = ("0" * 5) + "."
suffix = "." + ("0" * 10)
existing = ["a" * 5]
e = list(existing)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000001.0000000000'
)
e = list(existing)
e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000002.0000000000'
)
e = list(existing)
e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
self.assertEqual(
handleClash1(userName="A" * 5, existing=e, prefix=prefix,
suffix=suffix),
'00000.AAAAA000000000000001.0000000000'
)
def test_handleClash2(self):
prefix = ("0" * 5) + "."
suffix = "." + ("0" * 10)
existing = [prefix + str(i) + suffix for i in range(100)]
e = list(existing)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.100.0000000000'
)
e = list(existing)
e.remove(prefix + "1" + suffix)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.1.0000000000'
)
e = list(existing)
e.remove(prefix + "2" + suffix)
self.assertEqual(
handleClash2(existing=e, prefix=prefix, suffix=suffix),
'00000.2.0000000000'
)
| mit | -1,878,946,315,541,174,300 | 38.234694 | 75 | 0.585696 | false |
kantai/passe-framework-prototype | django/http/__init__.py | 1 | 31597 | import datetime
import os
import re
import time
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin
#try:
# from StringIO import StringIO
#except ImportError:
from StringIO import StringIO
from copy import deepcopy
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.4 or 2.5.
_morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly')
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
_tc.load('f:oo')
_cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output()
if False: #_morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
SimpleCookie = Cookie.SimpleCookie
else:
class Morsel(Cookie.Morsel):
def __getstate__(self):
d = dict([(k,v) for k,v in dict.items(self)])
return d
def __setstate__(self, state):
for k,v in state.items():
dict.__setitem__(self, k, v)
if not _morsel_supports_httponly:
def __setitem__(self, K, V):
K = K.lower()
if K == "httponly":
if V:
# The superclass rejects httponly as a key,
# so we jump to the grandparent.
super(Cookie.Morsel, self).__setitem__(K, V)
else:
super(Morsel, self).__setitem__(K, V)
def OutputString(self, attrs=None):
output = super(Morsel, self).OutputString(attrs)
if "httponly" in self:
output += "; httponly"
return output
class SimpleCookie(Cookie.SimpleCookie):
def __getstate__(self):
d = dict([(k,v) for k,v in dict.items(self)])
return d
def __setstate__(self, state):
for k,v in state.items():
dict.__setitem__(self, k, v)
if not _morsel_supports_httponly:
def __set(self, key, real_value, coded_value):
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",","\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = set()
self._BaseCookie__set = self._loose_set
super(SimpleCookie, self).load(rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = Cookie.BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
try:
self._strict_set(key, real_value, coded_value)
except Cookie.CookieError:
self.bad_cookies.add(key)
dict.__setitem__(self, key, Cookie.Morsel())
class CompatCookie(SimpleCookie):
def __init__(self, *args, **kwargs):
super(CompatCookie, self).__init__(*args, **kwargs)
import warnings
warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.",
PendingDeprecationWarning)
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
pass
class HttpRequestDummy(object):
"""
A stripped down HTTP request object that's pickle-able
TODO FILES
"""
def __init__(self,request):
self.GET = request.GET
self.POST = request.POST
self.COOKIES = request.COOKIES
self.META = dict([(k,v) for k,v in request.META.items() if not k.startswith('wsgi')])
self.FILES = request.FILES
self.path = request.path
self.session = request.session
self.path_info = request.path_info
self.user = request.user
self.method = request.method
if hasattr(request, '_messages'):
self._messages = request._messages
def __repr__(self):
return "G: %s \nP: %s \nC: %s\nM: %s\np: %s\nm: %s" % (self.GET,
self.POST,
self.COOKIES,
self.META,
self.path,
self.method)
def _get_request(self):
from django.utils import datastructures
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
REQUEST = property(_get_request)
typeset = [int, float, str, unicode, bool]
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
def __repr__(self):
return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
pformat(self.META))
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
def _set_encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _get_encoding(self):
return self._encoding
encoding = property(_get_encoding, _set_encoding)
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
# If thre are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
def _get_raw_post_data(self):
if not hasattr(self, '_raw_post_data'):
if self._read_started:
raise Exception("You cannot access raw_post_data after reading from request's data stream")
try:
content_length = int(self.META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length:
self._raw_post_data = self.read(content_length)
else:
self._raw_post_data = self.read()
self._stream = StringIO(self._raw_post_data) # HACHI: used to be self._stream -- uh oh, 0-copy fuckup.
#self._streamed = _stream.getvalue()
return self._raw_post_data
raw_post_data = property(_get_raw_post_data)
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_raw_post_data'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
if hasattr(self, '_raw_post_data'):
# Use already read data
data = StringIO(self._raw_post_data)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except:
# An error occured while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
# Mark that an error occured. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
else:
self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()
## File-like and iterator interface.
##
## Expects self._stream to be set to an appropriate source of bytes by
## a corresponding request subclass (WSGIRequest or ModPythonRequest).
## Also when request data has already been read by request.POST or
## request.raw_post_data, self._stream points to a StringIO instance
## containing that data.
def read(self, size=0):
self._read_started = True
return self._stream.read(size)
def readline(self, *args, **kwargs):
self._read_started = True
return self._stream.readline(*args, **kwargs)
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
def get_changeset(request):
"""
Right now, this just makes a dict of all the attributes that we allow to be modified.
"""
d = {}
for attr, val in request.__dict__.items():
if callable(val):
continue
if attr in ['PUT', 'GET', 'REQUEST', 'META', 'path', 'path_info', 'script_name',
'method', '_request', '_post', '_files']:
continue
d[attr] = val
return d
class RequestDelta(object):
def __init__(self, wrapped):
self._wrapped = wrapped
self.changeset = {}
def __getattr__(self, name):
# if an attribute is fetched, we should add it to the changset :(
if name in self.changeset:
return self.changeset[name]
if not hasattr(self._wrapped, name):
raise AttributeError("Barfing %s on %s" % (name, type(self._wrapped) ))
val = getattr(self._wrapped, name)
if name in ['PUT', 'GET', 'REQUEST', 'META']: # these are immutable now, so deal with it punks
return val
if callable(val):
return val
if type(val) not in typeset:
self.changeset[name] = val
return val
def __setattr__(self, name, val):
if name == '_wrapped' or name == 'changeset':
self.__dict__[name] = val
else:
self.changeset[name] = val
def __getstate__(self):
return self.changeset
def __setstate__(self, state):
if '_wrapped' not in self.__dict__:
self._wrapped = None
self.changeset = state
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it.
Values retrieved from this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
# These are both reset in __init__, but is specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string, mutable=False, encoding=None):
MultiValueDict.__init__(self)
if not encoding:
# *Important*: do not import settings any earlier because of note
# in core.handlers.modpython.
from django.conf import settings
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
self.appendlist(force_unicode(key, encoding, errors='replace'),
force_unicode(value, encoding, errors='replace'))
self._mutable = mutable
def _get_encoding(self):
if self._encoding is None:
# *Important*: do not import settings at the module level because
# of the note in core.handlers.modpython.
from django.conf import settings
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
def _set_encoding(self, value):
self._encoding = value
encoding = property(_get_encoding, _set_encoding)
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.__setitem__(self, key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in dict.items(self):
dict.__setitem__(result, key, value)
return result
def __deepcopy__(self, memo):
import django.utils.copycompat as copy
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
MultiValueDict.setlist(self, key, list_)
def setlistdefault(self, key, default_list=()):
self._assert_mutable()
if key not in self:
self.setlist(key, default_list)
return MultiValueDict.getlist(self, key)
def appendlist(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.appendlist(self, key, value)
def update(self, other_dict):
self._assert_mutable()
f = lambda s: str_to_unicode(s, self.encoding)
if hasattr(other_dict, 'lists'):
for key, valuelist in other_dict.lists():
for value in valuelist:
MultiValueDict.update(self, {f(key): f(value)})
else:
d = dict([(f(k), f(v)) for k, v in other_dict.items()])
MultiValueDict.update(self, d)
def pop(self, key, *args):
self._assert_mutable()
return MultiValueDict.pop(self, key, *args)
def popitem(self):
self._assert_mutable()
return MultiValueDict.popitem(self)
def clear(self):
self._assert_mutable()
MultiValueDict.clear(self)
def setdefault(self, key, default=None):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
default = str_to_unicode(default, self.encoding)
return MultiValueDict.setdefault(self, key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = smart_str(k, self.encoding)
output.extend([encode(k, smart_str(v, self.encoding))
for v in list_])
return '&'.join(output)
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, Cookie.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie, ignore_parse_errors=True)
except Cookie.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
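# Illustrative sketch (added; not part of the original module): parse_cookie turns a raw
# Cookie header into a plain dict of strings, e.g.
#   parse_cookie('sessionid=abc123; csrftoken=xyz') -> {'sessionid': 'abc123', 'csrftoken': 'xyz'}
# Invalid cookie data yields an empty dict instead of raising.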
class BadHeaderError(ValueError):
pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = SimpleCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type
def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content
def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value
def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return self._headers.has_key(header.lower())
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
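    # Worked sketch (added for clarity; `response` is a hypothetical HttpResponse instance):
    # a datetime passed as `expires` is converted into a max-age relative to now, e.g.
    #   response.set_cookie('token', 'abc',
    #                       expires=datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    # sets max-age to roughly 3600 and fills in a matching `expires` date string.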
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)
def _set_content(self, value):
self._container = [value]
self._is_string = True
content = property(_get_content, _set_content)
def __iter__(self):
self._iterator = iter(self._container)
return self
def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)
def close(self):
if hasattr(self._container, 'close'):
self._container.close()
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)
def flush(self):
pass
def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
class HttpResponseRedirect(HttpResponse):
status_code = 302
def __init__(self, redirect_to):
super(HttpResponseRedirect, self).__init__()
self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
status_code = 301
def __init__(self, redirect_to):
super(HttpResponsePermanentRedirect, self).__init__()
self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
status_code = 304
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods):
super(HttpResponseNotAllowed, self).__init__()
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, str):
return unicode(s, encoding, 'replace')
else:
return s
| bsd-3-clause | 649,093,225,685,276,500 | 36.705251 | 134 | 0.576352 | false |
groschovskiy/lerigos_music | Server/API/lib/gcloud/bigtable/test_row.py | 1 | 63227 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestRow(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import Row
return Row
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
row_key = b'row_key'
table = object()
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
self.assertEqual(row._row_key, row_key)
self.assertTrue(row._table is table)
self.assertTrue(row._filter is filter_)
def test_constructor_with_unicode(self):
row_key = u'row_key'
row_key_bytes = b'row_key'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._row_key, row_key_bytes)
self.assertTrue(row._table is table)
def test_constructor_with_non_bytes(self):
row_key = object()
with self.assertRaises(TypeError):
self._makeOne(row_key, None)
def _get_mutations_helper(self, filter_=None, state=None):
row_key = b'row_key'
row = self._makeOne(row_key, None, filter_=filter_)
# Mock the mutations with unique objects so we can compare.
row._pb_mutations = no_bool = object()
row._true_pb_mutations = true_mutations = object()
row._false_pb_mutations = false_mutations = object()
mutations = row._get_mutations(state)
return (no_bool, true_mutations, false_mutations), mutations
def test__get_mutations_no_filter(self):
(no_bool, _, _), mutations = self._get_mutations_helper()
self.assertTrue(mutations is no_bool)
def test__get_mutations_no_filter_bad_state(self):
        state = object()  # State must be None when there is no filter.
with self.assertRaises(ValueError):
self._get_mutations_helper(state=state)
def test__get_mutations_with_filter_true_state(self):
filter_ = object()
state = True
(_, true_filter, _), mutations = self._get_mutations_helper(
filter_=filter_, state=state)
self.assertTrue(mutations is true_filter)
def test__get_mutations_with_filter_false_state(self):
filter_ = object()
state = False
(_, _, false_filter), mutations = self._get_mutations_helper(
filter_=filter_, state=state)
self.assertTrue(mutations is false_filter)
def test__get_mutations_with_filter_bad_state(self):
filter_ = object()
state = None
with self.assertRaises(ValueError):
self._get_mutations_helper(filter_=filter_, state=state)
def _set_cell_helper(self, column=None, column_bytes=None,
value=b'foobar', timestamp=None,
timestamp_micros=-1):
import six
import struct
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_key = b'row_key'
column_family_id = u'column_family_id'
if column is None:
column = b'column'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._pb_mutations, [])
row.set_cell(column_family_id, column,
value, timestamp=timestamp)
if isinstance(value, six.integer_types):
value = struct.pack('>q', value)
expected_pb = data_pb2.Mutation(
set_cell=data_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column_bytes or column,
timestamp_micros=timestamp_micros,
value=value,
),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_set_cell(self):
self._set_cell_helper()
def test_set_cell_with_string_column(self):
column_bytes = b'column'
column_non_bytes = u'column'
self._set_cell_helper(column=column_non_bytes,
column_bytes=column_bytes)
def test_set_cell_with_integer_value(self):
value = 1337
self._set_cell_helper(value=value)
def test_set_cell_with_non_bytes_value(self):
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
value = object() # Not bytes
with self.assertRaises(TypeError):
row.set_cell(column_family_id, column, value)
def test_set_cell_with_non_null_timestamp(self):
import datetime
from gcloud._helpers import _EPOCH
microseconds = 898294371
millis_granularity = microseconds - (microseconds % 1000)
timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds)
self._set_cell_helper(timestamp=timestamp,
timestamp_micros=millis_granularity)
def test_append_cell_value(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
table = object()
row_key = b'row_key'
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
column = b'column'
column_family_id = u'column_family_id'
value = b'bytes-val'
row.append_cell_value(column_family_id, column, value)
expected_pb = data_pb2.ReadModifyWriteRule(
family_name=column_family_id, column_qualifier=column,
append_value=value)
self.assertEqual(row._rule_pb_list, [expected_pb])
def test_increment_cell_value(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
table = object()
row_key = b'row_key'
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
column = b'column'
column_family_id = u'column_family_id'
int_value = 281330
row.increment_cell_value(column_family_id, column, int_value)
expected_pb = data_pb2.ReadModifyWriteRule(
family_name=column_family_id, column_qualifier=column,
increment_amount=int_value)
self.assertEqual(row._rule_pb_list, [expected_pb])
def test_delete(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_key = b'row_key'
row = self._makeOne(row_key, object())
self.assertEqual(row._pb_mutations, [])
row.delete()
expected_pb = data_pb2.Mutation(
delete_from_row=data_pb2.Mutation.DeleteFromRow(),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cell(self):
klass = self._getTargetClass()
class MockRow(klass):
def __init__(self, *args, **kwargs):
super(MockRow, self).__init__(*args, **kwargs)
self._args = []
self._kwargs = []
# Replace the called method with one that logs arguments.
def delete_cells(self, *args, **kwargs):
self._args.append(args)
self._kwargs.append(kwargs)
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
mock_row = MockRow(row_key, table)
# Make sure no values are set before calling the method.
self.assertEqual(mock_row._pb_mutations, [])
self.assertEqual(mock_row._args, [])
self.assertEqual(mock_row._kwargs, [])
# Actually make the request against the mock class.
time_range = object()
mock_row.delete_cell(column_family_id, column, time_range=time_range)
self.assertEqual(mock_row._pb_mutations, [])
self.assertEqual(mock_row._args, [(column_family_id, [column])])
self.assertEqual(mock_row._kwargs, [{
'state': None,
'time_range': time_range,
}])
def test_delete_cells_non_iterable(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = object() # Not iterable
with self.assertRaises(TypeError):
row.delete_cells(column_family_id, columns)
def test_delete_cells_all_columns(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
klass = self._getTargetClass()
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, klass.ALL_COLUMNS)
expected_pb = data_pb2.Mutation(
delete_from_family=data_pb2.Mutation.DeleteFromFamily(
family_name=column_family_id,
),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cells_no_columns(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = []
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns)
self.assertEqual(row._pb_mutations, [])
def _delete_cells_helper(self, time_range=None):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = [column]
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns, time_range=time_range)
expected_pb = data_pb2.Mutation(
delete_from_column=data_pb2.Mutation.DeleteFromColumn(
family_name=column_family_id,
column_qualifier=column,
),
)
if time_range is not None:
expected_pb.delete_from_column.time_range.CopyFrom(
time_range.to_pb())
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cells_no_time_range(self):
self._delete_cells_helper()
def test_delete_cells_with_time_range(self):
import datetime
from gcloud._helpers import _EPOCH
from gcloud.bigtable.row import TimestampRange
        microseconds = 30871000  # Already at millisecond granularity, so no rounding occurs.
start = _EPOCH + datetime.timedelta(microseconds=microseconds)
time_range = TimestampRange(start=start)
self._delete_cells_helper(time_range=time_range)
def test_delete_cells_with_bad_column(self):
# This makes sure a failure on one of the columns doesn't leave
# the row's mutations in a bad state.
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = [column, object()]
self.assertEqual(row._pb_mutations, [])
with self.assertRaises(TypeError):
row.delete_cells(column_family_id, columns)
self.assertEqual(row._pb_mutations, [])
def test_delete_cells_with_string_columns(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_key = b'row_key'
column_family_id = u'column_family_id'
column1 = u'column1'
column1_bytes = b'column1'
column2 = u'column2'
column2_bytes = b'column2'
table = object()
row = self._makeOne(row_key, table)
columns = [column1, column2]
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns)
expected_pb1 = data_pb2.Mutation(
delete_from_column=data_pb2.Mutation.DeleteFromColumn(
family_name=column_family_id,
column_qualifier=column1_bytes,
),
)
expected_pb2 = data_pb2.Mutation(
delete_from_column=data_pb2.Mutation.DeleteFromColumn(
family_name=column_family_id,
column_qualifier=column2_bytes,
),
)
self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2])
def test_commit(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_service_messages_pb2 as messages_pb2)
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id = u'column_family_id'
column = b'column'
timeout_seconds = 711
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row = self._makeOne(row_key, table)
# Create request_pb
value = b'bytes-value'
mutation = data_pb2.Mutation(
set_cell=data_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=-1, # Default value.
value=value,
),
)
request_pb = messages_pb2.MutateRowRequest(
table_name=table_name,
row_key=row_key,
mutations=[mutation],
)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # commit() has no return value when no filter.
# Perform the method and check the result.
row.set_cell(column_family_id, column, value)
result = row.commit()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'MutateRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row._pb_mutations, [])
self.assertEqual(row._true_pb_mutations, None)
self.assertEqual(row._false_pb_mutations, None)
def test_commit_too_many_mutations(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
row._pb_mutations = [1, 2, 3]
num_mutations = len(row._pb_mutations)
with _Monkey(MUT, _MAX_MUTATIONS=num_mutations - 1):
with self.assertRaises(ValueError):
row.commit()
def test_commit_no_mutations(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
row = self._makeOne(row_key, table)
self.assertEqual(row._pb_mutations, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit()
self.assertEqual(result, None)
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
def test_commit_with_filter(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_service_messages_pb2 as messages_pb2)
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.row import RowSampleFilter
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id = u'column_family_id'
column = b'column'
timeout_seconds = 262
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row_filter = RowSampleFilter(0.33)
row = self._makeOne(row_key, table, filter_=row_filter)
# Create request_pb
value1 = b'bytes-value'
mutation1 = data_pb2.Mutation(
set_cell=data_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=-1, # Default value.
value=value1,
),
)
value2 = b'other-bytes'
mutation2 = data_pb2.Mutation(
set_cell=data_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=-1, # Default value.
value=value2,
),
)
request_pb = messages_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
predicate_filter=row_filter.to_pb(),
true_mutations=[mutation1],
false_mutations=[mutation2],
)
# Create response_pb
predicate_matched = True
response_pb = messages_pb2.CheckAndMutateRowResponse(
predicate_matched=predicate_matched)
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = predicate_matched
# Perform the method and check the result.
row.set_cell(column_family_id, column, value1, state=True)
row.set_cell(column_family_id, column, value2, state=False)
result = row.commit()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CheckAndMutateRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row._pb_mutations, None)
self.assertEqual(row._true_pb_mutations, [])
self.assertEqual(row._false_pb_mutations, [])
def test_commit_with_filter_too_many_mutations(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table = object()
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
row._true_pb_mutations = [1, 2, 3]
num_mutations = len(row._true_pb_mutations)
with _Monkey(MUT, _MAX_MUTATIONS=num_mutations - 1):
with self.assertRaises(ValueError):
row.commit()
def test_commit_with_filter_no_mutations(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
self.assertEqual(row._true_pb_mutations, [])
self.assertEqual(row._false_pb_mutations, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit()
self.assertEqual(result, None)
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
def test_commit_modifications(self):
from gcloud._testing import _Monkey
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_service_messages_pb2 as messages_pb2)
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id = u'column_family_id'
column = b'column'
timeout_seconds = 87
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row = self._makeOne(row_key, table)
# Create request_pb
value = b'bytes-value'
        # We will call row.append_cell_value(column_family_id, column, value).
request_pb = messages_pb2.ReadModifyWriteRowRequest(
table_name=table_name,
row_key=row_key,
rules=[
data_pb2.ReadModifyWriteRule(
family_name=column_family_id,
column_qualifier=column,
append_value=value,
),
],
)
# Create response_pb
response_pb = object()
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
row_responses = []
expected_result = object()
def mock_parse_rmw_row_response(row_response):
row_responses.append(row_response)
return expected_result
# Perform the method and check the result.
with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response):
row.append_cell_value(column_family_id, column, value)
result = row.commit_modifications()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ReadModifyWriteRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row._pb_mutations, [])
self.assertEqual(row._true_pb_mutations, None)
self.assertEqual(row._false_pb_mutations, None)
self.assertEqual(row_responses, [response_pb])
self.assertEqual(row._rule_pb_list, [])
def test_commit_modifications_no_rules(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit_modifications()
self.assertEqual(result, {})
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
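# The TestRow cases above drive the typical Row lifecycle against the fake
# stub; roughly (illustrative sketch, values made up):
#
#     row = Row(b'row-key', table)                            # unconditional
#     row.set_cell(u'cf1', b'col', b'value')
#     row.commit()                                            # -> MutateRow
#
#     row = Row(b'row-key', table, filter_=RowSampleFilter(0.33))
#     row.set_cell(u'cf1', b'col', b'on-match', state=True)
#     row.commit()                                            # -> CheckAndMutateRow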
class Test_BoolFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import _BoolFilter
return _BoolFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
flag = object()
row_filter = self._makeOne(flag)
self.assertTrue(row_filter.flag is flag)
def test___eq__type_differ(self):
flag = object()
row_filter1 = self._makeOne(flag)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
flag = object()
row_filter1 = self._makeOne(flag)
row_filter2 = self._makeOne(flag)
self.assertEqual(row_filter1, row_filter2)
def test___ne__same_value(self):
flag = object()
row_filter1 = self._makeOne(flag)
row_filter2 = self._makeOne(flag)
comparison_val = (row_filter1 != row_filter2)
self.assertFalse(comparison_val)
class TestSinkFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import SinkFilter
return SinkFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
flag = True
row_filter = self._makeOne(flag)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(sink=flag)
self.assertEqual(pb_val, expected_pb)
class TestPassAllFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import PassAllFilter
return PassAllFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
flag = True
row_filter = self._makeOne(flag)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(pass_all_filter=flag)
self.assertEqual(pb_val, expected_pb)
class TestBlockAllFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import BlockAllFilter
return BlockAllFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
flag = True
row_filter = self._makeOne(flag)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(block_all_filter=flag)
self.assertEqual(pb_val, expected_pb)
class Test_RegexFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import _RegexFilter
return _RegexFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
regex = object()
row_filter = self._makeOne(regex)
self.assertTrue(row_filter.regex is regex)
def test___eq__type_differ(self):
regex = object()
row_filter1 = self._makeOne(regex)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
regex = object()
row_filter1 = self._makeOne(regex)
row_filter2 = self._makeOne(regex)
self.assertEqual(row_filter1, row_filter2)
def test___ne__same_value(self):
regex = object()
row_filter1 = self._makeOne(regex)
row_filter2 = self._makeOne(regex)
comparison_val = (row_filter1 != row_filter2)
self.assertFalse(comparison_val)
class TestRowKeyRegexFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import RowKeyRegexFilter
return RowKeyRegexFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
regex = b'row-key-regex'
row_filter = self._makeOne(regex)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(row_key_regex_filter=regex)
self.assertEqual(pb_val, expected_pb)
class TestRowSampleFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import RowSampleFilter
return RowSampleFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
sample = object()
row_filter = self._makeOne(sample)
self.assertTrue(row_filter.sample is sample)
def test___eq__type_differ(self):
sample = object()
row_filter1 = self._makeOne(sample)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
sample = object()
row_filter1 = self._makeOne(sample)
row_filter2 = self._makeOne(sample)
self.assertEqual(row_filter1, row_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
sample = 0.25
row_filter = self._makeOne(sample)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(row_sample_filter=sample)
self.assertEqual(pb_val, expected_pb)
class TestFamilyNameRegexFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import FamilyNameRegexFilter
return FamilyNameRegexFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
regex = u'family-regex'
row_filter = self._makeOne(regex)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(family_name_regex_filter=regex)
self.assertEqual(pb_val, expected_pb)
class TestColumnQualifierRegexFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ColumnQualifierRegexFilter
return ColumnQualifierRegexFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
regex = b'column-regex'
row_filter = self._makeOne(regex)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(column_qualifier_regex_filter=regex)
self.assertEqual(pb_val, expected_pb)
class TestTimestampRange(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import TimestampRange
return TimestampRange
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
start = object()
end = object()
time_range = self._makeOne(start=start, end=end)
self.assertTrue(time_range.start is start)
self.assertTrue(time_range.end is end)
def test___eq__(self):
start = object()
end = object()
time_range1 = self._makeOne(start=start, end=end)
time_range2 = self._makeOne(start=start, end=end)
self.assertEqual(time_range1, time_range2)
def test___eq__type_differ(self):
start = object()
end = object()
time_range1 = self._makeOne(start=start, end=end)
time_range2 = object()
self.assertNotEqual(time_range1, time_range2)
def test___ne__same_value(self):
start = object()
end = object()
time_range1 = self._makeOne(start=start, end=end)
time_range2 = self._makeOne(start=start, end=end)
comparison_val = (time_range1 != time_range2)
self.assertFalse(comparison_val)
def _to_pb_helper(self, start_micros=None, end_micros=None):
import datetime
from gcloud._helpers import _EPOCH
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
pb_kwargs = {}
start = None
if start_micros is not None:
start = _EPOCH + datetime.timedelta(microseconds=start_micros)
pb_kwargs['start_timestamp_micros'] = start_micros
end = None
if end_micros is not None:
end = _EPOCH + datetime.timedelta(microseconds=end_micros)
pb_kwargs['end_timestamp_micros'] = end_micros
time_range = self._makeOne(start=start, end=end)
expected_pb = data_pb2.TimestampRange(**pb_kwargs)
self.assertEqual(time_range.to_pb(), expected_pb)
def test_to_pb(self):
        # Values already at millisecond granularity (multiples of 1000 microseconds).
start_micros = 30871000
end_micros = 12939371000
self._to_pb_helper(start_micros=start_micros,
end_micros=end_micros)
def test_to_pb_start_only(self):
        # Value already at millisecond granularity (a multiple of 1000 microseconds).
start_micros = 30871000
self._to_pb_helper(start_micros=start_micros)
def test_to_pb_end_only(self):
        # Value already at millisecond granularity (a multiple of 1000 microseconds).
end_micros = 12939371000
self._to_pb_helper(end_micros=end_micros)
class TestTimestampRangeFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import TimestampRangeFilter
return TimestampRangeFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
range_ = object()
row_filter = self._makeOne(range_)
self.assertTrue(row_filter.range_ is range_)
def test___eq__type_differ(self):
range_ = object()
row_filter1 = self._makeOne(range_)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
range_ = object()
row_filter1 = self._makeOne(range_)
row_filter2 = self._makeOne(range_)
self.assertEqual(row_filter1, row_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import TimestampRange
range_ = TimestampRange()
row_filter = self._makeOne(range_)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(
timestamp_range_filter=data_pb2.TimestampRange())
self.assertEqual(pb_val, expected_pb)
class TestColumnRangeFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ColumnRangeFilter
return ColumnRangeFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
column_family_id = object()
row_filter = self._makeOne(column_family_id)
self.assertTrue(row_filter.column_family_id is column_family_id)
self.assertEqual(row_filter.start_column, None)
self.assertEqual(row_filter.end_column, None)
self.assertTrue(row_filter.inclusive_start)
self.assertTrue(row_filter.inclusive_end)
def test_constructor_explicit(self):
column_family_id = object()
start_column = object()
end_column = object()
inclusive_start = object()
inclusive_end = object()
row_filter = self._makeOne(column_family_id, start_column=start_column,
end_column=end_column,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
self.assertTrue(row_filter.column_family_id is column_family_id)
self.assertTrue(row_filter.start_column is start_column)
self.assertTrue(row_filter.end_column is end_column)
self.assertTrue(row_filter.inclusive_start is inclusive_start)
self.assertTrue(row_filter.inclusive_end is inclusive_end)
def test_constructor_bad_start(self):
column_family_id = object()
self.assertRaises(ValueError, self._makeOne,
column_family_id, inclusive_start=True)
def test_constructor_bad_end(self):
column_family_id = object()
self.assertRaises(ValueError, self._makeOne,
column_family_id, inclusive_end=True)
def test___eq__(self):
column_family_id = object()
start_column = object()
end_column = object()
inclusive_start = object()
inclusive_end = object()
row_filter1 = self._makeOne(column_family_id,
start_column=start_column,
end_column=end_column,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
row_filter2 = self._makeOne(column_family_id,
start_column=start_column,
end_column=end_column,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
self.assertEqual(row_filter1, row_filter2)
def test___eq__type_differ(self):
column_family_id = object()
row_filter1 = self._makeOne(column_family_id)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
column_family_id = u'column-family-id'
row_filter = self._makeOne(column_family_id)
col_range_pb = data_pb2.ColumnRange(family_name=column_family_id)
expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_inclusive_start(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
column_family_id = u'column-family-id'
column = b'column'
row_filter = self._makeOne(column_family_id, start_column=column)
col_range_pb = data_pb2.ColumnRange(
family_name=column_family_id,
start_qualifier_inclusive=column,
)
expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_exclusive_start(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
column_family_id = u'column-family-id'
column = b'column'
row_filter = self._makeOne(column_family_id, start_column=column,
inclusive_start=False)
col_range_pb = data_pb2.ColumnRange(
family_name=column_family_id,
start_qualifier_exclusive=column,
)
expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_inclusive_end(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
column_family_id = u'column-family-id'
column = b'column'
row_filter = self._makeOne(column_family_id, end_column=column)
col_range_pb = data_pb2.ColumnRange(
family_name=column_family_id,
end_qualifier_inclusive=column,
)
expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_exclusive_end(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
column_family_id = u'column-family-id'
column = b'column'
row_filter = self._makeOne(column_family_id, end_column=column,
inclusive_end=False)
col_range_pb = data_pb2.ColumnRange(
family_name=column_family_id,
end_qualifier_exclusive=column,
)
expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
class TestValueRegexFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ValueRegexFilter
return ValueRegexFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
regex = b'value-regex'
row_filter = self._makeOne(regex)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(value_regex_filter=regex)
self.assertEqual(pb_val, expected_pb)
class TestValueRangeFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ValueRangeFilter
return ValueRangeFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
row_filter = self._makeOne()
self.assertEqual(row_filter.start_value, None)
self.assertEqual(row_filter.end_value, None)
self.assertTrue(row_filter.inclusive_start)
self.assertTrue(row_filter.inclusive_end)
def test_constructor_explicit(self):
start_value = object()
end_value = object()
inclusive_start = object()
inclusive_end = object()
row_filter = self._makeOne(start_value=start_value,
end_value=end_value,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
self.assertTrue(row_filter.start_value is start_value)
self.assertTrue(row_filter.end_value is end_value)
self.assertTrue(row_filter.inclusive_start is inclusive_start)
self.assertTrue(row_filter.inclusive_end is inclusive_end)
def test_constructor_bad_start(self):
self.assertRaises(ValueError, self._makeOne, inclusive_start=True)
def test_constructor_bad_end(self):
self.assertRaises(ValueError, self._makeOne, inclusive_end=True)
def test___eq__(self):
start_value = object()
end_value = object()
inclusive_start = object()
inclusive_end = object()
row_filter1 = self._makeOne(start_value=start_value,
end_value=end_value,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
row_filter2 = self._makeOne(start_value=start_value,
end_value=end_value,
inclusive_start=inclusive_start,
inclusive_end=inclusive_end)
self.assertEqual(row_filter1, row_filter2)
def test___eq__type_differ(self):
row_filter1 = self._makeOne()
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
row_filter = self._makeOne()
expected_pb = data_pb2.RowFilter(
value_range_filter=data_pb2.ValueRange())
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_inclusive_start(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
value = b'some-value'
row_filter = self._makeOne(start_value=value)
val_range_pb = data_pb2.ValueRange(start_value_inclusive=value)
expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_exclusive_start(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
value = b'some-value'
row_filter = self._makeOne(start_value=value, inclusive_start=False)
val_range_pb = data_pb2.ValueRange(start_value_exclusive=value)
expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_inclusive_end(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
value = b'some-value'
row_filter = self._makeOne(end_value=value)
val_range_pb = data_pb2.ValueRange(end_value_inclusive=value)
expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
def test_to_pb_exclusive_end(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
value = b'some-value'
row_filter = self._makeOne(end_value=value, inclusive_end=False)
val_range_pb = data_pb2.ValueRange(end_value_exclusive=value)
expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb)
self.assertEqual(row_filter.to_pb(), expected_pb)
class Test_CellCountFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import _CellCountFilter
return _CellCountFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
num_cells = object()
row_filter = self._makeOne(num_cells)
self.assertTrue(row_filter.num_cells is num_cells)
def test___eq__type_differ(self):
num_cells = object()
row_filter1 = self._makeOne(num_cells)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
num_cells = object()
row_filter1 = self._makeOne(num_cells)
row_filter2 = self._makeOne(num_cells)
self.assertEqual(row_filter1, row_filter2)
def test___ne__same_value(self):
num_cells = object()
row_filter1 = self._makeOne(num_cells)
row_filter2 = self._makeOne(num_cells)
comparison_val = (row_filter1 != row_filter2)
self.assertFalse(comparison_val)
class TestCellsRowOffsetFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import CellsRowOffsetFilter
return CellsRowOffsetFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
num_cells = 76
row_filter = self._makeOne(num_cells)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(cells_per_row_offset_filter=num_cells)
self.assertEqual(pb_val, expected_pb)
class TestCellsRowLimitFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import CellsRowLimitFilter
return CellsRowLimitFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
num_cells = 189
row_filter = self._makeOne(num_cells)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(cells_per_row_limit_filter=num_cells)
self.assertEqual(pb_val, expected_pb)
class TestCellsColumnLimitFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import CellsColumnLimitFilter
return CellsColumnLimitFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
num_cells = 10
row_filter = self._makeOne(num_cells)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(
cells_per_column_limit_filter=num_cells)
self.assertEqual(pb_val, expected_pb)
class TestStripValueTransformerFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import StripValueTransformerFilter
return StripValueTransformerFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
flag = True
row_filter = self._makeOne(flag)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(strip_value_transformer=flag)
self.assertEqual(pb_val, expected_pb)
class TestApplyLabelFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ApplyLabelFilter
return ApplyLabelFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
label = object()
row_filter = self._makeOne(label)
self.assertTrue(row_filter.label is label)
def test___eq__type_differ(self):
label = object()
row_filter1 = self._makeOne(label)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
def test___eq__same_value(self):
label = object()
row_filter1 = self._makeOne(label)
row_filter2 = self._makeOne(label)
self.assertEqual(row_filter1, row_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
label = u'label'
row_filter = self._makeOne(label)
pb_val = row_filter.to_pb()
expected_pb = data_pb2.RowFilter(apply_label_transformer=label)
self.assertEqual(pb_val, expected_pb)
class Test_FilterCombination(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import _FilterCombination
return _FilterCombination
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
row_filter = self._makeOne()
self.assertEqual(row_filter.filters, [])
def test_constructor_explicit(self):
filters = object()
row_filter = self._makeOne(filters=filters)
self.assertTrue(row_filter.filters is filters)
def test___eq__(self):
filters = object()
row_filter1 = self._makeOne(filters=filters)
row_filter2 = self._makeOne(filters=filters)
self.assertEqual(row_filter1, row_filter2)
def test___eq__type_differ(self):
filters = object()
row_filter1 = self._makeOne(filters=filters)
row_filter2 = object()
self.assertNotEqual(row_filter1, row_filter2)
class TestRowFilterChain(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import RowFilterChain
return RowFilterChain
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter1_pb = row_filter1.to_pb()
row_filter2 = RowSampleFilter(0.25)
row_filter2_pb = row_filter2.to_pb()
row_filter3 = self._makeOne(filters=[row_filter1, row_filter2])
filter_pb = row_filter3.to_pb()
expected_pb = data_pb2.RowFilter(
chain=data_pb2.RowFilter.Chain(
filters=[row_filter1_pb, row_filter2_pb],
),
)
self.assertEqual(filter_pb, expected_pb)
def test_to_pb_nested(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import CellsRowLimitFilter
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter2 = RowSampleFilter(0.25)
row_filter3 = self._makeOne(filters=[row_filter1, row_filter2])
row_filter3_pb = row_filter3.to_pb()
row_filter4 = CellsRowLimitFilter(11)
row_filter4_pb = row_filter4.to_pb()
row_filter5 = self._makeOne(filters=[row_filter3, row_filter4])
filter_pb = row_filter5.to_pb()
expected_pb = data_pb2.RowFilter(
chain=data_pb2.RowFilter.Chain(
filters=[row_filter3_pb, row_filter4_pb],
),
)
self.assertEqual(filter_pb, expected_pb)
class TestRowFilterUnion(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import RowFilterUnion
return RowFilterUnion
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter1_pb = row_filter1.to_pb()
row_filter2 = RowSampleFilter(0.25)
row_filter2_pb = row_filter2.to_pb()
row_filter3 = self._makeOne(filters=[row_filter1, row_filter2])
filter_pb = row_filter3.to_pb()
expected_pb = data_pb2.RowFilter(
interleave=data_pb2.RowFilter.Interleave(
filters=[row_filter1_pb, row_filter2_pb],
),
)
self.assertEqual(filter_pb, expected_pb)
def test_to_pb_nested(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import CellsRowLimitFilter
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter2 = RowSampleFilter(0.25)
row_filter3 = self._makeOne(filters=[row_filter1, row_filter2])
row_filter3_pb = row_filter3.to_pb()
row_filter4 = CellsRowLimitFilter(11)
row_filter4_pb = row_filter4.to_pb()
row_filter5 = self._makeOne(filters=[row_filter3, row_filter4])
filter_pb = row_filter5.to_pb()
expected_pb = data_pb2.RowFilter(
interleave=data_pb2.RowFilter.Interleave(
filters=[row_filter3_pb, row_filter4_pb],
),
)
self.assertEqual(filter_pb, expected_pb)
class TestConditionalRowFilter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ConditionalRowFilter
return ConditionalRowFilter
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
base_filter = object()
true_filter = object()
false_filter = object()
cond_filter = self._makeOne(base_filter,
true_filter=true_filter,
false_filter=false_filter)
self.assertTrue(cond_filter.base_filter is base_filter)
self.assertTrue(cond_filter.true_filter is true_filter)
self.assertTrue(cond_filter.false_filter is false_filter)
def test___eq__(self):
base_filter = object()
true_filter = object()
false_filter = object()
cond_filter1 = self._makeOne(base_filter,
true_filter=true_filter,
false_filter=false_filter)
cond_filter2 = self._makeOne(base_filter,
true_filter=true_filter,
false_filter=false_filter)
self.assertEqual(cond_filter1, cond_filter2)
def test___eq__type_differ(self):
base_filter = object()
true_filter = object()
false_filter = object()
cond_filter1 = self._makeOne(base_filter,
true_filter=true_filter,
false_filter=false_filter)
cond_filter2 = object()
self.assertNotEqual(cond_filter1, cond_filter2)
def test_to_pb(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import CellsRowOffsetFilter
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter1_pb = row_filter1.to_pb()
row_filter2 = RowSampleFilter(0.25)
row_filter2_pb = row_filter2.to_pb()
row_filter3 = CellsRowOffsetFilter(11)
row_filter3_pb = row_filter3.to_pb()
row_filter4 = self._makeOne(row_filter1, true_filter=row_filter2,
false_filter=row_filter3)
filter_pb = row_filter4.to_pb()
expected_pb = data_pb2.RowFilter(
condition=data_pb2.RowFilter.Condition(
predicate_filter=row_filter1_pb,
true_filter=row_filter2_pb,
false_filter=row_filter3_pb,
),
)
self.assertEqual(filter_pb, expected_pb)
def test_to_pb_true_only(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter1_pb = row_filter1.to_pb()
row_filter2 = RowSampleFilter(0.25)
row_filter2_pb = row_filter2.to_pb()
row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2)
filter_pb = row_filter3.to_pb()
expected_pb = data_pb2.RowFilter(
condition=data_pb2.RowFilter.Condition(
predicate_filter=row_filter1_pb,
true_filter=row_filter2_pb,
),
)
self.assertEqual(filter_pb, expected_pb)
def test_to_pb_false_only(self):
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable.row import RowSampleFilter
from gcloud.bigtable.row import StripValueTransformerFilter
row_filter1 = StripValueTransformerFilter(True)
row_filter1_pb = row_filter1.to_pb()
row_filter2 = RowSampleFilter(0.25)
row_filter2_pb = row_filter2.to_pb()
row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2)
filter_pb = row_filter3.to_pb()
expected_pb = data_pb2.RowFilter(
condition=data_pb2.RowFilter.Condition(
predicate_filter=row_filter1_pb,
false_filter=row_filter2_pb,
),
)
self.assertEqual(filter_pb, expected_pb)
class Test__parse_rmw_row_response(unittest2.TestCase):
def _callFUT(self, row_response):
from gcloud.bigtable.row import _parse_rmw_row_response
return _parse_rmw_row_response(row_response)
def test_it(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
col_fam1 = u'col-fam-id'
col_fam2 = u'col-fam-id2'
col_name1 = b'col-name1'
col_name2 = b'col-name2'
col_name3 = b'col-name3-but-other-fam'
cell_val1 = b'cell-val'
cell_val2 = b'cell-val-newer'
cell_val3 = b'altcol-cell-val'
cell_val4 = b'foo'
microseconds = 1000871
timestamp = _datetime_from_microseconds(microseconds)
expected_output = {
col_fam1: {
col_name1: [
(cell_val1, timestamp),
(cell_val2, timestamp),
],
col_name2: [
(cell_val3, timestamp),
],
},
col_fam2: {
col_name3: [
(cell_val4, timestamp),
],
},
}
sample_input = data_pb2.Row(
families=[
data_pb2.Family(
name=col_fam1,
columns=[
data_pb2.Column(
qualifier=col_name1,
cells=[
data_pb2.Cell(
value=cell_val1,
timestamp_micros=microseconds,
),
data_pb2.Cell(
value=cell_val2,
timestamp_micros=microseconds,
),
],
),
data_pb2.Column(
qualifier=col_name2,
cells=[
data_pb2.Cell(
value=cell_val3,
timestamp_micros=microseconds,
),
],
),
],
),
data_pb2.Family(
name=col_fam2,
columns=[
data_pb2.Column(
qualifier=col_name3,
cells=[
data_pb2.Cell(
value=cell_val4,
timestamp_micros=microseconds,
),
],
),
],
),
],
)
self.assertEqual(expected_output, self._callFUT(sample_input))
class Test__parse_family_pb(unittest2.TestCase):
def _callFUT(self, family_pb):
from gcloud.bigtable.row import _parse_family_pb
return _parse_family_pb(family_pb)
def test_it(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
col_fam1 = u'col-fam-id'
col_name1 = b'col-name1'
col_name2 = b'col-name2'
cell_val1 = b'cell-val'
cell_val2 = b'cell-val-newer'
cell_val3 = b'altcol-cell-val'
microseconds = 5554441037
timestamp = _datetime_from_microseconds(microseconds)
expected_dict = {
col_name1: [
(cell_val1, timestamp),
(cell_val2, timestamp),
],
col_name2: [
(cell_val3, timestamp),
],
}
expected_output = (col_fam1, expected_dict)
sample_input = data_pb2.Family(
name=col_fam1,
columns=[
data_pb2.Column(
qualifier=col_name1,
cells=[
data_pb2.Cell(
value=cell_val1,
timestamp_micros=microseconds,
),
data_pb2.Cell(
value=cell_val2,
timestamp_micros=microseconds,
),
],
),
data_pb2.Column(
qualifier=col_name2,
cells=[
data_pb2.Cell(
value=cell_val3,
timestamp_micros=microseconds,
),
],
),
],
)
self.assertEqual(expected_output, self._callFUT(sample_input))
class _Client(object):
data_stub = None
def __init__(self, timeout_seconds=None):
self.timeout_seconds = timeout_seconds
class _Cluster(object):
def __init__(self, client=None):
self._client = client
class _Table(object):
def __init__(self, name, client=None):
self.name = name
self._cluster = _Cluster(client)
| apache-2.0 | 1,095,877,507,249,676,800 | 34.461021 | 79 | 0.598921 | false |
diplomacy/research | diplomacy_research/proto/diplomacy_tensorflow/contrib/boosted_trees/proto/split_info_pb2.py | 1 | 6904 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: diplomacy_tensorflow/contrib/boosted_trees/proto/split_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from diplomacy_tensorflow.contrib.boosted_trees.proto import tree_config_pb2 as diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='diplomacy_tensorflow/contrib/boosted_trees/proto/split_info.proto',
package='diplomacy.tensorflow.boosted_trees.learner',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\nAdiplomacy_tensorflow/contrib/boosted_trees/proto/split_info.proto\x12*diplomacy.tensorflow.boosted_trees.learner\x1a\x42\x64iplomacy_tensorflow/contrib/boosted_trees/proto/tree_config.proto\"\xdc\x01\n\tSplitInfo\x12\x46\n\nsplit_node\x18\x01 \x01(\x0b\x32\x32.diplomacy.tensorflow.boosted_trees.trees.TreeNode\x12\x42\n\nleft_child\x18\x02 \x01(\x0b\x32..diplomacy.tensorflow.boosted_trees.trees.Leaf\x12\x43\n\x0bright_child\x18\x03 \x01(\x0b\x32..diplomacy.tensorflow.boosted_trees.trees.Leaf\"\xba\x01\n\x12ObliviousSplitInfo\x12\x46\n\nsplit_node\x18\x01 \x01(\x0b\x32\x32.diplomacy.tensorflow.boosted_trees.trees.TreeNode\x12@\n\x08\x63hildren\x18\x02 \x03(\x0b\x32..diplomacy.tensorflow.boosted_trees.trees.Leaf\x12\x1a\n\x12\x63hildren_parent_id\x18\x03 \x03(\x05\x42\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2.DESCRIPTOR,])
_SPLITINFO = _descriptor.Descriptor(
name='SplitInfo',
full_name='diplomacy.tensorflow.boosted_trees.learner.SplitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='split_node', full_name='diplomacy.tensorflow.boosted_trees.learner.SplitInfo.split_node', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left_child', full_name='diplomacy.tensorflow.boosted_trees.learner.SplitInfo.left_child', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right_child', full_name='diplomacy.tensorflow.boosted_trees.learner.SplitInfo.right_child', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=402,
)
_OBLIVIOUSSPLITINFO = _descriptor.Descriptor(
name='ObliviousSplitInfo',
full_name='diplomacy.tensorflow.boosted_trees.learner.ObliviousSplitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='split_node', full_name='diplomacy.tensorflow.boosted_trees.learner.ObliviousSplitInfo.split_node', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children', full_name='diplomacy.tensorflow.boosted_trees.learner.ObliviousSplitInfo.children', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children_parent_id', full_name='diplomacy.tensorflow.boosted_trees.learner.ObliviousSplitInfo.children_parent_id', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=405,
serialized_end=591,
)
_SPLITINFO.fields_by_name['split_node'].message_type = diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._TREENODE
_SPLITINFO.fields_by_name['left_child'].message_type = diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF
_SPLITINFO.fields_by_name['right_child'].message_type = diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF
_OBLIVIOUSSPLITINFO.fields_by_name['split_node'].message_type = diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._TREENODE
_OBLIVIOUSSPLITINFO.fields_by_name['children'].message_type = diplomacy__tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF
DESCRIPTOR.message_types_by_name['SplitInfo'] = _SPLITINFO
DESCRIPTOR.message_types_by_name['ObliviousSplitInfo'] = _OBLIVIOUSSPLITINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SplitInfo = _reflection.GeneratedProtocolMessageType('SplitInfo', (_message.Message,), dict(
DESCRIPTOR = _SPLITINFO,
__module__ = 'diplomacy_tensorflow.contrib.boosted_trees.proto.split_info_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.boosted_trees.learner.SplitInfo)
))
_sym_db.RegisterMessage(SplitInfo)
ObliviousSplitInfo = _reflection.GeneratedProtocolMessageType('ObliviousSplitInfo', (_message.Message,), dict(
DESCRIPTOR = _OBLIVIOUSSPLITINFO,
__module__ = 'diplomacy_tensorflow.contrib.boosted_trees.proto.split_info_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.boosted_trees.learner.ObliviousSplitInfo)
))
_sym_db.RegisterMessage(ObliviousSplitInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| mit | 6,749,990,357,615,990,000 | 46.944444 | 830 | 0.749131 | false |
rchav/vinerack | saleor/userprofile/models.py | 2 | 5462 | from __future__ import unicode_literals
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin)
from django.db import models
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from django_countries.fields import Country, CountryField
class AddressManager(models.Manager):
def as_data(self, address):
data = model_to_dict(address, exclude=['id', 'user'])
if isinstance(data['country'], Country):
data['country'] = data['country'].code
return data
def are_identical(self, addr1, addr2):
data1 = self.as_data(addr1)
data2 = self.as_data(addr2)
return data1 == data2
def store_address(self, user, address):
data = self.as_data(address)
address, dummy_created = user.addresses.get_or_create(**data)
return address
@python_2_unicode_compatible
class Address(models.Model):
first_name = models.CharField(
pgettext_lazy('Address field', 'first name'),
max_length=256)
last_name = models.CharField(
pgettext_lazy('Address field', 'last name'),
max_length=256)
company_name = models.CharField(
pgettext_lazy('Address field', 'company or organization'),
max_length=256, blank=True)
street_address_1 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
street_address_2 = models.CharField(
pgettext_lazy('Address field', 'address'),
max_length=256, blank=True)
city = models.CharField(
pgettext_lazy('Address field', 'city'),
max_length=256, blank=True)
city_area = models.CharField(
pgettext_lazy('Address field', 'district'),
max_length=128, blank=True)
postal_code = models.CharField(
pgettext_lazy('Address field', 'postal code'),
max_length=20, blank=True)
country = CountryField(
pgettext_lazy('Address field', 'country'))
country_area = models.CharField(
pgettext_lazy('Address field', 'state or province'),
max_length=128, blank=True)
phone = models.CharField(
pgettext_lazy('Address field', 'phone number'),
max_length=30, blank=True)
objects = AddressManager()
@property
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def __str__(self):
if self.company_name:
return '%s - %s' % (self.company_name, self.full_name)
return self.full_name
def __repr__(self):
return (
'Address(first_name=%r, last_name=%r, company_name=%r, '
'street_address_1=%r, street_address_2=%r, city=%r, '
'postal_code=%r, country=%r, country_area=%r, phone=%r)' % (
self.first_name, self.last_name, self.company_name,
self.street_address_1, self.street_address_2, self.city,
self.postal_code, self.country, self.country_area,
self.phone))
class UserManager(BaseUserManager):
def create_user(self, email, password=None, is_staff=False,
is_active=True, **extra_fields):
        'Creates a User with the given email and password'
email = UserManager.normalize_email(email)
user = self.model(email=email, is_active=is_active,
is_staff=is_staff, **extra_fields)
if password:
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password=None, **extra_fields):
return self.create_user(email, password, is_staff=True,
is_superuser=True, **extra_fields)
def store_address(self, user, address, billing=False, shipping=False):
entry = Address.objects.store_address(user, address)
changed = False
if billing and not user.default_billing_address_id:
user.default_billing_address = entry
changed = True
if shipping and not user.default_shipping_address_id:
user.default_shipping_address = entry
changed = True
if changed:
user.save()
return entry
class User(PermissionsMixin, AbstractBaseUser):
email = models.EmailField(unique=True)
addresses = models.ManyToManyField(Address, blank=True)
is_staff = models.BooleanField(
pgettext_lazy('User field', 'staff status'),
default=False)
is_active = models.BooleanField(
pgettext_lazy('User field', 'active'),
default=False)
date_joined = models.DateTimeField(
pgettext_lazy('User field', 'date joined'),
default=timezone.now, editable=False)
default_shipping_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default shipping address'))
default_billing_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default billing address'))
USERNAME_FIELD = 'email'
objects = UserManager()
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
| bsd-3-clause | -8,794,741,306,931,394,000 | 35.657718 | 77 | 0.631637 | false |
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/file_info.py | 1 | 1684 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FileInfo(Model):
"""Information about a image store file.
:param file_size: The size of file in bytes.
:type file_size: str
:param file_version: Information about the version of image store file.
:type file_version: ~azure.servicefabric.models.FileVersion
:param modified_date: The date and time when the image store file was last
modified.
:type modified_date: datetime
:param store_relative_path: The file path relative to the image store root
path.
:type store_relative_path: str
"""
_attribute_map = {
'file_size': {'key': 'FileSize', 'type': 'str'},
'file_version': {'key': 'FileVersion', 'type': 'FileVersion'},
'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'},
'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'},
}
def __init__(self, file_size=None, file_version=None, modified_date=None, store_relative_path=None):
super(FileInfo, self).__init__()
self.file_size = file_size
self.file_version = file_version
self.modified_date = modified_date
self.store_relative_path = store_relative_path
| mit | -8,983,340,538,904,316,000 | 39.095238 | 104 | 0.612827 | false |
ScreamingUdder/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSBeamSpreaderTransmission.py | 1 | 13137 | #pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as api
from mantid.api import *
from mantid.kernel import *
import os
from reduction_workflow.find_data import find_data
class SANSBeamSpreaderTransmission(PythonAlgorithm):
def category(self):
return "Workflow\\SANS\\UsesPropertyManager"
def name(self):
return "SANSBeamSpreaderTransmission"
def summary(self):
return "Compute transmission using the beam spreader method"
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "",
direction=Direction.Input))
self.declareProperty(FileProperty("SampleSpreaderFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("DirectSpreaderFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("SampleScatteringFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty(FileProperty("DirectScatteringFilename", "",
action=FileAction.Load,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty("SpreaderTransmissionValue", 1.0,
"Transmission of the beam spreader")
self.declareProperty("SpreaderTransmissionError", 0.0,
"Error on the transmission of the beam spreader")
self.declareProperty("ThetaDependent", True,
"If true, a theta-dependent correction will be applied")
self.declareProperty(FileProperty("DarkCurrentFilename", "",
action=FileAction.OptionalLoad,
extensions=['xml', 'nxs', 'nxs.h5']))
self.declareProperty("UseSampleDarkCurrent", False,
"If true, the sample dark current will be used")
self.declareProperty("ReductionProperties", "__sans_reduction_properties",
validator=StringMandatoryValidator(),
doc="Property manager name for the reduction")
self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
direction = Direction.Output))
self.declareProperty("MeasuredTransmission", 0.0,
direction=Direction.Output)
self.declareProperty("MeasuredError", 0.0,
direction=Direction.Output)
self.declareProperty("OutputMessage", "",
direction=Direction.Output, doc = "Output message")
def PyExec(self): # noqa: C901
# Get the reduction property manager
property_manager_name = self.getProperty("ReductionProperties").value
property_manager = PropertyManagerDataService.retrieve(property_manager_name)
# Build the name we are going to give the transmission workspace
sample_scatt = self.getPropertyValue("SampleScatteringFilename")
sample_basename = os.path.basename(sample_scatt)
entry_name = "TransmissionSpreader%s" % sample_scatt
trans_ws_name = "__transmission_fit_%s" % sample_basename
trans_ws = None
# If we have already computed the transmission, used the
# previously computed workspace
if property_manager.existsProperty(entry_name):
trans_ws_name = property_manager.getProperty(entry_name)
if AnalysisDataService.doesExist(trans_ws_name):
trans_ws = AnalysisDataService.retrieve(trans_ws_name)
# Get instrument to use with FileFinder
instrument = ''
if property_manager.existsProperty("InstrumentName"):
instrument = property_manager.getProperty("InstrumentName").value
# Get the data loader
def _load_data(filename, output_ws):
if not property_manager.existsProperty("LoadAlgorithm"):
Logger("SANSBeamSpreaderTransmission").error("SANS reduction not set up properly: missing load algorithm")
raise RuntimeError("SANS reduction not set up properly: missing load algorithm")
p=property_manager.getProperty("LoadAlgorithm")
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("Filename", filename)
alg.setProperty("OutputWorkspace", output_ws)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = ''
if alg.existsProperty("OutputMessage"):
msg = alg.getProperty("OutputMessage").value
return msg
# Compute the transmission if we don't already have it
if trans_ws is None:
# Load data files
sample_spreader_ws = "__trans_sample_spreader"
direct_spreader_ws = "__trans_direct_spreader"
sample_scatt_ws = "__trans_sample_scatt"
direct_scatt_ws = "__trans_direct_scatt"
sample_spread = self.getPropertyValue("SampleSpreaderFilename")
direct_spread = self.getPropertyValue("DirectSpreaderFilename")
direct_scatt = self.getPropertyValue("DirectScatteringFilename")
ws_names = [[sample_spread, sample_spreader_ws],
[direct_spread, direct_spreader_ws],
[sample_scatt, sample_scatt_ws],
[direct_scatt, direct_scatt_ws]]
for f in ws_names:
filepath = find_data(f[0], instrument=instrument)
_load_data(filepath, f[1])
self._subtract_dark_current(f[1], property_manager)
# Get normalization for transmission calculation
monitor_det_ID = None
if property_manager.existsProperty("TransmissionNormalisation"):
sample_ws = AnalysisDataService.retrieve(sample_scatt_ws)
if property_manager.getProperty("TransmissionNormalisation").value=="Monitor":
monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-monitor-spectrum")[0])
else:
monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-timer-spectrum")[0])
elif property_manager.existsProperty("NormaliseAlgorithm"):
def _normalise(workspace):
p=property_manager.getProperty("NormaliseAlgorithm")
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("InputWorkspace", workspace)
alg.setProperty("OutputWorkspace", workspace)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = ''
if alg.existsProperty("OutputMessage"):
msg += alg.getProperty("OutputMessage").value+'\n'
return msg
for f in ws_names:
_normalise(f[1])
# Calculate transmission. Use the reduction method's normalization channel (time or beam monitor)
# as the monitor channel.
spreader_t_value = self.getPropertyValue("SpreaderTransmissionValue")
spreader_t_error = self.getPropertyValue("SpreaderTransmissionError")
alg = AlgorithmManager.createUnmanaged('CalculateTransmissionBeamSpreader')
alg.initialize()
alg.setProperty("SampleSpreaderRunWorkspace", sample_spreader_ws)
alg.setProperty("DirectSpreaderRunWorkspace", direct_spreader_ws)
alg.setProperty("SampleScatterRunWorkspace", sample_scatt_ws)
alg.setProperty("DirectScatterRunWorkspace", direct_scatt_ws)
alg.setProperty("IncidentBeamMonitor", monitor_det_ID)
alg.setProperty("OutputWorkspace",trans_ws_name)
alg.setProperty("SpreaderTransmissionValue",spreader_t_value)
alg.setProperty("SpreaderTransmissionError",spreader_t_error)
alg.execute()
trans_ws = AnalysisDataService.retrieve(trans_ws_name)
for f in ws_names:
if AnalysisDataService.doesExist(f[1]):
AnalysisDataService.remove(f[1])
# 2- Apply correction (Note: Apply2DTransCorr)
input_ws_name = self.getPropertyValue("InputWorkspace")
if not AnalysisDataService.doesExist(input_ws_name):
Logger("SANSBeamSpreaderTransmission").error("Could not find input workspace")
workspace = AnalysisDataService.retrieve(input_ws_name).name()
# Clone workspace to make boost-python happy
api.CloneWorkspace(InputWorkspace=workspace,
OutputWorkspace='__'+workspace)
workspace = '__'+workspace
self._apply_transmission(workspace, trans_ws_name)
trans = trans_ws.dataY(0)[0]
error = trans_ws.dataE(0)[0]
output_str = ''
if len(trans_ws.dataY(0))==1:
self.setProperty("MeasuredTransmission", trans)
self.setProperty("MeasuredError", error)
            output_str = "\n%s T = %6.2g +- %6.2g\n" % (output_str, trans, error)
output_msg = "Transmission correction applied [%s]%s\n" % (trans_ws_name, output_str)
output_ws = AnalysisDataService.retrieve(workspace)
self.setProperty("OutputWorkspace", output_ws)
self.setPropertyValue("OutputMessage", output_msg)
def _apply_transmission(self, workspace, trans_workspace):
"""
Apply transmission correction
@param workspace: workspace to apply correction to
@param trans_workspace: workspace name for of the transmission
"""
# Make sure the binning is compatible
api.RebinToWorkspace(WorkspaceToRebin=trans_workspace,
WorkspaceToMatch=workspace,
OutputWorkspace=trans_workspace+'_rebin',
PreserveEvents=False)
# Apply angle-dependent transmission correction using the zero-angle transmission
theta_dependent = self.getProperty("ThetaDependent").value
api.ApplyTransmissionCorrection(InputWorkspace=workspace,
TransmissionWorkspace=trans_workspace+'_rebin',
OutputWorkspace=workspace,
ThetaDependent=theta_dependent)
if AnalysisDataService.doesExist(trans_workspace+'_rebin'):
AnalysisDataService.remove(trans_workspace+'_rebin')
def _subtract_dark_current(self, workspace_name, property_manager):
"""
Subtract the dark current
@param workspace_name: name of the workspace to subtract from
@param property_manager: property manager object
"""
# Subtract dark current
use_sample_dc = self.getProperty("UseSampleDarkCurrent").value
dark_current_data = self.getPropertyValue("DarkCurrentFilename")
property_manager_name = self.getProperty("ReductionProperties").value
def _dark(workspace, dark_current_property):
if property_manager.existsProperty(dark_current_property):
p=property_manager.getProperty(dark_current_property)
# Dark current subtraction for sample data
alg=Algorithm.fromString(p.valueAsStr)
alg.setProperty("InputWorkspace", workspace)
alg.setProperty("OutputWorkspace", workspace)
alg.setProperty("Filename", dark_current_data)
if alg.existsProperty("PersistentCorrection"):
alg.setProperty("PersistentCorrection", False)
if alg.existsProperty("ReductionProperties"):
alg.setProperty("ReductionProperties", property_manager_name)
alg.execute()
msg = "Dark current subtracted"
if alg.existsProperty("OutputMessage"):
msg += alg.getProperty("OutputMessage").value
return msg
if use_sample_dc is True:
_dark(workspace_name, "DarkCurrentAlgorithm")
elif len(dark_current_data.strip())>0:
_dark(workspace_name, "DefaultDarkCurrentAlgorithm")
#############################################################################################
AlgorithmFactory.subscribe(SANSBeamSpreaderTransmission)
| gpl-3.0 | -2,155,445,629,212,738,800 | 49.722008 | 126 | 0.599528 | false |
sebleier/python-redis | pyredis/hash.py | 1 | 2365 | from collections import defaultdict
class Hash(object):
def __init__(self):
self._data = defaultdict(int)
def hset(self, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
if key in self._data:
created = 0
else:
created = 1
self._data[key] = value
return created
def hget(self, key):
"Return the value of ``key``"
return self._data.get(key, None)
def hdel(self, *keys):
"Delete ``keys``"
deleted = 0
for key in keys:
if key in self._data:
deleted += 1
del self._data[key]
return deleted
def hexists(self, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return key in self._data
def hgetall(self):
"Return a Python dict of the hash's name/value pairs"
return self._data
def hincrby(self, key, amount=1):
"Increment the value of ``key`` in hash by ``amount``"
self._data[key] += amount
return self._data[key]
def hincrbyfloat(self, key, amount=1.0):
"""
Increment the value of ``key`` in hash by floating ``amount``
"""
return self.hincrby(key, amount)
def hkeys(self):
"Return the list of keys within hash"
return self._data.keys()
def hlen(self):
"Return the number of elements in hash"
return len(self._data)
def hsetnx(self, key, value):
"""
Set ``key`` to ``value`` within hash if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
if key in self._data:
return 0
self._data[key] = value
return 1
def hmset(self, mapping):
"""
Sets each key in the ``mapping`` dict to its corresponding value
in the hash
"""
return self._data.update(mapping)
def hmget(self, keys):
"Returns a list of values ordered identically to ``keys``"
values = []
for key in keys:
values.append(self._data.get(key, None))
return values
def hvals(self):
"Return the list of values within hash"
return self._data.values()
| bsd-3-clause | 7,353,101,178,696,724,000 | 26.183908 | 77 | 0.542072 | false |
stepanovsh/project_template | {{cookiecutter.repo_name}}/config/settings/local.py | 1 | 2101 | # -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
{%- if cookiecutter.use_celery == "y" -%}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{%- endif %}
# Your local stuff: Below this line define 3rd party library settings
| bsd-3-clause | 1,962,637,750,829,820,200 | 30.358209 | 101 | 0.502142 | false |
Wapaul1/ray | python/ray/tune/trial.py | 1 | 5216 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import traceback
import ray
from collections import namedtuple
from ray.rllib.agents import get_agent_class
# Ray resources required to schedule a Trial
Resources = namedtuple("Resources", ["cpu", "gpu"])
class Trial(object):
"""A trial object holds the state for one model training run.
Trials are themselves managed by the TrialRunner class, which implements
the event loop for submitting trial runs to a Ray cluster.
Trials start in the PENDING state, and transition to RUNNING once started.
On error it transitions to ERROR, otherwise TERMINATED on success.
"""
PENDING = 'PENDING'
RUNNING = 'RUNNING'
TERMINATED = 'TERMINATED'
ERROR = 'ERROR'
def __init__(
self, env_creator, alg, config={}, local_dir='/tmp/ray',
agent_id=None, resources=Resources(cpu=1, gpu=0),
stopping_criterion={}, checkpoint_freq=sys.maxsize,
restore_path=None, upload_dir=None):
"""Initialize a new trial.
The args here take the same meaning as the command line flags defined
in ray.tune.config_parser.
"""
# Immutable config
self.env_creator = env_creator
if type(env_creator) is str:
self.env_name = env_creator
else:
self.env_name = "custom"
self.alg = alg
self.config = config
self.local_dir = local_dir
self.agent_id = agent_id
self.resources = resources
self.stopping_criterion = stopping_criterion
self.checkpoint_freq = checkpoint_freq
self.restore_path = restore_path
self.upload_dir = upload_dir
# Local trial state that is updated during the run
self.last_result = None
self.checkpoint_path = None
self.agent = None
self.status = Trial.PENDING
def start(self):
"""Starts this trial.
If an error is encountered when starting the trial, an exception will
be thrown.
"""
self.status = Trial.RUNNING
agent_cls = get_agent_class(self.alg)
cls = ray.remote(
num_cpus=self.resources.cpu, num_gpus=self.resources.gpu)(
agent_cls)
self.agent = cls.remote(
self.env_creator, self.config, self.local_dir, self.upload_dir,
agent_id=self.agent_id)
if self.restore_path:
ray.get(self.agent.restore.remote(self.restore_path))
def stop(self, error=False):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
"""
if error:
self.status = Trial.ERROR
else:
self.status = Trial.TERMINATED
try:
if self.agent:
self.agent.stop.remote()
self.agent.__ray_terminate__.remote(
self.agent._ray_actor_id.id())
except:
print("Error stopping agent:", traceback.format_exc())
self.status = Trial.ERROR
finally:
self.agent = None
def train_remote(self):
"""Returns Ray future for one iteration of training."""
assert self.status == Trial.RUNNING, self.status
return self.agent.train.remote()
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
for criteria, stop_value in self.stopping_criterion.items():
if getattr(result, criteria) >= stop_value:
return True
return False
def should_checkpoint(self):
"""Whether this trial is due for checkpointing."""
if self.checkpoint_freq is None:
return False
return self.last_result.training_iteration % self.checkpoint_freq == 0
def progress_string(self):
"""Returns a progress message for printing out to the console."""
if self.last_result is None:
return self.status
return '{}, {} s, {} ts, {} itrs, {} rew'.format(
self.status,
int(self.last_result.time_total_s),
int(self.last_result.timesteps_total),
self.last_result.training_iteration,
round(self.last_result.episode_reward_mean, 1))
def checkpoint(self):
"""Synchronously checkpoints the state of this trial.
TODO(ekl): we should support a PAUSED state based on checkpointing.
"""
path = ray.get(self.agent.save.remote())
self.checkpoint_path = path
print("Saved checkpoint to:", path)
return path
def __str__(self):
identifier = '{}_{}'.format(self.alg, self.env_name)
if self.agent_id:
identifier += '_' + self.agent_id
return identifier
def __eq__(self, other):
return str(self) == str(other)
def __hash__(self):
return hash(str(self))
| apache-2.0 | -6,987,886,982,920,780,000 | 30.233533 | 78 | 0.602186 | false |
jmeppley/py-metagenomics | sample_records.py | 1 | 3684 | #!/usr/bin/env python
"""
This script takes any file that can be divided into records and
returns N randomly selected records
Records can be fasta, fastq, genbank, or something described by a
simple RegExp
"""
from os import path
from edl.util import *
from edl.batch import *
import re
import sys
import argparse
def main():
# set up CLI
description = """
This script takes any file that can be divided into records and
returns N randomly selected records.
NOTE:
By default, all sampled records are stored in memory. This requires a
good amount of RAM (depending on record size and sample size). To avoid
this, specify the number of records or request a count using the
-n (population_size) option.
Records can be fasta, fastq, genbank, or something described by a
simple RegExp
"""
parser = argparse.ArgumentParser(description=description)
add_IO_arguments(parser)
add_record_parsing_arguments(parser)
parser.add_argument(
"-s",
"--sample_size",
default=1000000,
type=int,
metavar="SAMPLE_SIZE",
help="Number of records to pull out. Defaults to 1 million.")
parser.add_argument("-n", "--population_size", type=int, default=0,
metavar="POPULATION_SIZE",
help="Number of records in file. An integer, should "
"be greater than the SAMPLE_SIZE, except: 0 "
"(default)=> do a separate pass to count records "
"first; -1 => reservoir sample to RAM on the fly")
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
# check arguments
if arguments.input_files == [sys.stdin,
] and arguments.population_size == 0:
parser.error("We cannot count records from STDIN, please specify a"
"positive population size or use reservoir sampling "
"(-n -1)")
if arguments.population_size > 0 and \
arguments.population_size < arguments.sample_size:
parser.error("We cannot sample more records then "
"there are in the file!")
for inhandle, outhandle in inputIterator(arguments):
        # We need the file name to get the type; take it from the handle (if not stdin)
infilename = inhandle.name
fileType = getFileType(arguments, infilename)
record_iterator = fileType.recordStreamer(inhandle)
logging.debug("Looking for %d records in %s" % (arguments.sample_size,
infilename))
# if arguments.population_size<0:
# indexed_sample_generator will only read file once
# using reservoir sampling
# count records if asked to
if arguments.population_size == 0:
record_count, total_size = get_total_size(inhandle.name, fileType)
arguments.population_size = record_count
logging.debug("setting population size to: {}"
.format(arguments.population_size))
# get sampled record generator (will use reservoir if P is <0)
sampled_records = indexed_sample_generator(record_iterator,
N=arguments.sample_size,
P=arguments.population_size)
# print out sampled records
count = 0
for record in sampled_records:
outhandle.writelines(record)
count += 1
logging.debug("Sampled %d records" % (count))
if __name__ == '__main__':
main()
| mit | -5,833,227,940,174,607,000 | 34.76699 | 79 | 0.602334 | false |
aagusti/o-sipkd | osipkd/views/eis/eis_item.py | 1 | 10134 | import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession
from osipkd.models.eis import ARPaymentDetail as AR
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Add eis-item failed'
SESS_EDIT_FAILED = 'Edit eis-item failed'
def deferred_sumber_id(node, kw):
values = kw.get('sumber_id', [])
return widget.SelectWidget(values=values)
SUMBER_ID = (
(1, 'Manual'),
(2, 'PBB'),
(3, 'BPHTB'),
(4, 'PADL'),
)
class AddSchema(colander.Schema):
kode = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=18),
oid='kode')
nama = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=128),
oid = 'nama')
ref_kode = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=32),
)
ref_nama = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=64),
)
tanggal = colander.SchemaNode(
colander.Date(),
)
amount = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=32),
default = 0
)
kecamatan_kd = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=32),
missing=colander.drop)
kecamatan_nm = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=64),
missing=colander.drop)
kelurahan_kd = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=32),
missing=colander.drop
)
kelurahan_nm = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=64),
missing=colander.drop)
is_kota = colander.SchemaNode(
colander.Boolean(),
) # deferred_source_type)
disabled = colander.SchemaNode(
colander.Boolean(),
) # deferred_source_type)
sumber_id = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=32),
widget=widget.SelectWidget(values=SUMBER_ID)) # deferred_source_type)
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
class view_eis_item(BaseViews):
########
# List #
########
@view_config(route_name='eis-item', renderer='templates/eis-item/list.pt',
permission='read')
def view_list(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
return dict(project='EIS')
##########
# Action #
##########
@view_config(route_name='eis-item-act', renderer='json',
permission='read')
def eis_item_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('kode'))
columns.append(ColumnDT('nama'))
columns.append(ColumnDT('ref_kode'))
columns.append(ColumnDT('ref_nama'))
columns.append(ColumnDT('tanggal', filter=self._DTstrftime))
columns.append(ColumnDT('amount', filter=self._number_format))
query = DBSession.query(AR)
rowTable = DataTables(req, AR, query, columns)
return rowTable.output_result()
#######
# Add #
#######
def form_validator(self, form, value):
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(AR).filter_by(id=uid)
row = q.first()
else:
row = None
def get_form(self, class_form, row=None):
schema = class_form(validator=self.form_validator)
schema = schema.bind(sumber_id=SUMBER_ID)
schema.request = self.request
if row:
schema.deserialize(row)
return Form(schema, buttons=('simpan','batal'))
def save(self, values, user, row=None):
if not row:
row = AR()
row.created = datetime.now()
row.create_uid = user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = user.id
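        # checkbox fields may be missing or falsy in the submitted values; store them as 0/1 integers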
        row.disabled = 'disabled' in values and values['disabled'] and 1 or 0
row.is_kota = 'is_kota' in values and values['is_kota'] and 1 or 0
DBSession.add(row)
DBSession.flush()
return row
def save_request(self, values, row=None):
if 'id' in self.request.matchdict:
values['id'] = self.request.matchdict['id']
row = self.save(values, self.request.user, row)
        self.request.session.flash('AR has been saved.')
def route_list(self):
return HTTPFound(location=self.request.route_url('eis-item') )
def session_failed(self, session_name):
#r = dict(form=self.session[session_name])
del self.session[session_name]
#return r
@view_config(route_name='eis-item-add', renderer='templates/eis-item/add.pt',
permission='add')
def view_eis_item_add(self):
req = self.request
ses = self.session
form = self.get_form(AddSchema)
if req.POST:
if 'simpan' in req.POST:
controls = req.POST.items()
try:
c = form.validate(controls)
except ValidationFailure, e:
#req.session[SESS_ADD_FAILED] = e.render()
#form.set_appstruct(rowd)
return dict(form=form)
#return HTTPFound(location=req.route_url('eis-item-add'))
self.save_request(dict(controls))
return self.route_list()
elif SESS_ADD_FAILED in req.session:
return dict(form=form)
#return self.session_failed(SESS_ADD_FAILED)
return dict(form=form)
########
# Edit #
########
def query_id(self):
return DBSession.query(AR).filter_by(id=self.request.matchdict['id'])
def id_not_found(self):
        msg = 'AR ID %s not found.' % self.request.matchdict['id']
        self.request.session.flash(msg, 'error')
        return self.route_list()
@view_config(route_name='eis-item-edit', renderer='templates/eis-item/add.pt',
permission='edit')
def view_eis_item_edit(self):
request = self.request
row = self.query_id().first()
if not row:
            return self.id_not_found()
#values = row.to_dict()
rowd={}
rowd['id'] = row.id
rowd['kode'] = row.kode
rowd['nama'] = row.nama
rowd['ref_kode'] = row.ref_kode
rowd['ref_nama'] = row.ref_nama
rowd['tanggal'] = row.tanggal
rowd['amount'] = row.amount
rowd['kecamatan_kd'] = row.kecamatan_kd
rowd['kecamatan_nm'] = row.kecamatan_nm
rowd['kelurahan_kd'] = row.kelurahan_kd
rowd['kelurahan_nm'] = row.kelurahan_nm
rowd['is_kota'] = row.is_kota
rowd['disabled'] = row.disabled
rowd['sumber_id'] = row.sumber_id
form = self.get_form(EditSchema)
form.set_appstruct(rowd)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
print controls
try:
c = form.validate(controls)
except ValidationFailure, e:
return dict(form=form)
#request.session[SESS_EDIT_FAILED] = e.render()
#return HTTPFound(location=request.route_url('eis-item-edit',
# id=row.id))
self.save_request(dict(controls), row)
return self.route_list()
elif SESS_EDIT_FAILED in request.session:
return self.session_failed(SESS_EDIT_FAILED)
return dict(form=form)
##########
# Delete #
##########
@view_config(route_name='eis-item-delete', renderer='templates/eis-item/delete.pt',
permission='delete')
def view_eis_item_delete(self):
request = self.request
q = self.query_id()
row = q.first()
if not row:
            return self.id_not_found()
form = Form(colander.Schema(), buttons=('hapus','batal'))
if request.POST:
if 'hapus' in request.POST:
                msg = 'AR ID %d %s has been deleted.' % (row.id, row.nama)
try:
q.delete()
DBSession.flush()
except:
                    msg = 'AR ID %d %s could not be deleted.' % (row.id, row.nama)
request.session.flash(msg)
return self.route_list()
return dict(row=row,
form=form.render())
| mit | 1,049,621,007,874,338,300 | 34.683099 | 89 | 0.52013 | false |
Nichol4s/PyHead | tests/unreader.py | 1 | 1888 |
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Classes that can undo reading data from
# a given type of data source.
class Unreader(object):
def __init__(self):
self.buf = StringIO()
def chunk(self):
raise NotImplementedError()
def read(self, size=None):
if size is not None and not isinstance(size, (int, long)):
raise TypeError("size parameter must be an int or long.")
if size == 0:
return ""
if size < 0:
size = None
self.buf.seek(0, os.SEEK_END)
if size is None and self.buf.tell():
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
if size is None:
return self.chunk()
while self.buf.tell() < size:
chunk = self.chunk()
if not len(chunk):
ret = self.buf.getvalue()
self.buf.truncate(0)
return ret
self.buf.write(chunk)
data = self.buf.getvalue()
self.buf.truncate(0)
self.buf.write(data[size:])
return data[:size]
def unread(self, data):
self.buf.seek(0, os.SEEK_END)
self.buf.write(data)
class SocketUnreader(Unreader):
def __init__(self, sock, max_chunk=8192):
super(SocketUnreader, self).__init__()
self.sock = sock
self.mxchunk = max_chunk
def chunk(self):
return self.sock.recv(self.mxchunk)
class IterUnreader(Unreader):
def __init__(self, iterable):
super(IterUnreader, self).__init__()
self.iter = iter(iterable)
def chunk(self):
if not self.iter:
return ""
try:
return self.iter.next()
except StopIteration:
self.iter = None
return ""
| mit | -3,738,559,382,780,167,700 | 24.863014 | 69 | 0.544492 | false |
jzitelli/poolvr.py | poolvr/billboards.py | 1 | 4370 | import pkgutil
import os.path
from ctypes import c_void_p
import numpy as np
import OpenGL.GL as gl
import OpenGL.error
from .gl_rendering import Node, Technique, Material, Program, DTYPE_COMPONENT_TYPE, Texture
from .gl_primitives import PlanePrimitive
NULL_PTR = c_void_p(0)
# TODO: pkgutils way
TEXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
'textures')
class BillboardParticles(Node):
technique = Technique(Program(pkgutil.get_data('poolvr', 'shaders/bb_particles_vs.glsl').decode(),
pkgutil.get_data('poolvr', 'shaders/bb_particles_fs.glsl').decode()))
_modelview = np.eye(4, dtype=np.float32)
def __init__(self,
texture=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_alpha.png')),
normal_map=Texture(os.path.join(TEXTURES_DIR, 'sphere_bb_normal.png')),
num_particles=1, scale=1.0, color=None, translate=None):
Node.__init__(self)
self.texture = texture
self.normal_map = normal_map
self.material = Material(self.technique, textures={'map': texture, 'u_normal': normal_map})
self.num_particles = num_particles
if color is None:
color = np.array([num_particles*[1.0, 1.0, 1.0]], dtype=np.float32)
if translate is None:
translate = np.array([[1.1*scale*i, 0.2, 0.0] for i in range(num_particles)], dtype=np.float32)
self.primitive = PlanePrimitive(width=scale, height=scale,
color=color, translate=translate,
attribute_usage={'color': gl.GL_STATIC_DRAW,
'translate': gl.GL_DYNAMIC_DRAW})
self.primitive.attributes['position'] = self.primitive.attributes['vertices']
self.primitive.attributes['uv'] = self.primitive.attributes['uvs']
self._initialized = False
def init_gl(self, force=False):
if self._initialized and not force:
return
self.material.init_gl(force=force)
self.primitive.init_gl(force=force)
self._initialized = True
def update_gl(self):
if not self._initialized: self.init_gl()
translate = self.primitive.attributes['translate']
values = translate.tobytes()
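        # glNamedBufferSubData is an OpenGL 4.5 / direct-state-access entry point; fall back to
        # bind + glBufferSubData when PyOpenGL reports it as unavailable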
try:
gl.glNamedBufferSubData(self.primitive.buffers['translate'], 0, len(values), values)
except OpenGL.error.NullFunctionError as e:
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers['translate'])
gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, len(values), values)
def draw(self, view=None, projection=None, frame_data=None):
self.material.use()
if view is not None:
self.world_matrix.dot(view, out=self._modelview)
gl.glUniformMatrix4fv(self.technique.uniform_locations['u_modelview'], 1, False, self._modelview)
if projection is not None:
gl.glUniformMatrix4fv(self.technique.uniform_locations['u_projection'], 1, False, projection)
for attribute_name, location in self.technique.attribute_locations.items():
attribute = self.primitive.attributes[attribute_name]
gl.glEnableVertexAttribArray(location)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.primitive.buffers[attribute_name])
gl.glVertexAttribPointer(location, attribute.shape[-1],
DTYPE_COMPONENT_TYPE[attribute.dtype], False,
attribute.dtype.itemsize * attribute.shape[-1],
NULL_PTR)
if attribute_name == 'translate' or attribute_name == 'color':
gl.glVertexAttribDivisor(location, 1)
else:
gl.glVertexAttribDivisor(location, 0)
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.primitive.index_buffer)
gl.glDrawElementsInstanced(self.primitive.mode, self.primitive.indices.size,
DTYPE_COMPONENT_TYPE[self.primitive.indices.dtype], NULL_PTR, self.num_particles)
# for location in self.technique.attribute_locations.values():
# gl.glDisableVertexAttribArray(location)
self.material.release()
| mit | -6,383,165,777,984,583,000 | 50.411765 | 116 | 0.613501 | false |
JioCloud/glance | glance/api/middleware/cache.py | 1 | 12967 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
from oslo_log import log as logging
import webob
from glance.api.common import size_checked_iter
from glance.api import policy
from glance.api.v1 import images
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
from glance import i18n
from glance import image_cache
from glance import notifier
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
PATTERNS = {
('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
class CacheFilter(wsgi.Middleware):
def __init__(self, app):
self.cache = image_cache.ImageCache()
self.serializer = images.ImageSerializer()
self.policy = policy.Enforcer()
LOG.info(_LI("Initialized image cache middleware"))
super(CacheFilter, self).__init__(app)
def _verify_metadata(self, image_meta):
"""
Sanity check the 'deleted' and 'size' metadata values.
"""
# NOTE: admins can see image metadata in the v1 API, but shouldn't
# be able to download the actual image data.
if image_meta['status'] == 'deleted' and image_meta['deleted']:
raise exception.NotFound()
if not image_meta['size']:
# override image size metadata with the actual cached
# file size, see LP Bug #900959
image_meta['size'] = self.cache.get_image_size(image_meta['id'])
@staticmethod
def _match_request(request):
"""Determine the version of the url and extract the image id
        :returns tuple of version, method and image id if the url is
                 cacheable, otherwise None
"""
for ((version, method), pattern) in PATTERNS.items():
if request.method != method:
continue
match = pattern.match(request.path_info)
if match is None:
continue
image_id = match.group(1)
# Ensure the image id we got looks like an image id to filter
# out a URI like /images/detail. See LP Bug #879136
if image_id != 'detail':
return (version, method, image_id)
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden as e:
LOG.debug("User not permitted to perform '%s' action", action)
raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)
def _get_v1_image_metadata(self, request, image_id):
"""
Retrieves image metadata using registry for v1 api and creates
dictionary-like mash-up of image core and custom properties.
"""
try:
image_metadata = registry.get_image_metadata(request.context,
image_id)
return utils.create_mashup_dict(image_metadata)
except exception.NotFound as e:
LOG.debug("No metadata found for image '%s'", image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v2_image_metadata(self, request, image_id):
"""
Retrieves image and for v2 api and creates adapter like object
to access image core or custom properties on request.
"""
db_api = glance.db.get_api()
image_repo = glance.db.ImageRepo(request.context, db_api)
try:
image = image_repo.get(image_id)
# Storing image object in request as it is required in
# _process_v2_request call.
request.environ['api.cache.image'] = image
return policy.ImageTarget(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def process_request(self, request):
"""
For requests for an image file, we check the local image
cache. If present, we return the image file, appending
the image metadata in headers. If not present, we pass
the request on to the next application in the pipeline.
"""
match = self._match_request(request)
try:
(version, method, image_id) = match
except TypeError:
# Trying to unpack None raises this exception
return None
self._stash_request_info(request, image_id, method, version)
if request.method != 'GET' or not self.cache.is_cached(image_id):
return None
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(request, image_id)
# Deactivated images shall not be served from cache
if image_metadata['status'] == 'deactivated':
return None
try:
self._enforce(request, 'download_image', target=image_metadata)
except exception.Forbidden:
return None
LOG.debug("Cache hit for image '%s'", image_id)
image_iterator = self.get_from_cache(image_id)
method = getattr(self, '_process_%s_request' % version)
try:
return method(request, image_id, image_iterator, image_metadata)
except exception.NotFound:
msg = _LE("Image cache contained image file for image '%s', "
"however the registry did not contain metadata for "
"that image!") % image_id
LOG.error(msg)
self.cache.delete_cached_image(image_id)
@staticmethod
def _stash_request_info(request, image_id, method, version):
"""
Preserve the image id, version and request method for later retrieval
"""
request.environ['api.cache.image_id'] = image_id
request.environ['api.cache.method'] = method
request.environ['api.cache.version'] = version
@staticmethod
def _fetch_request_info(request):
"""
Preserve the cached image id, version for consumption by the
process_response method of this middleware
"""
try:
image_id = request.environ['api.cache.image_id']
method = request.environ['api.cache.method']
version = request.environ['api.cache.version']
except KeyError:
return None
else:
return (image_id, method, version)
def _process_v1_request(self, request, image_id, image_iterator,
image_meta):
# Don't display location
if 'location' in image_meta:
del image_meta['location']
image_meta.pop('location_data', None)
self._verify_metadata(image_meta)
response = webob.Response(request=request)
raw_response = {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
return self.serializer.show(response, raw_response)
def _process_v2_request(self, request, image_id, image_iterator,
image_meta):
# We do some contortions to get the image_metadata so
# that we can provide it to 'size_checked_iter' which
# will generate a notification.
# TODO(mclaren): Make notification happen more
# naturally once caching is part of the domain model.
image = request.environ['api.cache.image']
self._verify_metadata(image_meta)
response = webob.Response(request=request)
response.app_iter = size_checked_iter(response, image_meta,
image_meta['size'],
image_iterator,
notifier.Notifier())
# NOTE (flwang): Set the content-type, content-md5 and content-length
# explicitly to be consistent with the non-cache scenario.
# Besides, it's not worth the candle to invoke the "download" method
# of ResponseSerializer under image_data. Because method "download"
# will reset the app_iter. Then we have to call method
# "size_checked_iter" to avoid missing any notification. But after
# call "size_checked_iter", we will lose the content-md5 and
# content-length got by the method "download" because of this issue:
# https://github.com/Pylons/webob/issues/86
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-MD5'] = image.checksum
response.headers['Content-Length'] = str(image.size)
return response
def process_response(self, resp):
"""
We intercept the response coming back from the main
images Resource, removing image file from the cache
if necessary
"""
status_code = self.get_status_code(resp)
if not 200 <= status_code < 300:
return resp
try:
(image_id, method, version) = self._fetch_request_info(
resp.request)
except TypeError:
return resp
if method == 'GET' and status_code == 204:
# Bugfix:1251055 - Don't cache non-existent image files.
# NOTE: Both GET for an image without locations and DELETE return
# 204 but DELETE should be processed.
return resp
method_str = '_process_%s_response' % method
try:
process_response_method = getattr(self, method_str)
except AttributeError:
LOG.error(_LE('could not find %s') % method_str)
# Nothing to do here, move along
return resp
else:
return process_response_method(resp, image_id, version=version)
def _process_DELETE_response(self, resp, image_id, version=None):
if self.cache.is_cached(image_id):
LOG.debug("Removing image %s from cache", image_id)
self.cache.delete_cached_image(image_id)
return resp
def _process_GET_response(self, resp, image_id, version=None):
image_checksum = resp.headers.get('Content-MD5')
if not image_checksum:
# API V1 stores the checksum in a different header:
image_checksum = resp.headers.get('x-image-meta-checksum')
if not image_checksum:
LOG.error(_LE("Checksum header is missing."))
# fetch image_meta on the basis of version
image_metadata = None
if version:
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(resp.request, image_id)
# NOTE(zhiyan): image_cache return a generator object and set to
# response.app_iter, it will be called by eventlet.wsgi later.
# So we need enforce policy firstly but do it by application
# since eventlet.wsgi could not catch webob.exc.HTTPForbidden and
# return 403 error to client then.
self._enforce(resp.request, 'download_image', target=image_metadata)
resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
resp.app_iter)
return resp
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
return response.status
def get_from_cache(self, image_id):
"""Called if cache hit"""
with self.cache.open_for_read(image_id) as cache_file:
chunks = utils.chunkiter(cache_file)
for chunk in chunks:
yield chunk
| apache-2.0 | -4,818,873,228,218,001,000 | 39.021605 | 78 | 0.61001 | false |
zseder/hunmisc | hunmisc/corpustools/20ng_to_conll.py | 1 | 4033 | """
Copyright 2011-13 Attila Zseder
Email: [email protected]
This file is part of hunmisc project
url: https://github.com/zseder/hunmisc
hunmisc is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
"""Converts a newsgroup file (as in the 20 Newsgroups collection) to the conll2
format."""
import os.path
import re
from langtools.nltk.nltktools import NltkTools
from langtools.utils import cmd_utils
from langtools.utils.file_utils import *
from langtools.io.conll2.conll_iter import FieldedDocument
re_pat = re.compile(r"^[\s>]+", re.UNICODE)
# Decoding is not required as NltkTools.tag_raw() handles that for utf-8.
def read_stream(ins):
"""Reads a stream. Returns a {field:raw text} map, with a Body field. The
title is the content of the subject header field."""
fields = {}
for line in ins:
line = line.strip()
if len(line) == 0:
break
if line.startswith("Subject:"):
fields['Title'] = line[8:]
fields['Body'] = u' '.join(re_pat.sub("", line.strip().replace(u'\ufffd', ' ')) for line in ins)
return fields
def read_file(infile):
"""Reads a file. Returns a {field:raw text} map, with a Body field. If title
is true, a Title field will be added too."""
with FileReader(infile, replace=True).open() as ins:
return read_stream(ins)
def write_doc(doc, outs):
"""Writes the document to outs. A header line is written, then the
Title field (if any), then the body."""
outs.write(u"%%#PAGE\t{0}\n".format(doc.title))
if 'Title' in doc.fields:
outs.write(u"%%#Field\tTitle\n")
write_text(doc.fields['Title'], outs)
outs.write(u"%%#Field\tBody\n")
write_text(doc.fields['Body'], outs)
def write_text(text, outs):
for token in text:
outs.write(u"\t".join(token))
outs.write("\n")
if __name__ == '__main__':
import sys
try:
params, args = cmd_utils.get_params_sing(sys.argv[1:], 'i:o:m:ta', 'i', 0)
if not os.path.isdir(params['i']):
raise ValueError('Input must be a directory of files.')
except ValueError as err:
print('Error: {0}'.format(err))
print(('Usage: {0} -i input_dir [-o output_file] -m [hunpos_model] ' +
'[-a]').format(sys.argv[0]))
print(' input_dir: the directory with the input text files.')
print(' output_file: the conll2 output file. If omitted, the result will')
print(' be written to stdout.')
print(' hunpos_model: the hunpos model file.')
print(' -a: the output is appended to output_file, instead of overwriting it.')
sys.exit()
if 'o' in params:
output_mode = 'a' if 'a' in params else 'w'
out = FileWriter(params['o'], output_mode).open()
else:
out = StreamWriter(sys.stdout)
nt = NltkTools(pos=True, stem=True, tok=True, pos_model=params.get('m'))
for infile in (os.path.join(d, f) for d, _, fs in os.walk(params['i']) for f in fs):
print "File " + infile
doc = FieldedDocument(infile)
doc.fields = {}
for field, raw_text in read_file(infile).iteritems():
doc.fields[field] = nt.tag_raw(raw_text)
write_doc(doc, out)
if 'o' in params:
out.close()
| gpl-3.0 | 898,430,724,181,993,000 | 36.691589 | 100 | 0.634763 | false |
OneDrive/onedrive-sdk-python | src/python2/request/item_copy.py | 1 | 3729 | # -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from ..model.item import Item
from ..request_base import RequestBase
from ..request_builder_base import RequestBuilderBase
from ..async_operation_monitor import AsyncOperationMonitor
from ..options import *
import json
class ItemCopyRequest(RequestBase):
def __init__(self, request_url, client, options, name=None, parent_reference=None):
super(ItemCopyRequest, self).__init__(request_url, client, options)
self.method = "POST"
self.body_options={}
if name:
self.body_options["name"] = name
if parent_reference:
self.body_options["parentReference"] = parent_reference
@property
def body_options(self):
return self._body_options
@body_options.setter
def body_options(self, value):
self._body_options=value
def post(self):
"""Sends the POST request
Returns:
:class:`AsyncOperationMonitor<onedrivesdk.async_operation_monitor.AsyncOperationMonitor>`:
The resulting entity from the operation
"""
self.content_type = "application/json"
self.append_option(HeaderOption("Prefer", "respond-async"))
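        # with Prefer: respond-async the copy is accepted asynchronously; the Location response
        # header carries the monitor URL that AsyncOperationMonitor polls for completion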
response = self.send(self.body_options)
entity = AsyncOperationMonitor(response.headers["Location"], self._client, None)
return entity
class ItemCopyRequestBuilder(RequestBuilderBase):
def __init__(self, request_url, client, name=None, parent_reference=None):
super(ItemCopyRequestBuilder, self).__init__(request_url, client)
self._method_options = {}
self._method_options["name"] = name
self._method_options["parentReference"] = parent_reference._prop_dict
def request(self, options=None):
"""Builds the request for the ItemCopy
Args:
options (list of :class:`Option<onedrivesdk.options.Option>`):
Default to None, list of options to include in the request
Returns:
:class:`ItemCopyRequest<onedrivesdk.request.item_copy.ItemCopyRequest>`:
The request
"""
req = ItemCopyRequest(self._request_url, self._client, options, name=self._method_options["name"], parent_reference=self._method_options["parentReference"])
return req
def post(self):
"""Sends the POST request
Returns:
:class:`Item<onedrivesdk.model.item.Item>`:
The resulting Item from the operation
"""
return self.request().post()
| mit | -2,817,140,909,154,993,000 | 36.29 | 164 | 0.679807 | false |
savioabuga/lipame | config/settings/base.py | 1 | 10781 | """
Django settings for lipame project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (lipame/config/settings/base.py - 3 = lipame/)
APPS_DIR = ROOT_DIR.path('lipame')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'phonenumber_field',
'django_tables2',
'bootstrap3',
'wkhtmltopdf'
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'lipame.users.apps.UsersConfig',
# Your stuff: custom apps go here
'lipame.lipa.apps.LipaConfig',
'wallet'
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'lipame.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Savio & Nick""", '[email protected]'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='mysql:///lipame'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_USER_MODEL_USERNAME_FIELD = 'phone_number'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'lipame.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'lipame.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ['lipame.taskapp.celery.CeleryConfig']
# CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='django://')
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672/'
if CELERY_BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# COMVIVA
MERCHANT_ID = '780900003'
CURRENCY_CODE = 'TZS'
BASE_API_URL = 'http://172.27.34.27:1190/v0.14'
BALANCE_URL = BASE_API_URL + '/MM/accounts/msisdn'
TRANSACTIONS_URL = BASE_API_URL + '/MM/transactions'
| mit | -572,856,647,607,185,150 | 35.056856 | 101 | 0.616455 | false |
thehackercat/aha-memo | serverApp/common/aha.py | 1 | 13263 | # -*- coding:utf-8 -*-
__author__ = 'LexusLee'
import time
import json
import tornado
import tornado.gen
from tornado.web import HTTPError
from tornado.escape import json_decode
from foundation.log import logger
from foundation import const
from serverAppConfig import DEVICE_TYPE
from serverAppConfig import TOKEN_ROLE, TOKEN_DEADLINE_TIME, TOKEN_USER_ID
from cacheTool import _get_redis_connection
class ArgumentTypeError(HTTPError):
"""Exception raised by `IntLongRequestHandler.add_query_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
"""
def __init__(self, arg_name):
super(ArgumentTypeError, self).__init__(
400, 'Type of argument %s must be string type' % arg_name)
self.arg_name = arg_name
class RequestHandlerAha(tornado.web.RequestHandler):
"""
根据需要,定制tornado.web.RequestHandler
"""
def __init__(self, application, request, auto_init=True, **kwargs):
"""
构造函数
:param write_to_client: 如果是后台调用,该值必须是True,否则是False,默认为False
"""
super(RequestHandlerAha, self).__init__(application, request, **kwargs)
self.auto_init = auto_init
self.decoded_secure_cookie = {}
self.redis_client = None
def on_finish(self):
if self.redis_client:
logger.debug('存在redis连接,所以关闭连接')
self.redis_client.disconnect()
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""
重写重写tornado.web.RequestHandler中的get_secure_cookie方法,用于在多次调用get_secure_cookie
不重复去解密
:param name:
:return:
"""
if name in self.decoded_secure_cookie.keys():
return self.decoded_secure_cookie[name]
else:
value = super(RequestHandlerAha, self).get_secure_cookie(name, value, max_age_days, min_version)
self.decoded_secure_cookie[name] = value
return value
def get_current_user_role(self):
"""
返回当前用户的角色名字,角色名字参考configueFiles文件夹下面的authority文件注释
:return: 角色名字
"""
tokenstr = self.get_secure_cookie("token")
if not tokenstr:
logger.debug("cookie中没有token,因此是游客访问")
return "visitor"
token = json_decode(tokenstr)
role = token.get(TOKEN_ROLE, "visitor")
return role
def get_current_user(self):
"""
重写tornado.web.RequestHandler中的get_current_user方法,用于在调用self.current_user能正确返回
直接调用该函数可以返回相应的用户id,也可以使用self.current_user来返回用户的id。
:return: 如果有相应的token,则返回对应的id,否则返回None
"""
tokenstr = self.get_secure_cookie("token")
if not tokenstr:
logger.debug("cookie中没有token,因此是游客访问,因此没有用户的id")
return None
token = json_decode(tokenstr)
user_id = token.get(TOKEN_USER_ID)
return user_id
def write(self, chunk):
"""
向调用者返回数据,如果是客户端直接请求的,则向客户端返回对应写的数据,函数返回None;如果是后台自己调用,
则返回相应的python对象数据,函数返回对应python对象数据
:param chunk: 待返回的数据
:return: 如果是后台自己调用,返回对应数据的python对象;否则返回None
"""
# self.set_header("Content-Type", "application/json; charset=UTF-8")
if self.auto_init:
super(RequestHandlerAha, self).write(chunk)
else:
return chunk
def __add_arg(self, source, name, *args):
"""
用来底层实现增加请求参数
:param source: 增加参数到指定的source上
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
:exception ArgumentTypeError
"""
if not isinstance(name, basestring):
raise ArgumentTypeError(name)
for v in args:
if not isinstance(v, basestring):
raise ArgumentTypeError(name)
addvalue = list(args)
if name in self.request.query_arguments.keys():
addvalue.extend(source.get(name, []))
self.request.query_arguments[name] = addvalue
def add_query_argument(self, name, *args):
"""
增加query的参数,形如URL后面的参数
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.query_arguments, name, *args)
def add_body_argument(self, name, *args):
"""
增加body的参数,形如提交表单里面的数据
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.body_arguments, name, *args)
def add_argument(self, name, *args):
"""
增加全局参数
:param name: 参数的名字,必须是字符串
:param args: 参数的值,可以是多个参数,但是必须是字符串
:return:None
"""
self.__add_arg(self.request.arguments, name, *args)
def get_redis_conn(self):
"""
得到一个redis的连接
"""
if not self.redis_client:
self.redis_client = _get_redis_connection()
return self.redis_client
@property
def device_type(self):
"""
得到设备类型,返回的模拟枚举类型: DEVICE_TYPE
:return:
"""
if not hasattr(self, "_device_type"):
userAgent = self.request.headers.get('User-Agent', "")
via = self.request.headers.get("Via", "")
self._device_type = self._distinguishDevice(via, userAgent)
return self._device_type
def _distinguishDevice(self, via, userAgent):
"""
验证设备是什么类型设备
:param via:
:param userAgent:
:return: 0代表手机,1表示pc
"""
pcHeaders = ["Windows 98",
"Windows ME",
"Windows 2000",
"Windows XP",
"Windows NT",
"Ubuntu"]
mobileGateWayHeaders = [ "ZXWAP",
"chinamobile.com",
"monternet.com",
"infoX",
"wap.lizongbo.com","Bytemobile"]
mobileUserAgents = [ "Nokia", "SAMSUNG", "MIDP-2", "CLDC1.1", "SymbianOS", "MAUI", "UNTRUSTED/1.0", "Windows CE",
"iPhone", "iPad", "Android", "BlackBerry", "UCWEB", "ucweb", "BREW", "J2ME", "YULONG",
"YuLong", "COOLPAD","TIANYU","TY-", "K-Touch", "Haier", "DOPOD","Lenovo","LENOVO", "HUAQIN",
"AIGO-", "CTC/1.0", "CTC/2.0","CMCC","DAXIAN","MOT-","SonyEricsson","GIONEE","HTC","ZTE",
"HUAWEI", "webOS","GoBrowser","IEMobile", "WAP2.0"]
pcFlag = False
mobileFlag = False
for pcHeader in pcHeaders:
if pcFlag:
break
if userAgent.find(pcHeader) != -1:
pcFlag = True
break
for mobileGateWayHeader in mobileGateWayHeaders:
if mobileFlag:
break
if via.find(mobileGateWayHeader) != -1:
mobileFlag = True
break
for mobileUserAgent in mobileUserAgents:
if mobileFlag:
break
if userAgent.find(mobileUserAgent) != -1:
mobileFlag = True
break
if mobileFlag==True and mobileFlag!=pcFlag:
return DEVICE_TYPE.MOBILE
else:
return DEVICE_TYPE.PC
class ResponseJSON:
"""
处理返回给客户端的json对象
"""
def __init__(self, code, data=None, description=None, status=None):
"""
:param code: 返回的code,数字类型
:param description: code相关描述
:param data: 具体的data数据
"""
self.code = code
self.description = description
self.data = data
self.status = status
def resultDict(self):
"""
返回一个dict对象。如果code不是数字,则认为系统内部错误,code置为500。如果
description为空,则没有description在dict中。如果data为一个json对象字符串,则会把对应
的字符串转换成dict
:return:返回一个dict对象
"""
if isinstance(self.code, int):
meta = {"code": self.code}
else:
meta = {"code": 500}
if const.basic.get('send_description') == 'True' and self.description:
meta["description"] = self.description
if self.status:
if isinstance(self.status, int):
meta['status'] = self.status
else:
meta['status'] = -9999
rdict = {"meta": meta}
if isinstance(self.data, basestring):
try:
rdict["data"] = json.loads(self.data, encoding="utf-8")
except ValueError:
logger.warning("ResponseJSON:data数据格式错误")
elif isinstance(self.data, dict) or isinstance(self.data, list):
rdict["data"] = self.data
return rdict
def resultStr(self):
"""
返回的是结果json字符串
"""
return json.dumps(self.resultDict(), ensure_ascii=False)
def _auth_user_token(token):
"""
通过token去验证用户是否已经登陆成功
:param token:字典格式,token:
CT: create_time,该token创建时间
DT: deadline_time,该token的有效日期
:return: 验证成功返回True,验证失败返回False
"""
if token is None:
return False
else:
token = json_decode(token)
deadline_time = token[TOKEN_DEADLINE_TIME]
now_time = get_system_time(pretty=False)
if now_time < deadline_time:
return True
else:
return False
def authenticated(method):
"""
Decorate methods with this to require that the user be logged in.
"""
def wrapper(self, *args, **kwargs):
try:
if not self.request.loginSuccess: # 第一次登陆会产生异常,如果没有产生异常,说明已经验证过登陆了
return self.write(ResponseJSON(401, description="not login.").resultDict())
# return '已经验证过登陆,但是验证失败'
except AttributeError:
resp = _auth_user_token(self.get_secure_cookie("token"))
if resp:
self.request.loginSuccess = True
return method(self, *args, **kwargs)
else:
self.request.loginSuccess = False
return self.write(ResponseJSON(401, description="not login").resultDict())
# return '验证失败'
else:
return method(self, *args, **kwargs)
return wrapper
def _auth_user_authority(code, role):
"""
通过code去验证用户是否有该权限
:param code: 功能标识码
:return: 如果验证成功,返回True,否则返回False
"""
logger.debug(role)
rolelist = const.authority.get(str(code))
logger.debug(rolelist)
if role in rolelist:
return True
else:
return False
def authorized(code):
"""
一个装饰器,用来验证该用户是否有权限使用该功能,如果有使用该模块的权限,则
返回对应的函数,如果没有,则函数不继续往下执行
:param code: 该模块的标识
"""
def _deco(method):
def wrappers(self, *args, **kwargs):
role = self.get_current_user_role()
resp = _auth_user_authority(code, role)
if resp:
return method(self, *args, **kwargs)
else:
logger.debug("该用户没有此功能的权限")
return self.write(ResponseJSON(403, description="No authority for the function").resultDict()) # 该用户没有该权限
return wrappers
return _deco
def get_system_time(pretty=True):
"""
该函数用于返回系统当前时间
:return:当前系统时间
"""
if pretty:
ISOTIMEFORMAT = "%Y-%m-%d-%X"
current_time = time.strftime(ISOTIMEFORMAT, time.localtime(time.time()))
else:
current_time = time.time()
return current_time
| gpl-3.0 | 1,503,458,384,952,282,600 | 30.134771 | 122 | 0.565492 | false |
leighpauls/k2cro4 | third_party/pymox/src/mox_test.py | 1 | 76281 | #!/usr/bin/python2.4
#
# Unit tests for Mox.
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import unittest
import re
import mox
import mox_test_helper
OS_LISTDIR = mox_test_helper.os.listdir
class ExpectedMethodCallsErrorTest(unittest.TestCase):
"""Test creation and string conversion of ExpectedMethodCallsError."""
def testAtLeastOneMethod(self):
self.assertRaises(ValueError, mox.ExpectedMethodCallsError, [])
def testOneError(self):
method = mox.MockMethod("testMethod", [], False)
method(1, 2).AndReturn('output')
e = mox.ExpectedMethodCallsError([method])
self.assertEqual(
"Verify: Expected methods never called:\n"
" 0. testMethod(1, 2) -> 'output'",
str(e))
def testManyErrors(self):
method1 = mox.MockMethod("testMethod", [], False)
method1(1, 2).AndReturn('output')
method2 = mox.MockMethod("testMethod", [], False)
method2(a=1, b=2, c="only named")
method3 = mox.MockMethod("testMethod2", [], False)
method3().AndReturn(44)
method4 = mox.MockMethod("testMethod", [], False)
method4(1, 2).AndReturn('output')
e = mox.ExpectedMethodCallsError([method1, method2, method3, method4])
self.assertEqual(
"Verify: Expected methods never called:\n"
" 0. testMethod(1, 2) -> 'output'\n"
" 1. testMethod(a=1, b=2, c='only named') -> None\n"
" 2. testMethod2() -> 44\n"
" 3. testMethod(1, 2) -> 'output'",
str(e))
class OrTest(unittest.TestCase):
"""Test Or correctly chains Comparators."""
def testValidOr(self):
"""Or should be True if either Comparator returns True."""
self.assert_(mox.Or(mox.IsA(dict), mox.IsA(str)) == {})
self.assert_(mox.Or(mox.IsA(dict), mox.IsA(str)) == 'test')
self.assert_(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test')
def testInvalidOr(self):
"""Or should be False if both Comparators return False."""
self.failIf(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0)
class AndTest(unittest.TestCase):
"""Test And correctly chains Comparators."""
def testValidAnd(self):
"""And should be True if both Comparators return True."""
self.assert_(mox.And(mox.IsA(str), mox.IsA(str)) == '1')
def testClauseOneFails(self):
"""And should be False if the first Comparator returns False."""
self.failIf(mox.And(mox.IsA(dict), mox.IsA(str)) == '1')
def testAdvancedUsage(self):
"""And should work with other Comparators.
Note: this test is reliant on In and ContainsKeyValue.
"""
test_dict = {"mock" : "obj", "testing" : "isCOOL"}
self.assert_(mox.And(mox.In("testing"),
mox.ContainsKeyValue("mock", "obj")) == test_dict)
def testAdvancedUsageFails(self):
"""Note: this test is reliant on In and ContainsKeyValue."""
test_dict = {"mock" : "obj", "testing" : "isCOOL"}
self.failIf(mox.And(mox.In("NOTFOUND"),
mox.ContainsKeyValue("mock", "obj")) == test_dict)
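# Illustrative sketch of how And/Or comparators are typically combined when
# recording an expectation; the mocked method names (Configure, Lookup) and
# the argument values here are hypothetical.
def _example_comparator_chaining():
  m = mox.Mox()
  service = m.CreateMockAnything()
  # Argument must be a dict containing the key 'host' and the pair port=80.
  service.Configure(mox.And(mox.In('host'), mox.ContainsKeyValue('port', 80)))
  # Argument may be either a string or an integer.
  service.Lookup(mox.Or(mox.IsA(str), mox.IsA(int)))
  m.ReplayAll()
  service.Configure({'host': 'example.com', 'port': 80})
  service.Lookup(8080)
  m.VerifyAll()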
class FuncTest(unittest.TestCase):
"""Test Func correctly evaluates based upon true-false return."""
def testFuncTrueFalseEvaluation(self):
"""Should return True if the validating function returns True."""
equals_one = lambda x: x == 1
always_none = lambda x: None
self.assert_(mox.Func(equals_one) == 1)
self.failIf(mox.Func(equals_one) == 0)
self.failIf(mox.Func(always_none) == 1)
self.failIf(mox.Func(always_none) == 0)
self.failIf(mox.Func(always_none) == None)
def testFuncExceptionPropagation(self):
"""Exceptions within the validating function should propagate."""
class TestException(Exception):
pass
def raiseExceptionOnNotOne(value):
if value != 1:
raise TestException
else:
return True
self.assert_(mox.Func(raiseExceptionOnNotOne) == 1)
self.assertRaises(TestException, mox.Func(raiseExceptionOnNotOne).__eq__, 2)
class SameElementsAsTest(unittest.TestCase):
"""Test SameElementsAs correctly identifies sequences with same elements."""
def testSortedLists(self):
"""Should return True if two lists are exactly equal."""
self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c'])
def testUnsortedLists(self):
"""Should return True if two lists are unequal but have same elements."""
self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1])
def testUnhashableLists(self):
"""Should return True if two lists have the same unhashable elements."""
self.assert_(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) ==
[{2: 'b'}, {'a': 1}])
def testEmptyLists(self):
"""Should return True for two empty lists."""
self.assert_(mox.SameElementsAs([]) == [])
def testUnequalLists(self):
"""Should return False if the lists are not equal."""
self.failIf(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c'])
def testUnequalUnhashableLists(self):
"""Should return False if two lists with unhashable elements are unequal."""
self.failIf(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) == [{2: 'b'}])
def testActualIsNotASequence(self):
"""Should return False if the actual object is not a sequence."""
self.failIf(mox.SameElementsAs([1]) == object())
def testOneUnhashableObjectInActual(self):
"""Store the entire iterator for a correct comparison.
In a previous version of SameElementsAs, iteration stopped when an
unhashable object was encountered and then was restarted, so the actual list
appeared smaller than it was.
"""
self.failIf(mox.SameElementsAs([1, 2]) == iter([{}, 1, 2]))
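# Illustrative sketch: SameElementsAs is typically used when the order of the
# elements in a recorded argument is not deterministic.  The mocked method
# name (Process) is hypothetical.
def _example_same_elements_as():
  m = mox.Mox()
  worker = m.CreateMockAnything()
  worker.Process(mox.SameElementsAs([1, 2, 3]))
  m.ReplayAll()
  worker.Process([3, 1, 2])  # different order, comparison still succeeds
  m.VerifyAll()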
class ContainsKeyValueTest(unittest.TestCase):
"""Test ContainsKeyValue correctly identifies key/value pairs in a dict.
"""
def testValidPair(self):
"""Should return True if the key value is in the dict."""
self.assert_(mox.ContainsKeyValue("key", 1) == {"key": 1})
def testInvalidValue(self):
"""Should return False if the value is not correct."""
self.failIf(mox.ContainsKeyValue("key", 1) == {"key": 2})
def testInvalidKey(self):
"""Should return False if they key is not in the dict."""
self.failIf(mox.ContainsKeyValue("qux", 1) == {"key": 2})
class ContainsAttributeValueTest(unittest.TestCase):
"""Test ContainsAttributeValue correctly identifies properties in an object.
"""
def setUp(self):
"""Create an object to test with."""
class TestObject(object):
key = 1
self.test_object = TestObject()
def testValidPair(self):
"""Should return True if the object has the key attribute and it matches."""
self.assert_(mox.ContainsAttributeValue("key", 1) == self.test_object)
def testInvalidValue(self):
"""Should return False if the value is not correct."""
self.failIf(mox.ContainsKeyValue("key", 2) == self.test_object)
def testInvalidKey(self):
"""Should return False if they the object doesn't have the property."""
self.failIf(mox.ContainsKeyValue("qux", 1) == self.test_object)
class InTest(unittest.TestCase):
"""Test In correctly identifies a key in a list/dict"""
def testItemInList(self):
"""Should return True if the item is in the list."""
self.assert_(mox.In(1) == [1, 2, 3])
def testKeyInDict(self):
"""Should return True if the item is a key in a dict."""
self.assert_(mox.In("test") == {"test" : "module"})
def testItemInTuple(self):
"""Should return True if the item is in the list."""
self.assert_(mox.In(1) == (1, 2, 3))
def testTupleInTupleOfTuples(self):
self.assert_(mox.In((1, 2, 3)) == ((1, 2, 3), (1, 2)))
def testItemNotInList(self):
self.failIf(mox.In(1) == [2, 3])
def testTupleNotInTupleOfTuples(self):
self.failIf(mox.In((1, 2)) == ((1, 2, 3), (4, 5)))
class NotTest(unittest.TestCase):
"""Test Not correctly identifies False predicates."""
def testItemInList(self):
"""Should return True if the item is NOT in the list."""
self.assert_(mox.Not(mox.In(42)) == [1, 2, 3])
def testKeyInDict(self):
"""Should return True if the item is NOT a key in a dict."""
self.assert_(mox.Not(mox.In("foo")) == {"key" : 42})
def testInvalidKeyWithNot(self):
"""Should return False if they key is NOT in the dict."""
self.assert_(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2})
class StrContainsTest(unittest.TestCase):
"""Test StrContains correctly checks for substring occurrence of a parameter.
"""
def testValidSubstringAtStart(self):
"""Should return True if the substring is at the start of the string."""
self.assert_(mox.StrContains("hello") == "hello world")
def testValidSubstringInMiddle(self):
"""Should return True if the substring is in the middle of the string."""
self.assert_(mox.StrContains("lo wo") == "hello world")
def testValidSubstringAtEnd(self):
"""Should return True if the substring is at the end of the string."""
self.assert_(mox.StrContains("ld") == "hello world")
  def testInvalidSubstring(self):
"""Should return False if the substring is not in the string."""
self.failIf(mox.StrContains("AAA") == "hello world")
def testMultipleMatches(self):
"""Should return True if there are multiple occurances of substring."""
self.assert_(mox.StrContains("abc") == "ababcabcabcababc")
class RegexTest(unittest.TestCase):
"""Test Regex correctly matches regular expressions."""
def testIdentifyBadSyntaxDuringInit(self):
"""The user should know immediately if a regex has bad syntax."""
self.assertRaises(re.error, mox.Regex, '(a|b')
def testPatternInMiddle(self):
"""Should return True if the pattern matches at the middle of the string.
This ensures that re.search is used (instead of re.find).
"""
self.assert_(mox.Regex(r"a\s+b") == "x y z a b c")
def testNonMatchPattern(self):
"""Should return False if the pattern does not match the string."""
self.failIf(mox.Regex(r"a\s+b") == "x y z")
def testFlagsPassedCorrectly(self):
"""Should return True as we pass IGNORECASE flag."""
self.assert_(mox.Regex(r"A", re.IGNORECASE) == "a")
def testReprWithoutFlags(self):
"""repr should return the regular expression pattern."""
self.assert_(repr(mox.Regex(r"a\s+b")) == "<regular expression 'a\s+b'>")
def testReprWithFlags(self):
"""repr should return the regular expression pattern and flags."""
self.assert_(repr(mox.Regex(r"a\s+b", flags=4)) ==
"<regular expression 'a\s+b', flags=4>")
class IsTest(unittest.TestCase):
"""Verify Is correctly checks equality based upon identity, not value"""
class AlwaysComparesTrue(object):
def __eq__(self, other):
return True
def __cmp__(self, other):
return 0
def __ne__(self, other):
return False
def testEqualityValid(self):
o1 = self.AlwaysComparesTrue()
self.assertTrue(mox.Is(o1), o1)
def testEqualityInvalid(self):
o1 = self.AlwaysComparesTrue()
o2 = self.AlwaysComparesTrue()
self.assertTrue(o1 == o2)
# but...
self.assertFalse(mox.Is(o1) == o2)
def testInequalityValid(self):
o1 = self.AlwaysComparesTrue()
o2 = self.AlwaysComparesTrue()
self.assertTrue(mox.Is(o1) != o2)
def testInequalityInvalid(self):
o1 = self.AlwaysComparesTrue()
self.assertFalse(mox.Is(o1) != o1)
def testEqualityInListValid(self):
o1 = self.AlwaysComparesTrue()
o2 = self.AlwaysComparesTrue()
isa_list = [mox.Is(o1), mox.Is(o2)]
str_list = [o1, o2]
self.assertTrue(isa_list == str_list)
  def testEqualityInListInvalid(self):
o1 = self.AlwaysComparesTrue()
o2 = self.AlwaysComparesTrue()
isa_list = [mox.Is(o1), mox.Is(o2)]
mixed_list = [o2, o1]
self.assertFalse(isa_list == mixed_list)
class IsATest(unittest.TestCase):
"""Verify IsA correctly checks equality based upon class type, not value."""
def testEqualityValid(self):
"""Verify that == correctly identifies objects of the same type."""
self.assert_(mox.IsA(str) == 'test')
def testEqualityInvalid(self):
"""Verify that == correctly identifies objects of different types."""
self.failIf(mox.IsA(str) == 10)
def testInequalityValid(self):
"""Verify that != identifies objects of different type."""
self.assert_(mox.IsA(str) != 10)
def testInequalityInvalid(self):
"""Verify that != correctly identifies objects of the same type."""
self.failIf(mox.IsA(str) != "test")
def testEqualityInListValid(self):
"""Verify list contents are properly compared."""
isa_list = [mox.IsA(str), mox.IsA(str)]
str_list = ["abc", "def"]
self.assert_(isa_list == str_list)
  def testEqualityInListInvalid(self):
"""Verify list contents are properly compared."""
isa_list = [mox.IsA(str),mox.IsA(str)]
mixed_list = ["abc", 123]
self.failIf(isa_list == mixed_list)
def testSpecialTypes(self):
"""Verify that IsA can handle objects like cStringIO.StringIO."""
isA = mox.IsA(cStringIO.StringIO())
stringIO = cStringIO.StringIO()
self.assert_(isA == stringIO)
class IsAlmostTest(unittest.TestCase):
"""Verify IsAlmost correctly checks equality of floating point numbers."""
def testEqualityValid(self):
"""Verify that == correctly identifies nearly equivalent floats."""
self.assertEquals(mox.IsAlmost(1.8999999999), 1.9)
def testEqualityInvalid(self):
"""Verify that == correctly identifies non-equivalent floats."""
self.assertNotEquals(mox.IsAlmost(1.899), 1.9)
def testEqualityWithPlaces(self):
"""Verify that specifying places has the desired effect."""
self.assertNotEquals(mox.IsAlmost(1.899), 1.9)
self.assertEquals(mox.IsAlmost(1.899, places=2), 1.9)
def testNonNumericTypes(self):
"""Verify that IsAlmost handles non-numeric types properly."""
self.assertNotEquals(mox.IsAlmost(1.8999999999), '1.9')
self.assertNotEquals(mox.IsAlmost('1.8999999999'), 1.9)
self.assertNotEquals(mox.IsAlmost('1.8999999999'), '1.9')
class ValueRememberTest(unittest.TestCase):
"""Verify comparing argument against remembered value."""
def testValueEquals(self):
"""Verify that value will compare to stored value."""
value = mox.Value()
value.store_value('hello world')
self.assertEquals(value, 'hello world')
def testNoValue(self):
"""Verify that uninitialized value does not compare to "empty" values."""
value = mox.Value()
self.assertNotEquals(value, None)
self.assertNotEquals(value, False)
self.assertNotEquals(value, 0)
self.assertNotEquals(value, '')
self.assertNotEquals(value, ())
self.assertNotEquals(value, [])
self.assertNotEquals(value, {})
self.assertNotEquals(value, object())
self.assertNotEquals(value, set())
def testRememberValue(self):
"""Verify that comparing against remember will store argument."""
value = mox.Value()
remember = mox.Remember(value)
self.assertNotEquals(value, 'hello world') # value not yet stored.
self.assertEquals(remember, 'hello world') # store value here.
self.assertEquals(value, 'hello world') # compare against stored value.
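# Illustrative sketch: Value and Remember are usually paired so an argument
# captured during one expected call can be required again in a later call.
# The mocked method names (Store, Fetch) are hypothetical.
def _example_value_remember():
  m = mox.Mox()
  cache = m.CreateMockAnything()
  captured_key = mox.Value()
  cache.Store(mox.Remember(captured_key))  # stores the actual argument
  cache.Fetch(captured_key)                # must match the stored argument
  m.ReplayAll()
  cache.Store('session-42')
  cache.Fetch('session-42')
  m.VerifyAll()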
class MockMethodTest(unittest.TestCase):
"""Test class to verify that the MockMethod class is working correctly."""
def setUp(self):
self.expected_method = mox.MockMethod("testMethod", [], False)(['original'])
self.mock_method = mox.MockMethod("testMethod", [self.expected_method],
True)
def testNameAttribute(self):
"""Should provide a __name__ attribute."""
self.assertEquals('testMethod', self.mock_method.__name__)
def testAndReturnNoneByDefault(self):
"""Should return None by default."""
return_value = self.mock_method(['original'])
self.assert_(return_value == None)
def testAndReturnValue(self):
"""Should return a specificed return value."""
expected_return_value = "test"
self.expected_method.AndReturn(expected_return_value)
return_value = self.mock_method(['original'])
self.assert_(return_value == expected_return_value)
def testAndRaiseException(self):
"""Should raise a specified exception."""
expected_exception = Exception('test exception')
self.expected_method.AndRaise(expected_exception)
self.assertRaises(Exception, self.mock_method)
def testWithSideEffects(self):
"""Should call state modifier."""
local_list = ['original']
def modifier(mutable_list):
self.assertTrue(local_list is mutable_list)
mutable_list[0] = 'mutation'
self.expected_method.WithSideEffects(modifier).AndReturn(1)
self.mock_method(local_list)
self.assertEquals('mutation', local_list[0])
def testWithReturningSideEffects(self):
"""Should call state modifier and propagate its return value."""
local_list = ['original']
expected_return = 'expected_return'
def modifier_with_return(mutable_list):
self.assertTrue(local_list is mutable_list)
mutable_list[0] = 'mutation'
return expected_return
self.expected_method.WithSideEffects(modifier_with_return)
actual_return = self.mock_method(local_list)
self.assertEquals('mutation', local_list[0])
self.assertEquals(expected_return, actual_return)
def testWithReturningSideEffectsWithAndReturn(self):
"""Should call state modifier and ignore its return value."""
local_list = ['original']
expected_return = 'expected_return'
unexpected_return = 'unexpected_return'
def modifier_with_return(mutable_list):
self.assertTrue(local_list is mutable_list)
mutable_list[0] = 'mutation'
return unexpected_return
self.expected_method.WithSideEffects(modifier_with_return).AndReturn(
expected_return)
actual_return = self.mock_method(local_list)
self.assertEquals('mutation', local_list[0])
self.assertEquals(expected_return, actual_return)
def testEqualityNoParamsEqual(self):
"""Methods with the same name and without params should be equal."""
expected_method = mox.MockMethod("testMethod", [], False)
self.assertEqual(self.mock_method, expected_method)
def testEqualityNoParamsNotEqual(self):
"""Methods with different names and without params should not be equal."""
expected_method = mox.MockMethod("otherMethod", [], False)
self.failIfEqual(self.mock_method, expected_method)
def testEqualityParamsEqual(self):
"""Methods with the same name and parameters should be equal."""
params = [1, 2, 3]
expected_method = mox.MockMethod("testMethod", [], False)
expected_method._params = params
self.mock_method._params = params
self.assertEqual(self.mock_method, expected_method)
def testEqualityParamsNotEqual(self):
"""Methods with the same name and different params should not be equal."""
expected_method = mox.MockMethod("testMethod", [], False)
expected_method._params = [1, 2, 3]
self.mock_method._params = ['a', 'b', 'c']
self.failIfEqual(self.mock_method, expected_method)
def testEqualityNamedParamsEqual(self):
"""Methods with the same name and same named params should be equal."""
named_params = {"input1": "test", "input2": "params"}
expected_method = mox.MockMethod("testMethod", [], False)
expected_method._named_params = named_params
self.mock_method._named_params = named_params
self.assertEqual(self.mock_method, expected_method)
def testEqualityNamedParamsNotEqual(self):
"""Methods with the same name and diffnamed params should not be equal."""
expected_method = mox.MockMethod("testMethod", [], False)
expected_method._named_params = {"input1": "test", "input2": "params"}
self.mock_method._named_params = {"input1": "test2", "input2": "params2"}
self.failIfEqual(self.mock_method, expected_method)
def testEqualityWrongType(self):
"""Method should not be equal to an object of a different type."""
self.failIfEqual(self.mock_method, "string?")
def testObjectEquality(self):
"""Equality of objects should work without a Comparator"""
instA = TestClass();
instB = TestClass();
params = [instA, ]
expected_method = mox.MockMethod("testMethod", [], False)
expected_method._params = params
self.mock_method._params = [instB, ]
self.assertEqual(self.mock_method, expected_method)
def testStrConversion(self):
method = mox.MockMethod("f", [], False)
method(1, 2, "st", n1=8, n2="st2")
self.assertEqual(str(method), ("f(1, 2, 'st', n1=8, n2='st2') -> None"))
method = mox.MockMethod("testMethod", [], False)
method(1, 2, "only positional")
self.assertEqual(str(method), "testMethod(1, 2, 'only positional') -> None")
method = mox.MockMethod("testMethod", [], False)
method(a=1, b=2, c="only named")
self.assertEqual(str(method),
"testMethod(a=1, b=2, c='only named') -> None")
method = mox.MockMethod("testMethod", [], False)
method()
self.assertEqual(str(method), "testMethod() -> None")
method = mox.MockMethod("testMethod", [], False)
method(x="only 1 parameter")
self.assertEqual(str(method), "testMethod(x='only 1 parameter') -> None")
method = mox.MockMethod("testMethod", [], False)
method().AndReturn('return_value')
self.assertEqual(str(method), "testMethod() -> 'return_value'")
method = mox.MockMethod("testMethod", [], False)
method().AndReturn(('a', {1: 2}))
self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})")
class MockAnythingTest(unittest.TestCase):
"""Verify that the MockAnything class works as expected."""
def setUp(self):
self.mock_object = mox.MockAnything()
def testRepr(self):
"""Calling repr on a MockAnything instance must work."""
self.assertEqual('<MockAnything instance>', repr(self.mock_object))
def testCanMockStr(self):
self.mock_object.__str__().AndReturn("foo");
self.mock_object._Replay()
actual = str(self.mock_object)
    self.mock_object._Verify()
self.assertEquals("foo", actual)
def testSetupMode(self):
"""Verify the mock will accept any call."""
self.mock_object.NonsenseCall()
self.assert_(len(self.mock_object._expected_calls_queue) == 1)
def testReplayWithExpectedCall(self):
"""Verify the mock replays method calls as expected."""
self.mock_object.ValidCall() # setup method call
self.mock_object._Replay() # start replay mode
self.mock_object.ValidCall() # make method call
def testReplayWithUnexpectedCall(self):
"""Unexpected method calls should raise UnexpectedMethodCallError."""
self.mock_object.ValidCall() # setup method call
self.mock_object._Replay() # start replay mode
self.assertRaises(mox.UnexpectedMethodCallError,
self.mock_object.OtherValidCall)
def testVerifyWithCompleteReplay(self):
"""Verify should not raise an exception for a valid replay."""
self.mock_object.ValidCall() # setup method call
self.mock_object._Replay() # start replay mode
self.mock_object.ValidCall() # make method call
self.mock_object._Verify()
def testVerifyWithIncompleteReplay(self):
"""Verify should raise an exception if the replay was not complete."""
self.mock_object.ValidCall() # setup method call
self.mock_object._Replay() # start replay mode
# ValidCall() is never made
self.assertRaises(mox.ExpectedMethodCallsError, self.mock_object._Verify)
def testSpecialClassMethod(self):
"""Verify should not raise an exception when special methods are used."""
self.mock_object[1].AndReturn(True)
self.mock_object._Replay()
returned_val = self.mock_object[1]
self.assert_(returned_val)
self.mock_object._Verify()
def testNonzero(self):
"""You should be able to use the mock object in an if."""
self.mock_object._Replay()
if self.mock_object:
pass
def testNotNone(self):
"""Mock should be comparable to None."""
self.mock_object._Replay()
if self.mock_object is not None:
pass
if self.mock_object is None:
pass
def testEquals(self):
"""A mock should be able to compare itself to another object."""
self.mock_object._Replay()
self.assertEquals(self.mock_object, self.mock_object)
def testEqualsMockFailure(self):
"""Verify equals identifies unequal objects."""
self.mock_object.SillyCall()
self.mock_object._Replay()
self.assertNotEquals(self.mock_object, mox.MockAnything())
def testEqualsInstanceFailure(self):
"""Verify equals identifies that objects are different instances."""
self.mock_object._Replay()
self.assertNotEquals(self.mock_object, TestClass())
def testNotEquals(self):
"""Verify not equals works."""
self.mock_object._Replay()
self.assertFalse(self.mock_object != self.mock_object)
def testNestedMockCallsRecordedSerially(self):
"""Test that nested calls work when recorded serially."""
self.mock_object.CallInner().AndReturn(1)
self.mock_object.CallOuter(1)
self.mock_object._Replay()
self.mock_object.CallOuter(self.mock_object.CallInner())
self.mock_object._Verify()
def testNestedMockCallsRecordedNested(self):
"""Test that nested cals work when recorded in a nested fashion."""
self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1))
self.mock_object._Replay()
self.mock_object.CallOuter(self.mock_object.CallInner())
self.mock_object._Verify()
def testIsCallable(self):
"""Test that MockAnything can even mock a simple callable.
This is handy for "stubbing out" a method in a module with a mock, and
verifying that it was called.
"""
self.mock_object().AndReturn('mox0rd')
self.mock_object._Replay()
self.assertEquals('mox0rd', self.mock_object())
self.mock_object._Verify()
def testIsReprable(self):
"""Test that MockAnythings can be repr'd without causing a failure."""
self.failUnless('MockAnything' in repr(self.mock_object))
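# Illustrative sketch: because a MockAnything is itself callable, it can stand
# in for a plain function; swapping it into a module is shown only as a
# commented hint since the module/function names would be project specific.
def _example_callable_mock_anything():
  m = mox.Mox()
  fake_fetch = m.CreateMockAnything()
  fake_fetch('user-1').AndReturn(10)
  # some_module.fetch_quota = fake_fetch  # hypothetical stubbing target
  m.ReplayAll()
  assert fake_fetch('user-1') == 10
  m.VerifyAll()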
class MethodCheckerTest(unittest.TestCase):
"""Tests MockMethod's use of MethodChecker method."""
def testNoParameters(self):
method = mox.MockMethod('NoParameters', [], False,
CheckCallTestClass.NoParameters)
method()
self.assertRaises(AttributeError, method, 1)
self.assertRaises(AttributeError, method, 1, 2)
self.assertRaises(AttributeError, method, a=1)
self.assertRaises(AttributeError, method, 1, b=2)
def testOneParameter(self):
method = mox.MockMethod('OneParameter', [], False,
CheckCallTestClass.OneParameter)
self.assertRaises(AttributeError, method)
method(1)
method(a=1)
self.assertRaises(AttributeError, method, b=1)
self.assertRaises(AttributeError, method, 1, 2)
self.assertRaises(AttributeError, method, 1, a=2)
self.assertRaises(AttributeError, method, 1, b=2)
def testTwoParameters(self):
method = mox.MockMethod('TwoParameters', [], False,
CheckCallTestClass.TwoParameters)
self.assertRaises(AttributeError, method)
self.assertRaises(AttributeError, method, 1)
self.assertRaises(AttributeError, method, a=1)
self.assertRaises(AttributeError, method, b=1)
method(1, 2)
method(1, b=2)
method(a=1, b=2)
method(b=2, a=1)
self.assertRaises(AttributeError, method, b=2, c=3)
self.assertRaises(AttributeError, method, a=1, b=2, c=3)
self.assertRaises(AttributeError, method, 1, 2, 3)
self.assertRaises(AttributeError, method, 1, 2, 3, 4)
self.assertRaises(AttributeError, method, 3, a=1, b=2)
def testOneDefaultValue(self):
method = mox.MockMethod('OneDefaultValue', [], False,
CheckCallTestClass.OneDefaultValue)
method()
method(1)
method(a=1)
self.assertRaises(AttributeError, method, b=1)
self.assertRaises(AttributeError, method, 1, 2)
self.assertRaises(AttributeError, method, 1, a=2)
self.assertRaises(AttributeError, method, 1, b=2)
def testTwoDefaultValues(self):
method = mox.MockMethod('TwoDefaultValues', [], False,
CheckCallTestClass.TwoDefaultValues)
self.assertRaises(AttributeError, method)
self.assertRaises(AttributeError, method, c=3)
self.assertRaises(AttributeError, method, 1)
self.assertRaises(AttributeError, method, 1, d=4)
self.assertRaises(AttributeError, method, 1, d=4, c=3)
method(1, 2)
method(a=1, b=2)
method(1, 2, 3)
method(1, 2, 3, 4)
method(1, 2, c=3)
method(1, 2, c=3, d=4)
method(1, 2, d=4, c=3)
method(d=4, c=3, a=1, b=2)
self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5)
self.assertRaises(AttributeError, method, 1, 2, e=9)
self.assertRaises(AttributeError, method, a=1, b=2, e=9)
def testArgs(self):
method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args)
self.assertRaises(AttributeError, method)
self.assertRaises(AttributeError, method, 1)
method(1, 2)
method(a=1, b=2)
method(1, 2, 3)
method(1, 2, 3, 4)
self.assertRaises(AttributeError, method, 1, 2, a=3)
self.assertRaises(AttributeError, method, 1, 2, c=3)
def testKwargs(self):
method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs)
self.assertRaises(AttributeError, method)
method(1)
method(1, 2)
method(a=1, b=2)
method(b=2, a=1)
self.assertRaises(AttributeError, method, 1, 2, 3)
self.assertRaises(AttributeError, method, 1, 2, a=3)
method(1, 2, c=3)
method(a=1, b=2, c=3)
method(c=3, a=1, b=2)
method(a=1, b=2, c=3, d=4)
self.assertRaises(AttributeError, method, 1, 2, 3, 4)
def testArgsAndKwargs(self):
method = mox.MockMethod('ArgsAndKwargs', [], False,
CheckCallTestClass.ArgsAndKwargs)
self.assertRaises(AttributeError, method)
method(1)
method(1, 2)
method(1, 2, 3)
method(a=1)
method(1, b=2)
self.assertRaises(AttributeError, method, 1, a=2)
method(b=2, a=1)
method(c=3, b=2, a=1)
method(1, 2, c=3)
class CheckCallTestClass(object):
def NoParameters(self):
pass
def OneParameter(self, a):
pass
def TwoParameters(self, a, b):
pass
def OneDefaultValue(self, a=1):
pass
def TwoDefaultValues(self, a, b, c=1, d=2):
pass
def Args(self, a, b, *args):
pass
def Kwargs(self, a, b=2, **kwargs):
pass
def ArgsAndKwargs(self, a, *args, **kwargs):
pass
class MockObjectTest(unittest.TestCase):
"""Verify that the MockObject class works as exepcted."""
def setUp(self):
self.mock_object = mox.MockObject(TestClass)
def testSetupModeWithValidCall(self):
"""Verify the mock object properly mocks a basic method call."""
self.mock_object.ValidCall()
self.assert_(len(self.mock_object._expected_calls_queue) == 1)
def testSetupModeWithInvalidCall(self):
"""UnknownMethodCallError should be raised if a non-member method is called.
"""
# Note: assertRaises does not catch exceptions thrown by MockObject's
# __getattr__
try:
self.mock_object.InvalidCall()
self.fail("No exception thrown, expected UnknownMethodCallError")
except mox.UnknownMethodCallError:
pass
except Exception:
self.fail("Wrong exception type thrown, expected UnknownMethodCallError")
def testReplayWithInvalidCall(self):
"""UnknownMethodCallError should be raised if a non-member method is called.
"""
self.mock_object.ValidCall() # setup method call
self.mock_object._Replay() # start replay mode
# Note: assertRaises does not catch exceptions thrown by MockObject's
# __getattr__
try:
self.mock_object.InvalidCall()
self.fail("No exception thrown, expected UnknownMethodCallError")
except mox.UnknownMethodCallError:
pass
except Exception:
self.fail("Wrong exception type thrown, expected UnknownMethodCallError")
def testIsInstance(self):
"""Mock should be able to pass as an instance of the mocked class."""
self.assert_(isinstance(self.mock_object, TestClass))
def testFindValidMethods(self):
"""Mock should be able to mock all public methods."""
self.assert_('ValidCall' in self.mock_object._known_methods)
self.assert_('OtherValidCall' in self.mock_object._known_methods)
self.assert_('MyClassMethod' in self.mock_object._known_methods)
self.assert_('MyStaticMethod' in self.mock_object._known_methods)
self.assert_('_ProtectedCall' in self.mock_object._known_methods)
self.assert_('__PrivateCall' not in self.mock_object._known_methods)
self.assert_('_TestClass__PrivateCall' in self.mock_object._known_methods)
def testFindsSuperclassMethods(self):
"""Mock should be able to mock superclasses methods."""
self.mock_object = mox.MockObject(ChildClass)
self.assert_('ValidCall' in self.mock_object._known_methods)
self.assert_('OtherValidCall' in self.mock_object._known_methods)
self.assert_('MyClassMethod' in self.mock_object._known_methods)
self.assert_('ChildValidCall' in self.mock_object._known_methods)
def testAccessClassVariables(self):
"""Class variables should be accessible through the mock."""
self.assert_('SOME_CLASS_VAR' in self.mock_object._known_vars)
self.assert_('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars)
self.assertEquals('test_value', self.mock_object.SOME_CLASS_VAR)
def testEquals(self):
"""A mock should be able to compare itself to another object."""
self.mock_object._Replay()
self.assertEquals(self.mock_object, self.mock_object)
def testEqualsMockFailure(self):
"""Verify equals identifies unequal objects."""
self.mock_object.ValidCall()
self.mock_object._Replay()
self.assertNotEquals(self.mock_object, mox.MockObject(TestClass))
def testEqualsInstanceFailure(self):
"""Verify equals identifies that objects are different instances."""
self.mock_object._Replay()
self.assertNotEquals(self.mock_object, TestClass())
def testNotEquals(self):
"""Verify not equals works."""
self.mock_object._Replay()
self.assertFalse(self.mock_object != self.mock_object)
def testMockSetItem_ExpectedSetItem_Success(self):
"""Test that __setitem__() gets mocked in Dummy.
In this test, _Verify() succeeds.
"""
dummy = mox.MockObject(TestClass)
dummy['X'] = 'Y'
dummy._Replay()
dummy['X'] = 'Y'
dummy._Verify()
def testMockSetItem_ExpectedSetItem_NoSuccess(self):
"""Test that __setitem__() gets mocked in Dummy.
In this test, _Verify() fails.
"""
dummy = mox.MockObject(TestClass)
dummy['X'] = 'Y'
dummy._Replay()
# NOT doing dummy['X'] = 'Y'
self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
def testMockSetItem_ExpectedNoSetItem_Success(self):
"""Test that __setitem__() gets mocked in Dummy."""
dummy = mox.MockObject(TestClass)
# NOT doing dummy['X'] = 'Y'
dummy._Replay()
def call(): dummy['X'] = 'Y'
self.assertRaises(mox.UnexpectedMethodCallError, call)
def testMockSetItem_ExpectedNoSetItem_NoSuccess(self):
"""Test that __setitem__() gets mocked in Dummy.
In this test, _Verify() fails.
"""
dummy = mox.MockObject(TestClass)
# NOT doing dummy['X'] = 'Y'
dummy._Replay()
# NOT doing dummy['X'] = 'Y'
dummy._Verify()
def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self):
"""Test that __setitem__() fails if other parameters are expected."""
dummy = mox.MockObject(TestClass)
dummy['X'] = 'Y'
dummy._Replay()
def call(): dummy['wrong'] = 'Y'
self.assertRaises(mox.UnexpectedMethodCallError, call)
dummy._Verify()
def testMockSetItem_WithSubClassOfNewStyleClass(self):
class NewStyleTestClass(object):
def __init__(self):
self.my_dict = {}
def __setitem__(self, key, value):
        self.my_dict[key] = value
class TestSubClass(NewStyleTestClass):
pass
dummy = mox.MockObject(TestSubClass)
dummy[1] = 2
dummy._Replay()
dummy[1] = 2
dummy._Verify()
def testMockGetItem_ExpectedGetItem_Success(self):
"""Test that __getitem__() gets mocked in Dummy.
In this test, _Verify() succeeds.
"""
dummy = mox.MockObject(TestClass)
dummy['X'].AndReturn('value')
dummy._Replay()
self.assertEqual(dummy['X'], 'value')
dummy._Verify()
def testMockGetItem_ExpectedGetItem_NoSuccess(self):
"""Test that __getitem__() gets mocked in Dummy.
In this test, _Verify() fails.
"""
dummy = mox.MockObject(TestClass)
dummy['X'].AndReturn('value')
dummy._Replay()
# NOT doing dummy['X']
self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
def testMockGetItem_ExpectedNoGetItem_NoSuccess(self):
"""Test that __getitem__() gets mocked in Dummy."""
dummy = mox.MockObject(TestClass)
# NOT doing dummy['X']
dummy._Replay()
def call(): return dummy['X']
self.assertRaises(mox.UnexpectedMethodCallError, call)
def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self):
"""Test that __getitem__() fails if other parameters are expected."""
dummy = mox.MockObject(TestClass)
dummy['X'].AndReturn('value')
dummy._Replay()
def call(): return dummy['wrong']
self.assertRaises(mox.UnexpectedMethodCallError, call)
dummy._Verify()
def testMockGetItem_WithSubClassOfNewStyleClass(self):
class NewStyleTestClass(object):
def __getitem__(self, key):
return {1: '1', 2: '2'}[key]
class TestSubClass(NewStyleTestClass):
pass
dummy = mox.MockObject(TestSubClass)
dummy[1].AndReturn('3')
dummy._Replay()
self.assertEquals('3', dummy.__getitem__(1))
dummy._Verify()
def testMockIter_ExpectedIter_Success(self):
"""Test that __iter__() gets mocked in Dummy.
In this test, _Verify() succeeds.
"""
dummy = mox.MockObject(TestClass)
iter(dummy).AndReturn(iter(['X', 'Y']))
dummy._Replay()
self.assertEqual([x for x in dummy], ['X', 'Y'])
dummy._Verify()
def testMockContains_ExpectedContains_Success(self):
"""Test that __contains__ gets mocked in Dummy.
In this test, _Verify() succeeds.
"""
dummy = mox.MockObject(TestClass)
dummy.__contains__('X').AndReturn(True)
dummy._Replay()
self.failUnless('X' in dummy)
dummy._Verify()
def testMockContains_ExpectedContains_NoSuccess(self):
"""Test that __contains__() gets mocked in Dummy.
In this test, _Verify() fails.
"""
dummy = mox.MockObject(TestClass)
dummy.__contains__('X').AndReturn('True')
dummy._Replay()
# NOT doing 'X' in dummy
self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
def testMockContains_ExpectedContains_NonmatchingParameter(self):
"""Test that __contains__ fails if other parameters are expected."""
dummy = mox.MockObject(TestClass)
dummy.__contains__('X').AndReturn(True)
dummy._Replay()
def call(): return 'Y' in dummy
self.assertRaises(mox.UnexpectedMethodCallError, call)
dummy._Verify()
def testMockIter_ExpectedIter_NoSuccess(self):
"""Test that __iter__() gets mocked in Dummy.
In this test, _Verify() fails.
"""
dummy = mox.MockObject(TestClass)
iter(dummy).AndReturn(iter(['X', 'Y']))
dummy._Replay()
# NOT doing self.assertEqual([x for x in dummy], ['X', 'Y'])
self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
def testMockIter_ExpectedNoIter_NoSuccess(self):
"""Test that __iter__() gets mocked in Dummy."""
dummy = mox.MockObject(TestClass)
# NOT doing iter(dummy)
dummy._Replay()
def call(): return [x for x in dummy]
self.assertRaises(mox.UnexpectedMethodCallError, call)
def testMockIter_ExpectedGetItem_Success(self):
"""Test that __iter__() gets mocked in Dummy using getitem."""
dummy = mox.MockObject(SubscribtableNonIterableClass)
dummy[0].AndReturn('a')
dummy[1].AndReturn('b')
dummy[2].AndRaise(IndexError)
dummy._Replay()
self.assertEquals(['a', 'b'], [x for x in dummy])
dummy._Verify()
def testMockIter_ExpectedNoGetItem_NoSuccess(self):
"""Test that __iter__() gets mocked in Dummy using getitem."""
dummy = mox.MockObject(SubscribtableNonIterableClass)
# NOT doing dummy[index]
dummy._Replay()
function = lambda: [x for x in dummy]
self.assertRaises(mox.UnexpectedMethodCallError, function)
def testMockGetIter_WithSubClassOfNewStyleClass(self):
class NewStyleTestClass(object):
def __iter__(self):
return iter([1, 2, 3])
class TestSubClass(NewStyleTestClass):
pass
dummy = mox.MockObject(TestSubClass)
iter(dummy).AndReturn(iter(['a', 'b']))
dummy._Replay()
self.assertEquals(['a', 'b'], [x for x in dummy])
dummy._Verify()
def testInstantiationWithAdditionalAttributes(self):
mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"})
self.assertEquals(mock_object.attr1, "value")
def testCantOverrideMethodsWithAttributes(self):
self.assertRaises(ValueError, mox.MockObject, TestClass,
attrs={"ValidCall": "value"})
def testCantMockNonPublicAttributes(self):
self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
attrs={"_protected": "value"})
self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
attrs={"__private": "value"})
class MoxTest(unittest.TestCase):
"""Verify Mox works correctly."""
def setUp(self):
self.mox = mox.Mox()
def testCreateObject(self):
"""Mox should create a mock object."""
mock_obj = self.mox.CreateMock(TestClass)
def testVerifyObjectWithCompleteReplay(self):
"""Mox should replay and verify all objects it created."""
mock_obj = self.mox.CreateMock(TestClass)
mock_obj.ValidCall()
mock_obj.ValidCallWithArgs(mox.IsA(TestClass))
self.mox.ReplayAll()
mock_obj.ValidCall()
mock_obj.ValidCallWithArgs(TestClass("some_value"))
self.mox.VerifyAll()
def testVerifyObjectWithIncompleteReplay(self):
"""Mox should raise an exception if a mock didn't replay completely."""
mock_obj = self.mox.CreateMock(TestClass)
mock_obj.ValidCall()
self.mox.ReplayAll()
# ValidCall() is never made
self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll)
def testEntireWorkflow(self):
"""Test the whole work flow."""
mock_obj = self.mox.CreateMock(TestClass)
mock_obj.ValidCall().AndReturn("yes")
self.mox.ReplayAll()
ret_val = mock_obj.ValidCall()
self.assertEquals("yes", ret_val)
self.mox.VerifyAll()
def testSignatureMatchingWithComparatorAsFirstArg(self):
"""Test that the first argument can be a comparator."""
def VerifyLen(val):
"""This will raise an exception when not given a list.
This exception will be raised when trying to infer/validate the
method signature.
"""
return len(val) != 1
mock_obj = self.mox.CreateMock(TestClass)
# This intentionally does not name the 'nine' param so it triggers
# deeper inspection.
mock_obj.MethodWithArgs(mox.Func(VerifyLen), mox.IgnoreArg(), None)
self.mox.ReplayAll()
mock_obj.MethodWithArgs([1, 2], "foo", None)
self.mox.VerifyAll()
def testCallableObject(self):
"""Test recording calls to a callable object works."""
mock_obj = self.mox.CreateMock(CallableClass)
mock_obj("foo").AndReturn("qux")
self.mox.ReplayAll()
ret_val = mock_obj("foo")
self.assertEquals("qux", ret_val)
self.mox.VerifyAll()
def testInheritedCallableObject(self):
"""Test recording calls to an object inheriting from a callable object."""
mock_obj = self.mox.CreateMock(InheritsFromCallable)
mock_obj("foo").AndReturn("qux")
self.mox.ReplayAll()
ret_val = mock_obj("foo")
self.assertEquals("qux", ret_val)
self.mox.VerifyAll()
def testCallOnNonCallableObject(self):
"""Test that you cannot call a non-callable object."""
mock_obj = self.mox.CreateMock(TestClass)
self.assertRaises(TypeError, mock_obj)
def testCallableObjectWithBadCall(self):
"""Test verifying calls to a callable object works."""
mock_obj = self.mox.CreateMock(CallableClass)
mock_obj("foo").AndReturn("qux")
self.mox.ReplayAll()
self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ")
def testCallableObjectVerifiesSignature(self):
mock_obj = self.mox.CreateMock(CallableClass)
# Too many arguments
self.assertRaises(AttributeError, mock_obj, "foo", "bar")
def testUnorderedGroup(self):
"""Test that using one unordered group works."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Method(1).InAnyOrder()
mock_obj.Method(2).InAnyOrder()
self.mox.ReplayAll()
mock_obj.Method(2)
mock_obj.Method(1)
self.mox.VerifyAll()
def testUnorderedGroupsInline(self):
"""Unordered groups should work in the context of ordered calls."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).InAnyOrder()
mock_obj.Method(2).InAnyOrder()
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
mock_obj.Method(2)
mock_obj.Method(1)
mock_obj.Close()
self.mox.VerifyAll()
def testMultipleUnorderdGroups(self):
"""Multiple unoreded groups should work."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Method(1).InAnyOrder()
mock_obj.Method(2).InAnyOrder()
mock_obj.Foo().InAnyOrder('group2')
mock_obj.Bar().InAnyOrder('group2')
self.mox.ReplayAll()
mock_obj.Method(2)
mock_obj.Method(1)
mock_obj.Bar()
mock_obj.Foo()
self.mox.VerifyAll()
def testMultipleUnorderdGroupsOutOfOrder(self):
"""Multiple unordered groups should maintain external order"""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Method(1).InAnyOrder()
mock_obj.Method(2).InAnyOrder()
mock_obj.Foo().InAnyOrder('group2')
mock_obj.Bar().InAnyOrder('group2')
self.mox.ReplayAll()
mock_obj.Method(2)
self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar)
def testUnorderedGroupWithReturnValue(self):
"""Unordered groups should work with return values."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).InAnyOrder().AndReturn(9)
mock_obj.Method(2).InAnyOrder().AndReturn(10)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
actual_two = mock_obj.Method(2)
actual_one = mock_obj.Method(1)
mock_obj.Close()
self.assertEquals(9, actual_one)
self.assertEquals(10, actual_two)
self.mox.VerifyAll()
def testUnorderedGroupWithComparator(self):
"""Unordered groups should work with comparators"""
def VerifyOne(cmd):
if not isinstance(cmd, str):
self.fail('Unexpected type passed to comparator: ' + str(cmd))
return cmd == 'test'
def VerifyTwo(cmd):
return True
mock_obj = self.mox.CreateMockAnything()
mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\
AndReturn('yes test')
mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\
AndReturn('anything')
self.mox.ReplayAll()
mock_obj.Foo(['test'], 'anything', bar=1)
mock_obj.Foo(['test'], 'test', bar=1)
self.mox.VerifyAll()
def testMultipleTimes(self):
"""Test if MultipleTimesGroup works."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Method(1).MultipleTimes().AndReturn(9)
mock_obj.Method(2).AndReturn(10)
mock_obj.Method(3).MultipleTimes().AndReturn(42)
self.mox.ReplayAll()
actual_one = mock_obj.Method(1)
second_one = mock_obj.Method(1) # This tests MultipleTimes.
actual_two = mock_obj.Method(2)
actual_three = mock_obj.Method(3)
mock_obj.Method(3)
mock_obj.Method(3)
self.mox.VerifyAll()
self.assertEquals(9, actual_one)
self.assertEquals(9, second_one) # Repeated calls should return same number.
self.assertEquals(10, actual_two)
self.assertEquals(42, actual_three)
def testMultipleTimesUsingIsAParameter(self):
"""Test if MultipleTimesGroup works with a IsA parameter."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
actual_one = mock_obj.Method("1")
second_one = mock_obj.Method("2") # This tests MultipleTimes.
mock_obj.Close()
self.mox.VerifyAll()
self.assertEquals(9, actual_one)
self.assertEquals(9, second_one) # Repeated calls should return same number.
def testMutlipleTimesUsingFunc(self):
"""Test that the Func is not evaluated more times than necessary.
If a Func() has side effects, it can cause a passing test to fail.
"""
self.counter = 0
def MyFunc(actual_str):
"""Increment the counter if actual_str == 'foo'."""
if actual_str == 'foo':
self.counter += 1
return True
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(mox.Func(MyFunc)).MultipleTimes()
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
mock_obj.Method('foo')
mock_obj.Method('foo')
mock_obj.Method('not-foo')
mock_obj.Close()
self.mox.VerifyAll()
self.assertEquals(2, self.counter)
def testMultipleTimesThreeMethods(self):
"""Test if MultipleTimesGroup works with three or more methods."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).MultipleTimes().AndReturn(9)
mock_obj.Method(2).MultipleTimes().AndReturn(8)
mock_obj.Method(3).MultipleTimes().AndReturn(7)
mock_obj.Method(4).AndReturn(10)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
actual_three = mock_obj.Method(3)
mock_obj.Method(1)
actual_two = mock_obj.Method(2)
mock_obj.Method(3)
actual_one = mock_obj.Method(1)
actual_four = mock_obj.Method(4)
mock_obj.Close()
self.assertEquals(9, actual_one)
self.assertEquals(8, actual_two)
self.assertEquals(7, actual_three)
self.assertEquals(10, actual_four)
self.mox.VerifyAll()
def testMultipleTimesMissingOne(self):
"""Test if MultipleTimesGroup fails if one method is missing."""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).MultipleTimes().AndReturn(9)
mock_obj.Method(2).MultipleTimes().AndReturn(8)
mock_obj.Method(3).MultipleTimes().AndReturn(7)
mock_obj.Method(4).AndReturn(10)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
mock_obj.Method(3)
mock_obj.Method(2)
mock_obj.Method(3)
mock_obj.Method(3)
mock_obj.Method(2)
self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4)
def testMultipleTimesTwoGroups(self):
"""Test if MultipleTimesGroup works with a group after a
MultipleTimesGroup.
"""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).MultipleTimes().AndReturn(9)
mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
actual_one = mock_obj.Method(1)
mock_obj.Method(1)
actual_three = mock_obj.Method(3)
mock_obj.Method(3)
mock_obj.Close()
self.assertEquals(9, actual_one)
self.assertEquals(42, actual_three)
self.mox.VerifyAll()
def testMultipleTimesTwoGroupsFailure(self):
"""Test if MultipleTimesGroup fails with a group after a
MultipleTimesGroup.
"""
mock_obj = self.mox.CreateMockAnything()
mock_obj.Open()
mock_obj.Method(1).MultipleTimes().AndReturn(9)
mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
mock_obj.Close()
self.mox.ReplayAll()
mock_obj.Open()
actual_one = mock_obj.Method(1)
mock_obj.Method(1)
actual_three = mock_obj.Method(3)
self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1)
def testWithSideEffects(self):
"""Test side effect operations actually modify their target objects."""
def modifier(mutable_list):
mutable_list[0] = 'mutated'
mock_obj = self.mox.CreateMockAnything()
mock_obj.ConfigureInOutParameter(['original']).WithSideEffects(modifier)
mock_obj.WorkWithParameter(['mutated'])
self.mox.ReplayAll()
local_list = ['original']
mock_obj.ConfigureInOutParameter(local_list)
mock_obj.WorkWithParameter(local_list)
self.mox.VerifyAll()
def testWithSideEffectsException(self):
"""Test side effect operations actually modify their target objects."""
def modifier(mutable_list):
mutable_list[0] = 'mutated'
mock_obj = self.mox.CreateMockAnything()
method = mock_obj.ConfigureInOutParameter(['original'])
method.WithSideEffects(modifier).AndRaise(Exception('exception'))
mock_obj.WorkWithParameter(['mutated'])
self.mox.ReplayAll()
local_list = ['original']
self.failUnlessRaises(Exception,
mock_obj.ConfigureInOutParameter,
local_list)
mock_obj.WorkWithParameter(local_list)
self.mox.VerifyAll()
def testStubOutMethod(self):
"""Test that a method is replaced with a MockObject."""
test_obj = TestClass()
method_type = type(test_obj.OtherValidCall)
# Replace OtherValidCall with a mock.
self.mox.StubOutWithMock(test_obj, 'OtherValidCall')
self.assertTrue(isinstance(test_obj.OtherValidCall, mox.MockObject))
self.assertFalse(type(test_obj.OtherValidCall) is method_type)
test_obj.OtherValidCall().AndReturn('foo')
self.mox.ReplayAll()
actual = test_obj.OtherValidCall()
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('foo', actual)
self.assertTrue(type(test_obj.OtherValidCall) is method_type)
def testStubOutMethod_Unbound_Comparator(self):
instance = TestClass()
self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
TestClass.OtherValidCall(mox.IgnoreArg()).AndReturn('foo')
self.mox.ReplayAll()
actual = TestClass.OtherValidCall(instance)
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('foo', actual)
def testStubOutMethod_Unbound_Subclass_Comparator(self):
self.mox.StubOutWithMock(mox_test_helper.TestClassFromAnotherModule, 'Value')
mox_test_helper.TestClassFromAnotherModule.Value(
mox.IsA(mox_test_helper.ChildClassFromAnotherModule)).AndReturn('foo')
self.mox.ReplayAll()
instance = mox_test_helper.ChildClassFromAnotherModule()
actual = mox_test_helper.TestClassFromAnotherModule.Value(instance)
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('foo', actual)
def testStubOuMethod_Unbound_WithOptionalParams(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(TestClass, 'OptionalArgs')
TestClass.OptionalArgs(mox.IgnoreArg(), foo=2)
self.mox.ReplayAll()
t = TestClass()
TestClass.OptionalArgs(t, foo=2)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Unbound_ActualInstance(self):
instance = TestClass()
self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
TestClass.OtherValidCall(instance).AndReturn('foo')
self.mox.ReplayAll()
actual = TestClass.OtherValidCall(instance)
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('foo', actual)
def testStubOutMethod_Unbound_DifferentInstance(self):
instance = TestClass()
self.mox.StubOutWithMock(TestClass, 'OtherValidCall')
TestClass.OtherValidCall(instance).AndReturn('foo')
self.mox.ReplayAll()
# This should fail, since the instances are different
self.assertRaises(mox.UnexpectedMethodCallError,
TestClass.OtherValidCall, "wrong self")
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Unbound_NamedUsingPositional(self):
"""Check positional parameters can be matched to keyword arguments."""
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'NamedParams')
instance = mox_test_helper.ExampleClass()
mox_test_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
self.mox.ReplayAll()
mox_test_helper.ExampleClass.NamedParams(instance, 'foo', baz=None)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Unbound_NamedUsingPositional_SomePositional(self):
"""Check positional parameters can be matched to keyword arguments."""
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'TestMethod')
instance = mox_test_helper.ExampleClass()
mox_test_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
self.mox.ReplayAll()
mox_test_helper.ExampleClass.TestMethod(instance, 'one', 'two', 'nine')
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Unbound_SpecialArgs(self):
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'SpecialArgs')
instance = mox_test_helper.ExampleClass()
mox_test_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
self.mox.ReplayAll()
mox_test_helper.ExampleClass.SpecialArgs(instance, 'foo', None, bar='bar')
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Bound_SimpleTest(self):
t = self.mox.CreateMock(TestClass)
t.MethodWithArgs(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('foo')
self.mox.ReplayAll()
actual = t.MethodWithArgs(None, None);
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('foo', actual)
def testStubOutMethod_Bound_NamedUsingPositional(self):
"""Check positional parameters can be matched to keyword arguments."""
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'NamedParams')
instance = mox_test_helper.ExampleClass()
instance.NamedParams('foo', baz=None)
self.mox.ReplayAll()
instance.NamedParams('foo', baz=None)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Bound_NamedUsingPositional_SomePositional(self):
"""Check positional parameters can be matched to keyword arguments."""
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'TestMethod')
instance = mox_test_helper.ExampleClass()
instance.TestMethod(instance, 'one', 'two', 'nine')
self.mox.ReplayAll()
instance.TestMethod(instance, 'one', 'two', 'nine')
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Bound_SpecialArgs(self):
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, 'SpecialArgs')
instance = mox_test_helper.ExampleClass()
instance.SpecialArgs(instance, 'foo', None, bar='bar')
self.mox.ReplayAll()
instance.SpecialArgs(instance, 'foo', None, bar='bar')
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutMethod_Func_PropgatesExceptions(self):
"""Errors in a Func comparator should propagate to the calling method."""
class TestException(Exception):
pass
def raiseExceptionOnNotOne(value):
if value == 1:
return True
else:
raise TestException
test_obj = TestClass()
self.mox.StubOutWithMock(test_obj, 'MethodWithArgs')
test_obj.MethodWithArgs(
mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
test_obj.MethodWithArgs(
mox.IgnoreArg(), mox.Func(raiseExceptionOnNotOne)).AndReturn(1)
self.mox.ReplayAll()
self.assertEqual(test_obj.MethodWithArgs('ignored', 1), 1)
self.assertRaises(TestException,
test_obj.MethodWithArgs, 'ignored', 2)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOut_SignatureMatching_init_(self):
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, '__init__')
mox_test_helper.ExampleClass.__init__(mox.IgnoreArg())
self.mox.ReplayAll()
# Create an instance of a child class, which calls the parent
# __init__
mox_test_helper.ChildExampleClass()
self.mox.VerifyAll()
self.mox.UnsetStubs()
def testStubOutClass_OldStyle(self):
"""Test a mocked class whose __init__ returns a Mock."""
self.mox.StubOutWithMock(mox_test_helper, 'TestClassFromAnotherModule')
self.assert_(isinstance(mox_test_helper.TestClassFromAnotherModule,
mox.MockObject))
mock_instance = self.mox.CreateMock(
mox_test_helper.TestClassFromAnotherModule)
mox_test_helper.TestClassFromAnotherModule().AndReturn(mock_instance)
mock_instance.Value().AndReturn('mock instance')
self.mox.ReplayAll()
a_mock = mox_test_helper.TestClassFromAnotherModule()
actual = a_mock.Value()
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.assertEquals('mock instance', actual)
def testStubOutClass(self):
self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
# Instance one
mock_one = mox_test_helper.CallableClass(1, 2)
mock_one.Value().AndReturn('mock')
# Instance two
mock_two = mox_test_helper.CallableClass(8, 9)
mock_two('one').AndReturn('called mock')
self.mox.ReplayAll()
one = mox_test_helper.CallableClass(1, 2)
actual_one = one.Value()
two = mox_test_helper.CallableClass(8, 9)
actual_two = two('one')
self.mox.VerifyAll()
self.mox.UnsetStubs()
# Verify the correct mocks were returned
self.assertEquals(mock_one, one)
self.assertEquals(mock_two, two)
# Verify
self.assertEquals('mock', actual_one)
self.assertEquals('called mock', actual_two)
def testStubOutClass_NotAClass(self):
self.assertRaises(TypeError, self.mox.StubOutClassWithMocks,
mox_test_helper, 'MyTestFunction')
def testStubOutClassNotEnoughCreated(self):
self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
mox_test_helper.CallableClass(1, 2)
mox_test_helper.CallableClass(8, 9)
self.mox.ReplayAll()
mox_test_helper.CallableClass(1, 2)
self.assertRaises(mox.ExpectedMockCreationError, self.mox.VerifyAll)
self.mox.UnsetStubs()
def testStubOutClassWrongSignature(self):
self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
self.assertRaises(AttributeError, mox_test_helper.CallableClass)
self.mox.UnsetStubs()
def testStubOutClassWrongParameters(self):
self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
mox_test_helper.CallableClass(1, 2)
self.mox.ReplayAll()
self.assertRaises(mox.UnexpectedMethodCallError,
mox_test_helper.CallableClass, 8, 9)
self.mox.UnsetStubs()
def testStubOutClassTooManyCreated(self):
self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
mox_test_helper.CallableClass(1, 2)
self.mox.ReplayAll()
mox_test_helper.CallableClass(1, 2)
self.assertRaises(mox.UnexpectedMockCreationError,
mox_test_helper.CallableClass, 8, 9)
self.mox.UnsetStubs()
def testWarnsUserIfMockingMock(self):
"""Test that user is warned if they try to stub out a MockAnything."""
self.mox.StubOutWithMock(TestClass, 'MyStaticMethod')
self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass,
'MyStaticMethod')
def testStubOutFirstClassMethodVerifiesSignature(self):
self.mox.StubOutWithMock(mox_test_helper, 'MyTestFunction')
# Wrong number of arguments
self.assertRaises(AttributeError, mox_test_helper.MyTestFunction, 1)
self.mox.UnsetStubs()
def _testMethodSignatureVerification(self, stubClass):
# If stubClass is true, the test is run against an a stubbed out class,
# else the test is run against a stubbed out instance.
if stubClass:
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, "TestMethod")
obj = mox_test_helper.ExampleClass()
else:
obj = mox_test_helper.ExampleClass()
self.mox.StubOutWithMock(mox_test_helper.ExampleClass, "TestMethod")
self.assertRaises(AttributeError, obj.TestMethod)
self.assertRaises(AttributeError, obj.TestMethod, 1)
self.assertRaises(AttributeError, obj.TestMethod, nine=2)
obj.TestMethod(1, 2)
obj.TestMethod(1, 2, 3)
obj.TestMethod(1, 2, nine=3)
self.assertRaises(AttributeError, obj.TestMethod, 1, 2, 3, 4)
self.mox.UnsetStubs()
def testStubOutClassMethodVerifiesSignature(self):
self._testMethodSignatureVerification(stubClass=True)
def testStubOutObjectMethodVerifiesSignature(self):
self._testMethodSignatureVerification(stubClass=False)
def testStubOutObject(self):
"""Test than object is replaced with a Mock."""
class Foo(object):
def __init__(self):
self.obj = TestClass()
foo = Foo()
self.mox.StubOutWithMock(foo, "obj")
self.assert_(isinstance(foo.obj, mox.MockObject))
foo.obj.ValidCall()
self.mox.ReplayAll()
foo.obj.ValidCall()
self.mox.VerifyAll()
self.mox.UnsetStubs()
self.failIf(isinstance(foo.obj, mox.MockObject))
def testForgotReplayHelpfulMessage(self):
"""If there is an AttributeError on a MockMethod, give users a helpful msg.
"""
foo = self.mox.CreateMockAnything()
bar = self.mox.CreateMockAnything()
foo.GetBar().AndReturn(bar)
bar.ShowMeTheMoney()
# Forgot to replay!
try:
foo.GetBar().ShowMeTheMoney()
except AttributeError, e:
self.assertEquals('MockMethod has no attribute "ShowMeTheMoney". '
'Did you remember to put your mocks in replay mode?', str(e))
class ReplayTest(unittest.TestCase):
"""Verify Replay works properly."""
def testReplay(self):
"""Replay should put objects into replay mode."""
mock_obj = mox.MockObject(TestClass)
self.assertFalse(mock_obj._replay_mode)
mox.Replay(mock_obj)
self.assertTrue(mock_obj._replay_mode)
class MoxTestBaseTest(unittest.TestCase):
"""Verify that all tests in a class derived from MoxTestBase are wrapped."""
def setUp(self):
self.mox = mox.Mox()
self.test_mox = mox.Mox()
self.test_stubs = mox.stubout.StubOutForTesting()
self.result = unittest.TestResult()
def tearDown(self):
self.mox.UnsetStubs()
self.test_mox.UnsetStubs()
self.test_stubs.UnsetAll()
self.test_stubs.SmartUnsetAll()
def _setUpTestClass(self):
"""Replacement for setUp in the test class instance.
Assigns a mox.Mox instance as the mox attribute of the test class instance.
This replacement Mox instance is under our control before setUp is called
in the test class instance.
"""
self.test.mox = self.test_mox
self.test.stubs = self.test_stubs
def _CreateTest(self, test_name):
"""Create a test from our example mox class.
The created test instance is assigned to this instances test attribute.
"""
self.test = mox_test_helper.ExampleMoxTest(test_name)
self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass)
def _VerifySuccess(self):
"""Run the checks to confirm test method completed successfully."""
self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
self.test_mox.UnsetStubs()
self.test_mox.VerifyAll()
self.test_stubs.UnsetAll()
self.test_stubs.SmartUnsetAll()
self.mox.ReplayAll()
self.test.run(result=self.result)
self.assertTrue(self.result.wasSuccessful())
self.mox.VerifyAll()
self.mox.UnsetStubs() # Needed to call the real VerifyAll() below.
self.test_mox.VerifyAll()
def testSuccess(self):
"""Successful test method execution test."""
self._CreateTest('testSuccess')
self._VerifySuccess()
def testSuccessNoMocks(self):
"""Let testSuccess() unset all the mocks, and verify they've been unset."""
self._CreateTest('testSuccess')
self.test.run(result=self.result)
self.assertTrue(self.result.wasSuccessful())
self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
def testStubs(self):
"""Test that "self.stubs" is provided as is useful."""
self._CreateTest('testHasStubs')
self._VerifySuccess()
def testStubsNoMocks(self):
"""Let testHasStubs() unset the stubs by itself."""
self._CreateTest('testHasStubs')
self.test.run(result=self.result)
self.assertTrue(self.result.wasSuccessful())
self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
def testExpectedNotCalled(self):
"""Stubbed out method is not called."""
self._CreateTest('testExpectedNotCalled')
self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
# Don't stub out VerifyAll - that's what causes the test to fail
self.test_mox.UnsetStubs()
self.test_stubs.UnsetAll()
self.test_stubs.SmartUnsetAll()
self.mox.ReplayAll()
self.test.run(result=self.result)
self.failIf(self.result.wasSuccessful())
self.mox.VerifyAll()
def testExpectedNotCalledNoMocks(self):
"""Let testExpectedNotCalled() unset all the mocks by itself."""
self._CreateTest('testExpectedNotCalled')
self.test.run(result=self.result)
self.failIf(self.result.wasSuccessful())
self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
def testUnexpectedCall(self):
"""Stubbed out method is called with unexpected arguments."""
self._CreateTest('testUnexpectedCall')
self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
# Ensure no calls are made to VerifyAll()
self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
self.test_mox.UnsetStubs()
self.test_stubs.UnsetAll()
self.test_stubs.SmartUnsetAll()
self.mox.ReplayAll()
self.test.run(result=self.result)
self.failIf(self.result.wasSuccessful())
self.mox.VerifyAll()
def testFailure(self):
"""Failing assertion in test method."""
self._CreateTest('testFailure')
self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
# Ensure no calls are made to VerifyAll()
self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
self.test_mox.UnsetStubs()
self.test_stubs.UnsetAll()
self.test_stubs.SmartUnsetAll()
self.mox.ReplayAll()
self.test.run(result=self.result)
self.failIf(self.result.wasSuccessful())
self.mox.VerifyAll()
def testMixin(self):
"""Run test from mix-in test class, ensure it passes."""
self._CreateTest('testStat')
self._VerifySuccess()
def testMixinAgain(self):
"""Run same test as above but from the current test class.
This ensures metaclass properly wrapped test methods from all base classes.
If unsetting of stubs doesn't happen, this will fail.
"""
self._CreateTest('testStatOther')
self._VerifySuccess()
class VerifyTest(unittest.TestCase):
"""Verify Verify works properly."""
def testVerify(self):
"""Verify should be called for all objects.
This should throw an exception because the expected behavior did not occur.
"""
mock_obj = mox.MockObject(TestClass)
mock_obj.ValidCall()
mock_obj._Replay()
self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, mock_obj)
class ResetTest(unittest.TestCase):
"""Verify Reset works properly."""
def testReset(self):
"""Should empty all queues and put mocks in record mode."""
mock_obj = mox.MockObject(TestClass)
mock_obj.ValidCall()
self.assertFalse(mock_obj._replay_mode)
mock_obj._Replay()
self.assertTrue(mock_obj._replay_mode)
self.assertEquals(1, len(mock_obj._expected_calls_queue))
mox.Reset(mock_obj)
self.assertFalse(mock_obj._replay_mode)
self.assertEquals(0, len(mock_obj._expected_calls_queue))
class MyTestCase(unittest.TestCase):
"""Simulate the use of a fake wrapper around Python's unittest library."""
def setUp(self):
super(MyTestCase, self).setUp()
self.critical_variable = 42
self.another_critical_variable = 42
def testMethodOverride(self):
"""Should be properly overriden in a derived class."""
self.assertEquals(42, self.another_critical_variable)
self.another_critical_variable += 1
class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase):
"""Test that multiple inheritance can be used with MoxTestBase."""
def setUp(self):
super(MoxTestBaseMultipleInheritanceTest, self).setUp()
self.another_critical_variable = 99
def testMultipleInheritance(self):
"""Should be able to access members created by all parent setUp()."""
self.assert_(isinstance(self.mox, mox.Mox))
self.assertEquals(42, self.critical_variable)
def testMethodOverride(self):
"""Should run before MyTestCase.testMethodOverride."""
self.assertEquals(99, self.another_critical_variable)
self.another_critical_variable = 42
super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride()
self.assertEquals(43, self.another_critical_variable)
class MoxTestDontMockProperties(MoxTestBaseTest):
def testPropertiesArentMocked(self):
mock_class = self.mox.CreateMock(ClassWithProperties)
self.assertRaises(mox.UnknownMethodCallError, lambda:
mock_class.prop_attr)
class TestClass:
"""This class is used only for testing the mock framework"""
SOME_CLASS_VAR = "test_value"
_PROTECTED_CLASS_VAR = "protected value"
def __init__(self, ivar=None):
self.__ivar = ivar
def __eq__(self, rhs):
return self.__ivar == rhs
def __ne__(self, rhs):
return not self.__eq__(rhs)
def ValidCall(self):
pass
def MethodWithArgs(self, one, two, nine=None):
pass
def OtherValidCall(self):
pass
def OptionalArgs(self, foo='boom'):
pass
def ValidCallWithArgs(self, *args, **kwargs):
pass
@classmethod
def MyClassMethod(cls):
pass
@staticmethod
def MyStaticMethod():
pass
def _ProtectedCall(self):
pass
def __PrivateCall(self):
pass
def __getitem__(self, key):
pass
def __DoNotMock(self):
pass
def __getitem__(self, key):
"""Return the value for key."""
return self.d[key]
def __setitem__(self, key, value):
"""Set the value for key to value."""
self.d[key] = value
def __contains__(self, key):
"""Returns True if d contains the key."""
return key in self.d
def __iter__(self):
pass
class ChildClass(TestClass):
"""This inherits from TestClass."""
def __init__(self):
TestClass.__init__(self)
def ChildValidCall(self):
pass
class CallableClass(object):
"""This class is callable, and that should be mockable!"""
def __init__(self):
pass
def __call__(self, param):
return param
class ClassWithProperties(object):
def setter_attr(self, value):
pass
def getter_attr(self):
pass
prop_attr = property(getter_attr, setter_attr)
class SubscribtableNonIterableClass(object):
def __getitem__(self, index):
raise IndexError
class InheritsFromCallable(CallableClass):
"""This class should also be mockable; it inherits from a callable class."""
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,131,272,746,335,434,000 | 31.654538 | 81 | 0.682844 | false |
stratton-oakcoin/oakcoin | test/functional/wallet-accounts.py | 1 | 5091 | #!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (
start_nodes,
assert_equal,
)
class WalletAccountsTest(OakcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Oakcoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.sendfrom("", address, amount_to_send)
node.generate(1)
for i in range(len(accounts)):
from_account = accounts[i]
to_account = accounts[(i+1) % len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(101)
expected_account_balances = {"": 5200}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
| mit | 8,348,462,049,676,824,000 | 35.891304 | 78 | 0.604596 | false |
uclouvain/osis | education_group/tests/api/serializers/test_group.py | 1 | 3566 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.conf import settings
from django.test import TestCase, RequestFactory
from django.urls import reverse
from base.models.enums import organization_type
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.entity_version import EntityVersionFactory
from education_group.api.serializers.group import GroupDetailSerializer
from education_group.tests.factories.group_year import GroupYearFactory
class GroupDetailSerializerTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2018)
cls.entity_version = EntityVersionFactory(
entity__organization__type=organization_type.MAIN
)
cls.group = GroupYearFactory(
academic_year=cls.academic_year,
management_entity=cls.entity_version.entity,
)
url = reverse('education_group_api_v1:group_read', kwargs={
'partial_acronym': cls.group.partial_acronym,
'year': cls.academic_year.year
})
cls.serializer = GroupDetailSerializer(cls.group, context={
'request': RequestFactory().get(url),
'language': settings.LANGUAGE_CODE_FR
})
def test_contains_expected_fields(self):
expected_fields = [
'title',
'title_en',
'url',
'acronym',
'code',
'management_entity',
'management_faculty',
'academic_year',
'education_group_type',
'education_group_type_text',
'credits',
'min_constraint',
'max_constraint',
'constraint_type',
'constraint_type_text',
'remark',
'remark_en',
'campus',
]
self.assertListEqual(list(self.serializer.data.keys()), expected_fields)
def test_ensure_academic_year_field_is_slugified(self):
self.assertEqual(
self.serializer.data['academic_year'],
self.academic_year.year
)
def test_ensure_education_group_type_field_is_slugified(self):
self.assertEqual(
self.serializer.data['education_group_type'],
self.group.education_group_type.name
)
| agpl-3.0 | 9,107,631,064,224,839,000 | 38.611111 | 87 | 0.628892 | false |
jpetto/olympia | src/olympia/amo/tests/test_readonly.py | 1 | 2126 | from django.conf import settings
from django.db import models
from django.utils import importlib
import MySQLdb as mysql
from nose.tools import assert_raises, eq_
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
def pubdir(ob):
for name in dir(ob):
if not name.startswith('_'):
yield name
def quickcopy(val):
if isinstance(val, dict):
val = val.copy()
elif isinstance(val, list):
val = list(val)
return val
class ReadOnlyModeTest(TestCase):
extra = ('olympia.amo.middleware.ReadOnlyMiddleware',)
def setUp(self):
super(ReadOnlyModeTest, self).setUp()
models.signals.pre_save.connect(self.db_error)
models.signals.pre_delete.connect(self.db_error)
self.old_settings = dict((k, quickcopy(getattr(settings, k)))
for k in pubdir(settings))
settings.SLAVE_DATABASES = ['default']
settings_module = importlib.import_module(settings.SETTINGS_MODULE)
settings_module.read_only_mode(settings._wrapped.__dict__)
self.client.handler.load_middleware()
def tearDown(self):
for k in pubdir(settings):
if k not in self.old_settings:
delattr(self.old_settings, k)
for k, v in self.old_settings.items():
try:
setattr(settings, k, v)
except AttributeError:
# __weakref__
pass
models.signals.pre_save.disconnect(self.db_error)
models.signals.pre_delete.disconnect(self.db_error)
super(ReadOnlyModeTest, self).tearDown()
def db_error(self, *args, **kwargs):
raise mysql.OperationalError("You can't do this in read-only mode.")
def test_db_error(self):
assert_raises(mysql.OperationalError, Addon.objects.create, id=12)
def test_bail_on_post(self):
r = self.client.post('/en-US/firefox/')
eq_(r.status_code, 503)
title = pq(r.content)('title').text()
assert title.startswith('Maintenance in progress'), title
| bsd-3-clause | 8,769,222,976,531,357,000 | 31.707692 | 76 | 0.633114 | false |
par2/lamana | lamana/models/fixtures/fixture_model_class.py | 1 | 10351 | #------------------------------------------------------------------------------
'''Class-style Model
This fixture is used to test the importing of models, handled by the
`theories.handshake()` module. As of 0.4.11, models can:
- be located in the `lamana.models` folder
- module and classes can have any pythonic name; hard-coding removed
- any sub-package can be accessed by the "model" keyword in `Case.apply()`
- search for the hook-containing class and it's hook method
This module is here to test these aspects as the module is imported. The Wilson_LT
model was adapted. No functions are expected in this module; there are tests
against this.
'''
import math
import collections as ct
import pandas as pd
from lamana.input_ import BaseDefaults
from lamana.theories import BaseModel
from lamana.lt_exceptions import IndeterminateError
# This class lacks a hook method; theories should skip it.
class DummyModel():
pass
# The class containing the hook method can have any name.
class RandomName(BaseModel):
'''A modified laminate theory for circular biaxial flexure disks,
loaded with a flat piston punch on 3-ball support having two distinct
materials (polymer and ceramic).'''
'''Accept extra args and kwds here'''
def __init__(self):
self.Laminate = None
self.FeatureInput = None
self.LaminateModel = None
# TODO: eventually abstract into BaseModel and deprecate direct coding
# TODO: accept kwargs from Case -> handshake
def _use_model_(self, Laminate, adjusted_z=False):
'''Return updated DataFrame and FeatureInput Return None if exceptions raised.
Parameters
----------
df : DataFrame
LaminateModel with IDs and Dimensional Variables.
FeatureInut : dict
Geometry, laminate parameters and more. Updates Globals dict for
parameters in the dashboard output.
adjusted_z: bool; default=False
If True, uses z(m)* values instead; different assumption for internal calc.
Raises
------
ZeroDivisionError
If zero `r` or `a` in the log term are zero.
ValueError
If negative numbers are in the log term or the support radius exceeds
the sample radius.
Returns
-------
tuple
The updated calculations and parameters stored in a tuple
`(LaminateModel, FeatureInput)``.
'''
self.Laminate = Laminate
df = Laminate.LFrame.copy()
FeatureInput = Laminate.FeatureInput
# Author-defined Exception Handling
if (FeatureInput['Parameters']['r'] == 0):
raise ZeroDivisionError('r=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['a'] == 0):
raise ZeroDivisionError('a=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['r'] < 0) | (FeatureInput['Parameters']['a'] < 0):
raise ValueError('Negative numbers are invalid for the log term '
'in moment eqn.')
elif FeatureInput['Parameters']['a'] > FeatureInput['Parameters']['R']:
raise ValueError('Support radius is larger than sample radius.')
elif df['side'].str.contains('INDET').any():
print('INDET value found. Rolling back...')
raise IndeterminateError('INDET value found. Unable to accurately calculate stress.')
#raise AssertionError('Indeterminate value found. Unable to accurately calculate stress.')
# Calling functions to calculate Qs and Ds
df.loc[:, 'Q_11'] = self.calc_stiffness(df, FeatureInput['Properties']).q_11
df.loc[:, 'Q_12'] = self.calc_stiffness(df, FeatureInput['Properties']).q_12
df.loc[:, 'D_11'] = self.calc_bending(df, adj_z=adjusted_z).d_11
df.loc[:, 'D_12'] = self.calc_bending(df, adj_z=adjusted_z).d_12
# Global Variable Update
if (FeatureInput['Parameters']['p'] == 1) & (Laminate.nplies%2 == 0):
D_11T = sum(df['D_11'])
D_12T = sum(df['D_12'])
else:
D_11T = sum(df.loc[df['label'] == 'interface', 'D_11']) # total D11
D_12T = sum(df.loc[df['label'] == 'interface', 'D_12'])
#print(FeatureInput['Geometric']['p'])
D_11p = (1./((D_11T**2 - D_12T**2)) * D_11T) #
D_12n = -(1./((D_11T**2 - D_12T**2)) *D_12T) #
v_eq = D_12T/D_11T # equiv. Poisson's ratio
M_r = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_r
M_t = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_t
K_r = (D_11p*M_r) + (D_12n*M_t) # curvatures
K_t = (D_12n*M_r) + (D_11p*M_t)
# Update FeatureInput
global_params = {
'D_11T': D_11T,
'D_12T': D_12T,
'D_11p': D_11p,
'D_12n': D_12n,
'v_eq ': v_eq,
'M_r': M_r,
'M_t': M_t,
'K_r': K_r,
'K_t:': K_t,
}
FeatureInput['Globals'] = global_params
self.FeatureInput = FeatureInput # update with Globals
#print(FeatureInput)
# Calculate Strains and Stresses and Update DataFrame
df.loc[:,'strain_r'] = K_r * df.loc[:, 'Z(m)']
df.loc[:,'strain_t'] = K_t * df.loc[:, 'Z(m)']
df.loc[:, 'stress_r (Pa/N)'] = (df.loc[:, 'strain_r'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_t'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_t (Pa/N)'] = (df.loc[:, 'strain_t'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_r'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_f (MPa/N)'] = df.loc[:, 'stress_t (Pa/N)']/1e6
del df['Modulus']
del df['Poissons']
self.LaminateModel = df
return (df, FeatureInput)
#------------------------------------------------------------------------------
'''Prefer staticmethods here. Add formulas to doc strings.'''
def calc_stiffness(self, df, mat_props):
'''Return tuple of Series of (Q11, Q12) floats per lamina.'''
# Iterate to Apply Modulus and Poisson's to correct Material
# TODO: Prefer cleaner ways to parse materials from mat_props
df_mat_props = pd.DataFrame(mat_props) # df easier to munge
df_mat_props.index.name = 'materials'
##for material in mat_props.index:
for material in df_mat_props.index:
mat_idx = df['matl'] == material
df.loc[mat_idx, 'Modulus'] = df_mat_props.loc[material, 'Modulus']
df.loc[mat_idx, 'Poissons'] = df_mat_props.loc[material, 'Poissons']
E = df['Modulus'] # series of moduli
v = df['Poissons']
stiffness = ct.namedtuple('stiffness', ['q_11', 'q_12'])
q_11 = E / (1 - (v**2))
q_12 = (v*E) / (1 - (v**2))
return stiffness(q_11, q_12)
def calc_bending(self, df, adj_z=False):
'''Return tuple of Series of (D11, D12) floats.'''
q_11 = df['Q_11']
q_12 = df['Q_12']
h = df['h(m)']
# TODO: need to fix kwargs passing first; tabled since affects many modules.
if not adj_z:
z = df['z(m)']
else:
z = df['z(m)*']
bending = ct.namedtuple('bending', ['d_11', 'd_12'])
d_11 = ((q_11*(h**3)) / 12.) + (q_11*h*(z**2))
d_12 = ((q_12*(h**3)) / 12.) + (q_12*h*(z**2))
return bending(d_11, d_12)
def calc_moment(self, df, load_params, v_eq):
'''Return tuple of moments (radial and tangential); floats.
See Timishenko-Woinowsky: Eq. 91; default'''
P_a = load_params['P_a']
a = load_params['a']
r = load_params['r']
moments = ct.namedtuple('moments', ['m_r', 'm_t'])
m_r = ((P_a/(4*math.pi)) * ((1 + v_eq)*math.log10(a/r)))
m_t = ((P_a/(4*math.pi)) * (((1 + v_eq)*math.log10(a/r)) + (1 - v_eq)))
return moments(m_r, m_t)
class Defaults(BaseDefaults):
'''Return parameters for building distributions cases. Useful for consistent
testing.
Dimensional defaults are inherited from utils.BaseDefaults().
Material-specific parameters are defined here by he user.
- Default geometric parameters
- Default material properties
- Default FeatureInput
Examples
--------
>>> dft = Defaults()
>>> dft.load_params
{'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}
>>> dft.mat_props
{'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
>>> dft.FeatureInput
{'Geometry' : '400-[200]-800',
'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,},
'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom' : None,
'Model' : Wilson_LT}
Returns
-------
class
Updated attributes inherited from the `BaseDefaults` class.
'''
def __init__(self):
BaseDefaults.__init__(self)
'''DEV: Add defaults first. Then adjust attributes.'''
# DEFAULTS ------------------------------------------------------------
# Build dicts of geometric and material parameters
self.load_params = {
'R': 12e-3, # specimen radius
'a': 7.5e-3, # support ring radius
'p': 5, # points/layer
'P_a': 1, # applied load
'r': 2e-4, # radial distance from center loading
}
self.mat_props = {
'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}
}
# ATTRIBUTES ----------------------------------------------------------
# FeatureInput
self.FeatureInput = self.get_FeatureInput(
self.Geo_objects['standard'][0],
load_params=self.load_params,
mat_props=self.mat_props,
##custom_matls=None,
model='Wilson_LT',
global_vars=None
)
| bsd-3-clause | 8,944,103,067,520,001,000 | 39.120155 | 103 | 0.526906 | false |
mglidden/git-analysis | analysis/word_frequencies.py | 1 | 1325 | import fix_paths
import common
import config
from load_samples import load_samples_from_file
from models.commit import Commit
from collections import Counter
import json
import string
def get_words_from_message(commit_message):
#TODO: clean up this method
cleaned_message = str(commit_message.encode('ascii', 'ignore').replace('\n', ' ')).translate(string.maketrans('', ''), string.punctuation + '\t').lower()
return set(cleaned_message.split(' '))
def create_word_frequencies():
session = common.Session()
training_samples = load_samples_from_file(config.TRAINING_DATA_PATH)
word_frequencies = Counter()
for _, commit_id in training_samples:
commit = session.query(Commit).filter(Commit.id == commit_id).first()
for word in get_words_from_message(commit.message):
word_frequencies[word] += 1
all_words = [word for word, _ in word_frequencies.most_common(800)]
word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'w')
word_frequency_file.write(json.dumps(all_words))
word_frequency_file.close()
def load_word_frequencies():
# TODO: Cache this file
word_frequency_file = open(config.WORD_FREQUENCY_PATH, 'r')
word_frequency = json.loads(word_frequency_file.read())
word_frequency_file.close()
return word_frequency
if __name__ == '__main__':
create_word_frequencies()
| mit | -2,114,682,968,862,255,400 | 32.974359 | 155 | 0.723019 | false |
chebee7i/dit | dit/algorithms/optutil.py | 1 | 8288 | """
Various utilities that can be helpful for optimization problems.
"""
from __future__ import division, print_function
from collections import defaultdict
import itertools
import numpy as np
import dit
from .frankwolfe import frank_wolfe
def as_full_rank(A, b):
"""
From a linear system Ax = b, return Bx = c such that B has full rank.
In CVXOPT, linear constraints are denoted as: Ax = b. A has shape (p, n)
and must have full rank. x has shape (n, 1), and so b has shape (p, 1).
Let's assume that we have:
rank(A) = q <= n
This is a typical situation if you are doing optimization, where you have
an under-determined system and are using some criterion for selecting out
a particular solution. Now, it may happen that q < p, which means that some
of your constraint equations are not independent. Since CVXOPT requires
that A have full rank, we must isolate an equivalent system Bx = c which
does have full rank. We use SVD for this. So A = U \Sigma V^*, where
U is (p, p), \Sigma is (p, n) and V^* is (n, n). Then:
\Sigma V^* x = U^{-1} b
We take B = \Sigma V^* and c = U^T b, where we use U^T instead of U^{-1}
for computational efficiency (and since U is orthogonal). But note, we
take only the cols of U (which are rows in U^{-1}) and rows of \Sigma that
have nonzero singular values.
Parameters
----------
A : array-like, shape (p, n)
The LHS for the linear constraints.
b : array-like, shape (p,) or (p, 1)
The RHS for the linear constraints.
Returns
-------
B : array-like, shape (q, n)
The LHS for the linear constraints.
c : array-like, shape (q,) or (q, 1)
The RHS for the linear constraints.
rank : int
The rank of B.
"""
try:
from scipy.linalg import svd
except ImportError:
from numpy.linalg import svd
import scipy.linalg as splinalg
A = np.atleast_2d(A)
b = np.asarray(b)
U, S, Vh = svd(A)
Smat = splinalg.diagsvd(S, A.shape[0], A.shape[1])
# See np.linalg.matrix_rank
tol = S.max() * max(A.shape) * np.finfo(S.dtype).eps
rank = np.sum(S > tol)
B = np.dot(Smat, Vh)[:rank]
c = np.dot(U.transpose(), b)[:rank]
return B, c, rank
class CVXOPT_Template(object):
"""
Template for convex minimization on probability distributions.
"""
def __init__(self, dist, tol=None, prng=None):
"""
Initialize optimizer.
Parameters
----------
dist : distribution
The distribution that is used during optimization.
tol : float | None
The desired convergence tolerance.
prng : RandomState
A NumPy-compatible pseudorandom number generator.
"""
dist = prepare_dist(dist)
self.dist = dist
self.pmf = dist.pmf
self.n_variables = dist.outcome_length()
self.n_symbols = len(dist.alphabet[0])
self.n_elements = len(self.pmf)
if prng is None:
prng = np.random.RandomState()
self.prng = prng
if tol is None:
tol = {}
self.tol = tol
self.init()
def init(self):
# Dimension of optimization variable
self.n = len(self.pmf)
# Number of nonlinear constraints
self.m = 0
self.prep()
self.build_function()
self.build_gradient_hessian()
self.build_linear_inequality_constraints()
self.build_linear_equality_constraints()
self.build_F()
def prep(self):
pass
def build_function(self):
self.func = lambda x: x.sum()
def build_gradient_hessian(self):
import numdifftools
self.gradient = numdifftools.Gradient(self.func)
self.hessian = numdifftools.Hessian(self.func)
def build_linear_inequality_constraints(self):
from cvxopt import matrix
# Dimension of optimization variable
n = self.n
# Nonnegativity constraint
#
# We have M = N = 0 (no 2nd order cones or positive semidefinite cones)
# So, K = l where l is the dimension of the nonnegative orthant. Thus,
# we have l = n.
G = matrix(-1 * np.eye(n)) # G should have shape: (K,n) = (n,n)
h = matrix(np.zeros((n,1))) # h should have shape: (K,1) = (n,1)
self.G = G
self.h = h
def build_linear_equality_constraints(self):
from cvxopt import matrix
# Normalization constraint only
A = [np.ones(self.n_elements)]
b = [1]
A = np.asarray(A, dtype=float)
b = np.asarray(b, dtype=float)
self.A = matrix(A)
self.b = matrix(b) # now a column vector
def initial_dist(self):
return self.prng.dirichlet([1] * self.n)
def build_F(self):
from cvxopt import matrix
n = self.n
m = self.m
def F(x=None, z=None):
# x has shape: (n,1) and is the distribution
# z has shape: (m+1,1) and is the Hessian of f_0
if x is None and z is None:
d = self.initial_dist()
return (m, matrix(d))
xarr = np.array(x)[:, 0]
# Verify that x is in domain.
# Does G,h and A,b take care of this?
#
if np.any(xarr > 1) or np.any(xarr < 0):
return None
if not np.allclose(np.sum(xarr), 1, **self.tol):
return None
# Derivatives
f = self.func(xarr)
Df = self.gradient(xarr)
Df = matrix(Df.reshape((1, n)))
if z is None:
return (f, Df)
else:
# Hessian
H = self.hessian(xarr)
H = matrix(H)
return (f, Df, z[0] * H)
self.F = F
def optimize(self, **kwargs):
"""
Options:
show_progress=False,
maxiters=100,
abstol=1e-7,
reltol=1e-6,
feastol=1e-7,
refinement=0 if m=0 else 1
"""
from cvxopt.solvers import cp, options
old_options = options.copy()
out = None
try:
options.clear()
options.update(kwargs)
with np.errstate(divide='ignore', invalid='ignore'):
result = cp(F=self.F,
G=self.G,
h=self.h,
dims={'l':self.G.size[0], 'q':[], 's':[]},
A=self.A,
b=self.b)
except:
raise
else:
self.result = result
out = np.asarray(result['x'])
finally:
options.clear()
options.update(old_options)
return out
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def prepare_dist(dist):
if not isinstance(dist._sample_space, dit.samplespace.CartesianProduct):
dist = dit.expanded_samplespace(dist, union=True)
if not dist.is_dense():
if len(dist._sample_space) > 1e4:
import warnings
msg = "Sample space has more than 10k elements."
msg += " This could be slow."
warnings.warn(msg)
dist.make_dense()
# We also need linear probabilities.
dist.set_base('linear')
return dist
def op_runner(objective, constraints, **kwargs):
"""
Minimize the objective specified by the constraints.
This safely let's you pass options to the solver and restores their values
once the optimization process has completed.
The objective must be linear in the variables.
This uses cvxopt.modeling.
"""
from cvxopt.solvers import options
from cvxopt.modeling import variable, op
old_options = options.copy()
opt = op(objective, constraints)
try:
options.clear()
options.update(kwargs)
# Ignore 0 log 0 warnings.
with np.errstate(divide='ignore', invalid='ignore'):
opt.solve()
except:
raise
finally:
options.clear()
options.update(old_options)
return opt
| bsd-3-clause | 3,453,613,929,888,165,000 | 25.14511 | 79 | 0.552847 | false |
monetaproject/moneta | qa/rpc-tests/util.py | 1 | 5291 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Moneta developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal
import json
import shutil
import subprocess
import time
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
START_P2P_PORT=11000
START_RPC_PORT=11100
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = []
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
monetad and moneta-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run monetads:
for i in range(4):
datadir = os.path.join("cache", "node"+str(i))
os.makedirs(datadir)
with open(os.path.join(datadir, "moneta.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(START_P2P_PORT+i)+"\n");
f.write("rpcport="+str(START_RPC_PORT+i)+"\n");
args = [ "monetad", "-keypool=1", "-datadir="+datadir ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(START_P2P_PORT))
bitcoind_processes.append(subprocess.Popen(args))
subprocess.check_call([ "moneta-cli", "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(START_RPC_PORT+i,)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
for i in range(4):
rpcs[i].setgenerate(True, 25)
sync_blocks(rpcs)
# Shut them down, and remove debug.logs:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(debug_log("cache", i))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
def start_nodes(num_nodes, dir):
# Start monetads, and wait for RPC interface to be up and running:
devnull = open("/dev/null", "w+")
for i in range(num_nodes):
datadir = os.path.join(dir, "node"+str(i))
args = [ "monetad", "-datadir="+datadir ]
bitcoind_processes.append(subprocess.Popen(args))
subprocess.check_call([ "moneta-cli", "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
    # Create & return JSON-RPC connections
rpc_connections = []
for i in range(num_nodes):
url = "http://rt:[email protected]:%d"%(START_RPC_PORT+i,)
rpc_connections.append(AuthServiceProxy(url))
return rpc_connections
def debug_log(dir, n_node):
return os.path.join(dir, "node"+str(n_node), "regtest", "debug.log")
def stop_nodes(nodes):
for i in range(len(nodes)):
nodes[i].stop()
del nodes[:] # Emptying array closes connections as a side effect
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes:
bitcoind.wait()
del bitcoind_processes[:]
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(START_P2P_PORT+node_num)
from_connection.addnode(ip_port, "onetry")
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
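# Example (illustrative sketch added by the editor, not part of the original
# module): the typical shape of a regression test built on these helpers.
# The directory name is hypothetical.
#
#   check_json_precision()
#   initialize_chain("/tmp/test_run")
#   nodes = start_nodes(2, "/tmp/test_run")
#   connect_nodes(nodes[1], 0)
#   sync_blocks(nodes)
#   assert_equal(nodes[0].getblockcount(), nodes[1].getblockcount())
#   stop_nodes(nodes)
#   wait_bitcoinds()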
| mit | -734,973,261,075,377,800 | 33.581699 | 94 | 0.600832 | false |
wright-group/WrightData | 2015-12 Czech/workup.py | 1 | 3041 | '''
First Created 2016/05/05 by Blaise Thompson
Last Edited 2016/08/08 by Blaise Thompson
Contributors: Blaise Thompson, Kyle Czech
'''
### import ####################################################################
import os
import sys
import importlib
import collections
import WrightTools as wt
### define ####################################################################
# paths
directory = os.path.dirname(__file__)
key = os.path.basename(directory)
package_folder = os.path.dirname(directory)
# shared module
spec = importlib.util.spec_from_file_location('shared', os.path.join(package_folder, 'shared.py'))
shared_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(shared_module)
# dictionaries to fill
raw_dictionary = collections.OrderedDict()
processed_dictionary = collections.OrderedDict()
### download ##################################################################
bypass_download = False
if __name__ == '__main__' and not bypass_download:
shared_module.download(key, directory)
### movie #####################################################################
raw_pickle_path = os.path.join(directory, 'raw_movie.p')
processed_pickle_path = os.path.join(directory, 'processed_movie.p')
def workup():
# raw
data_paths = wt.kit.glob_handler('.dat', folder=os.path.join(directory, 'movie'))
raw_movie = wt.data.from_COLORS(data_paths, name='MoS2 TrEE Movie')
raw_movie.save(raw_pickle_path)
# processed
processed_movie = raw_movie.copy()
processed_movie.level('ai0', 'd2', -3)
processed_movie.smooth([2, 2, 0], channel='ai0')
processed_movie.scale(channel='ai0', kind='amplitude')
processed_movie.normalize(channel='ai0')
processed_movie.save(processed_pickle_path)
# finish
return raw_movie, processed_movie
# force workup
if False:
workup()
# automatically process
shared_module.process(key='movie',
workup_method=workup, raw_pickle_path=raw_pickle_path,
processed_pickle_path=processed_pickle_path,
raw_dictionary=raw_dictionary,
processed_dictionary=processed_dictionary)
### absorbance ################################################################
raw_pickle_path = os.path.join(directory, 'absorbance_data.p')
processed_pickle_path = raw_pickle_path
def workup():
absorbance_path = os.path.join(directory, 'MoS2_TF_III_ebeam_1nm_Mo_onQuartz_T=300K__corrected.txt')
absorbance_data = wt.data.from_shimadzu(absorbance_path, name='MoS2 thin film absorbance')
absorbance_data.save(raw_pickle_path)
return absorbance_data, absorbance_data
# force workup
if False:
workup()
# automatically process
shared_module.process(key='absorbance',
workup_method=workup, raw_pickle_path=raw_pickle_path,
processed_pickle_path=processed_pickle_path,
raw_dictionary=raw_dictionary,
processed_dictionary=processed_dictionary)
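# Example (illustrative sketch added by the editor, not part of the original
# module): after import (and download), the processed objects can be looked up
# in the dictionaries filled above.  This assumes the module is importable as
# `workup` and that entries are stored under the `key=` values passed to
# process(); both are assumptions, not verified here.
#
#   import workup
#   movie = workup.processed_dictionary['movie']
#   absorbance = workup.processed_dictionary['absorbance']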
| cc0-1.0 | 7,105,389,251,518,506,000 | 28.813725 | 104 | 0.610654 | false |
lacion/forge | forge/forge.py | 1 | 5804 | #!/usr/bin/env python
"""
forge.forge
~~~~~
:copyright: (c) 2010-2013 by Luis Morales
:license: BSD, see LICENSE for more details.
"""
# heavily based on diamond https://github.com/BrightcoveOS/Diamond
import os
import sys
import argparse
import logging
import traceback
import inspect
from util import load_class_from_name
from module import Module
class Forge(object):
"""
Forge class loads and starts modules
"""
def __init__(self, user, path, modules):
# Initialize Logging
self.log = logging.getLogger('forge')
# Initialize Members
self.modules = modules
self.user = user
self.path = path
def load_include_path(self, path):
"""
Scan for and add paths to the include path
"""
# Verify the path is valid
if not os.path.isdir(path):
return
# Add path to the system path
sys.path.append(path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
self.load_include_path(fpath)
def load_module(self, fqcn):
"""
Load Module class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Module
if cls == Module or not issubclass(cls, Module):
raise TypeError("%s is not a valid Module" % fqcn)
# Log
self.log.debug("Loaded Module: %s", fqcn)
return cls
def load_modules(self, path):
"""
        Scan for modules to load from path
"""
# Initialize return value
modules = {}
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return modules
# Log
self.log.debug("Loading Modules from: %s", path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
submodules = self.load_modules(fpath)
for key in submodules:
modules[key] = submodules[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath)
and len(f) > 3
and f[-3:] == '.py'
and f[0:4] != 'test'
and f[0] != '.'):
modname = f[:-3]
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except ImportError:
# Log error
self.log.error("Failed to import module: %s. %s", modname, traceback.format_exc())
continue
# Log
self.log.debug("Loaded Module: %s", modname)
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
                    # Only attempt to load classes that are in fact classes,
                    # are Modules, but are not the base Module class
if (inspect.isclass(attr) and issubclass(attr, Module) and attr != Module):
# Get class name
fqcn = '.'.join([modname, attrname])
try:
                            # Load Module class
cls = self.load_module(fqcn)
                            # Add Module class
modules[cls.__name__] = cls
except Exception:
# Log error
self.log.error("Failed to load Module: %s. %s", fqcn, traceback.format_exc())
continue
        # Return Module classes
return modules
def init_module(self, cls):
"""
Initialize module
"""
module = None
try:
# Initialize module
module = cls(self.user)
# Log
self.log.debug("Initialized Module: %s", cls.__name__)
except Exception:
# Log error
self.log.error("Failed to initialize Module: %s. %s", cls.__name__, traceback.format_exc())
# Return module
return module
def run(self):
"""
Load module classes and run them
"""
# Load collectors
modules_path = self.path
self.load_include_path(modules_path)
modules = self.load_modules(modules_path)
for module in self.modules:
c = self.init_module(modules[module.capitalize()])
c.execute()
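# Example (illustrative sketch added by the editor, not part of the original
# module): driving Forge directly instead of through the command line.
# The user, path and module names are hypothetical.
#
#   forge = Forge('deploy', '/opt/forge/modules', ['nginx', 'users'])
#   forge.run()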
def run():
"""
    Executes the requested modules to configure the system.
"""
parser = argparse.ArgumentParser(
prog='forge',
description='forge is a command line tool that allows to execute modules to configure a linux system.',
epilog='this epilog whose whitespace will be cleaned up and whose words will be wrapped across a couple lines'
)
parser.add_argument('-u', '--user', help='Destination user', type=str, required=True)
parser.add_argument('-m', '--modules', help='List of modules to execute', nargs='+', type=str, required=True)
parser.add_argument('-p', '--path', help='path to find modules', type=str, required=True)
args = parser.parse_args()
init = Forge(args.user, args.path, args.modules)
init.run()
| bsd-3-clause | -454,126,331,821,404,100 | 30.372973 | 118 | 0.52929 | false |
edgedb/edgedb | edb/testbase/protocol/render_utils.py | 1 | 1595 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import contextlib
import textwrap
class RenderBuffer:
ilevel: int
buf: List[str]
def __init__(self):
self.ilevel = 0
self.buf = []
def write(self, line: str) -> None:
self.buf.append(' ' * (self.ilevel * 2) + line)
def newline(self) -> None:
self.buf.append('')
def lastline(self) -> Optional[str]:
return self.buf[-1] if len(self.buf) else None
def popline(self) -> str:
return self.buf.pop()
def write_comment(self, comment: str) -> None:
lines = textwrap.wrap(comment, width=40)
for line in lines:
self.write(f'// {line}')
def __str__(self):
return '\n'.join(self.buf)
@contextlib.contextmanager
def indent(self):
self.ilevel += 1
try:
yield
finally:
self.ilevel -= 1
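# Example (illustrative sketch added by the editor, not part of the original
# module): rendering a small indented block with RenderBuffer.
#
#   buf = RenderBuffer()
#   buf.write('struct Message {')
#   with buf.indent():
#       buf.write_comment('payload follows the fixed-size header')
#       buf.write('uint32 length;')
#   buf.write('};')
#   print(buf)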
| apache-2.0 | -8,708,220,317,205,776 | 25.147541 | 74 | 0.641379 | false |
derekjchow/models | research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py | 1 | 9018 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for SSD models meta architecture tests."""
import functools
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.core import anchor_generator
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import target_assigner
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import calibration_pb2
from object_detection.protos import model_pb2
from object_detection.utils import ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
slim = tf.contrib.slim
keras = tf.keras.layers
class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Fake ssd feature extracture for ssd meta arch tests."""
def __init__(self):
super(FakeSSDFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=None)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(
inputs=preprocessed_inputs,
num_outputs=32,
kernel_size=1,
scope='layer1')
return [features]
class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor):
"""Fake keras based ssd feature extracture for ssd meta arch tests."""
def __init__(self):
with tf.name_scope('mock_model'):
super(FakeSSDKerasFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams=None,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
)
self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1')
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_features(self, preprocessed_inputs, **kwargs):
with tf.name_scope('mock_model'):
return [self._conv(preprocessed_inputs)]
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""A simple 2x2 anchor grid on the unit square used for test only."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [
box_list.BoxList(
tf.constant(
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
],
tf.float32))
]
def num_anchors(self):
return 4
class SSDMetaArchTestBase(test_case.TestCase):
"""Base class to test SSD based meta architectures."""
def _create_model(
self,
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
use_keras=False,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None):
is_training = False
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
if use_keras:
mock_box_predictor = test_utils.MockKerasBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
else:
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
mock_box_coder = test_utils.MockBoxCoder()
if use_keras:
fake_feature_extractor = FakeSSDKerasFeatureExtractor()
else:
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=nms_max_size_per_class,
max_total_size=nms_max_size_per_class,
use_static_shapes=use_static_shapes)
score_conversion_fn = tf.identity
calibration_config = calibration_pb2.CalibrationConfig()
if calibration_mapping_value:
calibration_text_proto = """
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: %f
}
x_y_pair {
x: 1.0
y: %f
}}}""" % (calibration_mapping_value, calibration_mapping_value)
text_format.Merge(calibration_text_proto, calibration_config)
score_conversion_fn = (
post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access
tf.identity, calibration_config))
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=1.0)
random_example_sampler = None
if random_example_sampling:
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
mock_matcher,
mock_box_coder,
negative_class_weight=negative_class_weight)
model_config = model_pb2.DetectionModel()
if expected_loss_weights == model_config.ssd.loss.NONE:
expected_loss_weights_fn = None
else:
raise ValueError('Not a valid value for expected_loss_weights.')
code_size = 4
kwargs = {}
if predict_mask:
kwargs.update({
'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict,
})
model = model_fn(
is_training=is_training,
anchor_generator=mock_anchor_generator,
box_predictor=mock_box_predictor,
box_coder=mock_box_coder,
feature_extractor=fake_feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=False,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
add_background_class=add_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
**kwargs)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def _get_value_for_matching_key(self, dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
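# Example (illustrative sketch added by the editor, not part of the original
# module): how a concrete test case typically builds on this base class.
# The class name, test name and shapes are hypothetical.
#
#   class SsdMetaArchTest(SSDMetaArchTestBase):
#
#     def test_preprocess_preserves_shape(self):
#       model, _, _, _ = self._create_model(apply_hard_mining=False)
#       images = tf.random_uniform([2, 32, 32, 3])
#       preprocessed, _ = model.preprocess(images)
#       self.assertAllEqual(preprocessed.shape.as_list(), [2, 32, 32, 3])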
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -3,750,139,240,896,300,000 | 34.785714 | 104 | 0.672322 | false |
AnhellO/DAS_Sistemas | Ene-Jun-2019/juanalmaguer/Extraordinario/Ejercicio 4.py | 1 | 1725 |
import peewee
import sqlite3
file = 'countries.db'
db = peewee.SqliteDatabase(file)
class Pais(peewee.Model):
nombre = peewee.TextField()
lenguajes = peewee.TextField()
continente = peewee.TextField()
capital = peewee.TextField()
zona = peewee.TextField()
class Meta:
database = db
db_table = 'Country'
def count_paises():
db.connect()
total = Pais.select().count()
db.close()
return total
def data_countries(pais = 'Mexico'):
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
datos = cursor.execute('select * from Paises where Nombre = "{}"'.format(pais)).fetchall()
conexion.close()
return datos[0]
def latinos():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('select Nombre, Lenguajes from Paises').fetchall()
    conexion.close()
    hispanohablantes = []
    for pais in paises:
        # pais[1] may be None when a country has no languages recorded
        lenguajes = pais[1].split(',') if pais[1] else []
        if 'spa' in lenguajes:
            hispanohablantes.append(pais[0])
    return hispanohablantes
def europeos():
conexion = sqlite3.connect(file)
cursor = conexion.cursor()
paises = cursor.execute('select Nombre from Paises where Continente = "Europe"').fetchall()
conexion.close()
return paises
def main():
print('Total de países: {}'.format(count_paises()))
print('\nDatos de México: {}'.format(data_countries()))
paises = latinos()
print('\nPaíses hispanohablantes: ')
for pais in paises:
print(pais)
paises_europeos = europeos()
print('\nPaíses de Europa: ')
for pais in paises_europeos:
print(pais[0])
if __name__ == '__main__':
main() | mit | 7,194,455,465,097,568,000 | 24.323529 | 95 | 0.632772 | false |
TuftsBCB/Walker | run_walker.py | 1 | 2820 | """
Main script for running tissue-specific graph walk experiments, to convergence.
"""
import sys
import argparse
from walker import Walker
def generate_seed_list(seed_file):
""" Read seed file into a list. """
seed_list = []
try:
fp = open(seed_file, "r")
except IOError:
sys.exit("Error opening file {}".format(seed_file))
for line in fp.readlines():
info = line.rstrip().split()
if len(info) > 1:
seed_list.append(info[1])
else:
seed_list.append(info[0])
fp.close()
return seed_list
def get_node_list(node_file):
node_list = []
try:
fp = open(node_file, 'r')
except IOError:
sys.exit('Could not open file: {}'.format(node_file))
# read the first (i.e. largest) connected component
cur_line = fp.readline()
while cur_line and not cur_line.isspace():
if cur_line:
node_list.append(cur_line.rstrip())
cur_line = fp.readline()
fp.close()
return node_list
def main(argv):
# set up argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('input_graph', help='Original graph input file, in\
edge list format')
parser.add_argument('seed', help='Seed file, to pull start nodes from')
parser.add_argument('-e', '--restart_prob', type=float, default=0.7,
help='Restart probability for random walk')
parser.add_argument('-l', '--low_list', nargs='?', default=None,
help='<Optional> List of genes expressed and\
unexpressed in the current tissue, if applicable')
parser.add_argument('-n', '--node_list', nargs='?', default=None,
help='<Optional> Order of output probs')
parser.add_argument('-o', '--original_graph_prob', type=float, default=0.1,
help='Probability of walking on the original (non-\
tissue specific) graph, if applicable')
parser.add_argument('-r', '--remove', nargs='+',
help='<Optional> Nodes to remove from the graph, if any')
opts = parser.parse_args()
seed_list = generate_seed_list(opts.seed)
node_list = get_node_list(opts.node_list) if opts.node_list else []
# filter nodes we want to remove out of the starting seed, if any
remove_list = opts.remove if opts.remove else []
if remove_list:
seed_list = [s for s in seed_list if s not in remove_list]
# run the experiments, and write a rank list to stdout
wk = Walker(opts.input_graph, opts.low_list, remove_list)
wk.run_exp(seed_list, opts.restart_prob,
opts.original_graph_prob, node_list)
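# Example invocation (illustrative, added by the editor; file names are
# hypothetical):
#
#   python run_walker.py ppi_edgelist.txt seeds.tsv -e 0.7 -n node_order.txt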
if __name__ == '__main__':
main(sys.argv)
| mit | -6,129,807,627,736,509,000 | 33.390244 | 81 | 0.588652 | false |
brunoabud/ic | ic/queue.py | 1 | 4887 | # coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
from PyQt4.QtCore import QMutex, QThread, QWaitCondition, QElapsedTimer
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
pass
class Full(Exception):
pass
class Locked(Exception):
pass
class Queue(object):
"""Create a queue object with a given maximum size.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self.queue = deque()
# Mutex using for accessing the deque
self.mutex = QMutex()
# Condition that will be held when the queue is empty and the consumer
# needs to wait for a new item
self.item_added = QWaitCondition()
# Condition that will be held when the queue is full and the producer
# needs to wait for a new place to insert the item
self.item_removed = QWaitCondition()
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
Parameters
----------
block : bool
            If True (default), the caller thread will block until the queue has
            a free space available for putting a new item. If False, the `Full`
            exception will be raised if there is no free space in the queue.
        timeout : int
            The max time to wait for a free space to be available, in milliseconds.
"""
self.mutex.lock()
try:
# Check if the queue has a limit (0 means not)
if self.maxsize > 0:
# Raise Full if block is False and the queue is at max cap.
if not block:
if self._qsize() == self.maxsize:
raise Full
# If a timeout is not provided, wait indefinitely
elif timeout is None:
while self._qsize() == self.maxsize:
self.item_removed.wait(self.mutex)
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
timer = QElapsedTimer()
timer.start()
while self._qsize() == self.maxsize:
remaining = timeout - timer.elapsed()
if remaining <= 0.0:
raise Full
self.item_removed.wait(self.mutex, remaining)
self._put(item)
self.item_added.wakeOne()
finally:
self.mutex.unlock()
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
Parameters
----------
block : bool
            If True (default), the caller thread will block until an item is
            available in the queue. If False, the `Empty` exception will be
            raised if there is no item in the queue.
        timeout : int
            The max time to wait for a new item to be available, in milliseconds.
"""
self.mutex.lock()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.item_added.wait(self.mutex)
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
timer = QElapsedTimer()
timer.start()
while not self._qsize():
remaining = timeout - timer.elapsed()
if remaining <= 0.0:
raise Empty
self.item_added.wait(self.mutex, remaining)
item = self._get()
self.item_removed.wakeOne()
return item
finally:
self.mutex.unlock()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
def _clear(self):
self.queue.clear()
def clear(self):
self._clear()
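# Example (illustrative sketch added by the editor, not part of the original
# module): sharing the queue between a producer and a consumer thread.
# Note that timeouts are expressed in milliseconds, unlike the stdlib Queue.
#
#   frames = Queue(maxsize=8)
#   frames.put(frame, timeout=500)    # producer; raises Full after 500 ms
#   frame = frames.get(timeout=500)   # consumer; raises Empty after 500 ms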
| gpl-3.0 | 6,604,661,603,224,259,000 | 32.689655 | 80 | 0.561515 | false |
WPI-ARC/constrained_path_generator | scripts/demo.py | 1 | 2723 | #!/usr/bin/python
import math
import rospy
import random
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from constrained_path_generator.msg import *
from constrained_path_generator.srv import *
def make_pose((px, py, pz), (rx, ry, rz, rw)):
new_pose = Pose()
new_pose.position.x = px
new_pose.position.y = py
new_pose.position.z = pz
new_pose.orientation.x = rx
new_pose.orientation.y = ry
new_pose.orientation.z = rz
new_pose.orientation.w = rw
return new_pose
def make_pose_stamped((px, py, pz), (rx, ry, rz, rw), frame):
pose_stamped = PoseStamped()
pose_stamped.pose = make_pose((px, py, pz), (rx, ry, rz, rw))
pose_stamped.header.frame_id = frame
return pose_stamped
def make_quaternion(w, x, y, z):
new_quat = Quaternion()
new_quat.w = w
new_quat.x = x
new_quat.y = y
new_quat.z = z
return new_quat
def make_vector(x, y, z):
new_vector = Vector3()
new_vector.x = x
new_vector.y = y
new_vector.z = z
return new_vector
_joint_state = None
def joint_state_cb(msg):
global _joint_state
_joint_state = msg
def test():
test_node = rospy.init_node("test_planner")
js_sub = rospy.Subscriber("joint_states", JointState, joint_state_cb)
planner_client = rospy.ServiceProxy("plan_constrained_path", PlanConstrainedPath)
# Wait for a joint state
while _joint_state is None and not rospy.is_shutdown():
rospy.sleep(0.1)
print "got robot state"
# Make the waypoints
pose_1 = make_pose_stamped((0.585, 0.15, 1.250), (0.0, 0.888, 0.0, -0.460), "base_link")
waypoints = [pose_1]
# Make the request
query = PlanConstrainedPathQuery()
query.path_type = PlanConstrainedPathQuery.CHECK_ENVIRONMENT_COLLISIONS | PlanConstrainedPathQuery.CARTESIAN_IK | PlanConstrainedPathQuery.PLAN
query.waypoints = waypoints
query.group_name = "left_arm"
query.target_link = "l_wrist_roll_link"
query.planning_time = 5.0
query.max_cspace_jump = 0.05
query.task_space_step_size = 0.025
query.initial_state.joint_state = _joint_state
query.path_orientation_constraint = make_quaternion(0.0, 0.888, 0.0, -0.460)
query.path_angle_tolerance = make_vector(0.01, 0.01, 0.01)
query.path_position_tolerance = make_vector(0.02, 0.02, 0.02)
query.goal_angle_tolerance = make_vector(0.01, 0.01, 0.01)
query.goal_position_tolerance = make_vector(0.01, 0.01, 0.01)
full_req = PlanConstrainedPathRequest()
full_req.query = query
full_res = planner_client.call(full_req)
print full_res
# Make some collision_planes
raw_input("Press ENTER to close...")
print "Done"
if __name__ == '__main__':
test() | bsd-2-clause | -177,377,274,693,844,160 | 28.608696 | 147 | 0.658832 | false |
roderickmackenzie/gpvdm | gpvdm_gui/gui/license_key.py | 1 | 2732 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package register
# Registration window
#
import os
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QLineEdit,QComboBox,QHBoxLayout,QPushButton,QLabel,QDialog,QVBoxLayout,QSizePolicy
from PyQt5.QtGui import QPainter,QIcon,QImage
from PyQt5.QtGui import QFont
from icon_lib import icon_get
from PyQt5.QtCore import QSize, Qt
from inp import inp_load_file
import re
from error_dlg import error_dlg
from lock import get_lock
class license_key(QDialog):
def callback_ok(self):
print("boom")
#get_lock().register(email=self.email0.text(),name=self.name.text())
#get_lock().get_license()
self.accept()
def __init__(self):
QWidget.__init__(self)
self.setWindowIcon(icon_get("icon"))
self.setWindowTitle(_("Registration window (www.gpvdm.com)"))
self.setWindowFlags(Qt.WindowStaysOnTopHint)
vbox=QVBoxLayout()
l=QLabel(_("Enter the license key below:"))
l.setFont(QFont('SansSerif', 14))
vbox.addWidget(l)
hbox_widget=QWidget()
hbox=QHBoxLayout()
hbox_widget.setLayout(hbox)
l=QLabel("<b>"+_("Key")+"</b>:")
l.setFont(QFont('SansSerif', 14))
hbox.addWidget(l)
self.name = QLineEdit()
hbox.addWidget(self.name)
vbox.addWidget(hbox_widget)
button_box=QHBoxLayout()
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
button_box.addWidget(spacer)
self.register=QPushButton("Register", self)
self.register.clicked.connect(self.callback_ok)
button_box.addWidget(self.register)
button_box_widget=QWidget()
button_box_widget.setLayout(button_box)
vbox.addWidget(button_box_widget)
self.setLayout(vbox)
self.setMinimumWidth(400)
self.name.setText("key")
def run(self):
return self.exec_()
| gpl-2.0 | -5,252,635,026,861,043,000 | 25.524272 | 118 | 0.732064 | false |
EmanueleCannizzaro/scons | test/Climb/U-Default-no-target.py | 1 | 1782 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Climb/U-Default-no-target.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Make sure a Default() target that doesn't exist is handled with
the correct failure when used with the -U option.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
Default('not_a_target.in')
""")
test.run(arguments = '-U', status=2, match=TestSCons.match_re, stderr="""\
scons: \*\*\* Do not know how to make File target `not_a_target.in' \(.*not_a_target.in\). Stop.
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 855,950,900,662,275,600 | 34.64 | 107 | 0.741302 | false |
lneuhaus/pyrpl | pyrpl/widgets/module_widgets/pid_widget.py | 1 | 1109 | """
A widget for pid modules.
"""
from .base_module_widget import ModuleWidget
from qtpy import QtCore, QtWidgets
class PidWidget(ModuleWidget):
"""
Widget for a single PID.
"""
def init_gui(self):
self.init_main_layout(orientation="vertical")
#self.main_layout = QtWidgets.QVBoxLayout()
#self.setLayout(self.main_layout)
self.init_attribute_layout()
input_filter_widget = self.attribute_widgets["inputfilter"]
self.attribute_layout.removeWidget(input_filter_widget)
self.main_layout.addWidget(input_filter_widget)
for prop in ['p', 'i']: #, 'd']:
self.attribute_widgets[prop].widget.set_log_increment()
# can't avoid timer to update ival
# self.timer_ival = QtCore.QTimer()
# self.timer_ival.setInterval(1000)
# self.timer_ival.timeout.connect(self.update_ival)
# self.timer_ival.start()
def update_ival(self):
widget = self.attribute_widgets['ival']
if self.isVisible() and not widget.editing():
widget.write_attribute_value_to_widget()
| gpl-3.0 | 1,500,945,310,355,024,100 | 31.617647 | 67 | 0.64202 | false |
openweave/openweave-tlv-schema | openweave/tlv/schema/tool.py | 1 | 8124 | #!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Tool for working with Weave TLV schemas.
#
import sys
import os
import argparse
from .obj import WeaveTLVSchema
from .error import WeaveTLVSchemaError
scriptName = os.path.basename(sys.argv[0])
class _UsageError(Exception):
pass
class _ArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise _UsageError('{0}: {1}'.format(self.prog, message))
class _ValidateCommand(object):
name = 'validate'
summary = 'Validate the syntax and consistency of a TLV schema'
help = ('{0} validate : {1}\n'
'\n'
'Usage:\n'
' {0} validate [options...] {{schema-files...}}\n'
'\n'
' -s|--silent\n'
' Do not display results (exit code indicates the number of errors).\n'
).format(scriptName, summary)
def run(self, args):
argParser = _ArgumentParser(prog='{0} {1}'.format(scriptName, self.name),
add_help=False)
argParser.add_argument('-s', '--silent', action='store_true')
argParser.add_argument('files', nargs='*')
args = argParser.parse_args(args)
if len(args.files) == 0:
raise _UsageError('{0} {1}: Please specify one or more schema files'.format(scriptName, self.name))
schema = WeaveTLVSchema()
errs = []
for schemaFileName in args.files:
if not os.path.exists(schemaFileName):
                raise _UsageError('{0} {1}: Schema file not found: {2}'.format(scriptName, self.name, schemaFileName))
try:
schema.loadSchemaFromFile(schemaFileName)
except WeaveTLVSchemaError as err:
errs.append(err)
errs += schema.validate()
if not args.silent:
if len(errs) == 0:
print('Validation completed successfully')
else:
detailShown = {}
for err in errs:
withDetail = False
if err.detail is not None:
withDetail = not detailShown.get(err.detail, False)
if withDetail:
detailShown[err.detail] = True
print("%s\n" % err.format(withDetail=withDetail), file=sys.stderr)
return len(errs)
class _DumpCommand(object):
name = 'dump'
summary = 'Dump the syntax tree for a TLV schema'
help = ('{0} dump : {1}\n'
'\n'
'Usage:\n'
' {0} dump {{schema-files...}}\n'
).format(scriptName, summary)
def run(self, args):
argParser = _ArgumentParser(prog='{0} {1}'.format(scriptName, self.name),
add_help=False)
argParser.add_argument('files', nargs='*')
args = argParser.parse_args(args)
if len(args.files) == 0:
raise _UsageError('{0} {1}: Please specify one or more schema files'.format(scriptName, self.name))
schema = WeaveTLVSchema()
for schemaFileName in args.files:
if not os.path.exists(schemaFileName):
                raise _UsageError('{0} {1}: Schema file not found: {2}'.format(scriptName, self.name, schemaFileName))
schema.loadSchemaFromFile(schemaFileName)
for schemaFile in schema.allFiles():
schemaFile.summarize(sys.stdout)
return 0
class _UnitTestCommand(object):
name = 'unittest'
summary = 'Run unit tests on the TLV schema code'
help = ('{0} unittest : {1}\n'
'\n'
'Usage:\n'
' {0} unittest [options...] [test-names...]\n'
'\n'
' -v|--verbosity [int]\n'
' Test progress verbosity (defaults to 2).\n'
).format(scriptName, summary)
def run(self, args):
argParser = _ArgumentParser(prog='{0} {1}'.format(scriptName, self.name), add_help=False)
argParser.add_argument('-v', '--verbosity', type=int, default=2)
argParser.add_argument('testnames', nargs=argparse.REMAINDER, default=[])
args = argParser.parse_args(args)
import unittest
from . import tests
if len(args.testnames) > 0:
selectedTests = unittest.defaultTestLoader.loadTestsFromNames(args.testnames, module=tests)
else:
selectedTests = unittest.defaultTestLoader.loadTestsFromModule(tests)
runner = unittest.TextTestRunner(verbosity=int(args.verbosity))
result = runner.run(selectedTests)
return len(result.errors)
class _HelpCommand(object):
name = 'help'
summary = 'Display usage information'
@property
def help(self):
maxWidth = max((len(c.name) for c in self.availCommands))
commandSummary = ''.join(( '\n {0:<{width}} - {1}'.format(c.name, c.summary, width=maxWidth) for c in self.availCommands ))
return ('{0} : A tool for working with Weave TLV Schemas\n'
'\n'
'Usage:\n'
' {0} {{command}} [options] ...\n'
'\n'
'Available commands:{1}\n'
'\n'
'Run "{0} help <command>" for additional help.\n'
).format(scriptName, commandSummary)
def __init__(self, availCommands):
self.availCommands = availCommands
def run(self, args):
topic = args[0] if len(args) > 0 else 'help'
topic = topic.lower()
for command in self.availCommands:
if topic == command.name:
print(command.help)
break
else:
raise _UsageError('Unrecognized help topic: {0}'.format(topic))
return 0
def main():
try:
# Construct a list of the available commands.
commands = [
_ValidateCommand(),
_DumpCommand(),
_UnitTestCommand()
]
commands.append(_HelpCommand(availCommands=commands))
# Parse the command name argument, along with arguments for the command.
argParser = _ArgumentParser(prog=scriptName, add_help=False)
argParser.add_argument('-h', '--help', nargs='?', const='help')
argParser.add_argument('commandName', nargs='?')
argParser.add_argument('commandArgs', nargs=argparse.REMAINDER, default=[])
args = argParser.parse_args()
# Allow the user to invoke the help command using -h or --help.
if args.help is not None:
args.commandName = 'help'
args.commandArgs = [ args.help ]
# Fail if no command was given.
if args.commandName is None:
raise _UsageError('Please specify a command, or run "{0} help" for a list of available commands.'.format(scriptName))
# Run the specified command.
commandNameLC = args.commandName.lower()
for command in commands:
if commandNameLC == command.name:
command.run(args.commandArgs)
break
else:
raise _UsageError('Unrecognized command: {1}\nRun "{0} help" for a list of available commands.'.format(scriptName, args.commandName))
except _UsageError as ex:
print(str(ex), file=sys.stderr)
return -1
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -6,430,546,911,352,932,000 | 33.570213 | 145 | 0.569301 | false |
mozillazg/bild.me-cli | setup.py | 1 | 1880 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from codecs import open
import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import bild
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
requirements = [
'requests>=2.0.1',
'argparse',
]
packages = [
'bild',
]
def long_description():
readme = open('README.rst', encoding='utf8').read()
text = readme + '\n\n' + open('CHANGELOG.rst', encoding='utf8').read()
return text
setup(
name='bild.me-cli',
version=bild.__version__,
description=bild.__doc__,
long_description=long_description(),
url='https://github.com/mozillazg/bild.me-cli',
download_url='https://github.com/mozillazg/bild.me-cli/archive/master.zip',
author=bild.__author__,
author_email='[email protected]',
license=bild.__license__,
packages=packages,
package_data={'': ['LICENSE.txt']},
package_dir={'bild': 'bild'},
entry_points={
'console_scripts': [
'bild = bild.bild:main',
],
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Environment :: Console',
'Topic :: Utilities',
'Topic :: Terminals',
],
keywords='bild.me, CLI',
)
| mit | 1,008,756,644,684,967,000 | 25.478873 | 79 | 0.598936 | false |
kamailio/kamcli | kamcli/commands/cmd_shv.py | 1 | 1435 | import click
from kamcli.cli import pass_context
from kamcli.iorpc import command_ctl
@click.group(
"shv",
help="Manage $shv(name) variables",
short_help="Manage $shv(name) variables",
)
@pass_context
def cli(ctx):
pass
@cli.command("get", short_help="Get the value for $shv(name)")
@click.argument("name", nargs=-1, metavar="<name>")
@pass_context
def shv_get(ctx, name):
"""Get the value for $shv(name)
\b
Parameters:
<name> - the name of shv variable
"""
if not name:
command_ctl(ctx, "pv.shvGet")
else:
for n in name:
command_ctl(ctx, "pv.shvGet", [n])
@cli.command("sets", short_help="Set $shv(name) to string value")
@click.argument("name", metavar="<name>")
@click.argument("sval", metavar="<sval>")
@pass_context
def shv_sets(ctx, name, sval):
"""Set $shv(name) to string value
\b
Parameters:
<name> - the name of shv variable
<sval> - the string value
"""
command_ctl(ctx, "pv.shvSet", [name, "str", sval])
@cli.command("seti", short_help="Set $shv(name) to int value")
@click.argument("name", metavar="<name>")
@click.argument("ival", metavar="<ival>", type=int)
@pass_context
def shv_seti(ctx, name, ival):
"""Set $shv(name) to int value
\b
Parameters:
<name> - the name of shv variable
<ival> - the int value
"""
command_ctl(ctx, "pv.shvSet", [name, "int", ival])
| gpl-2.0 | -3,036,620,697,451,798,500 | 22.916667 | 65 | 0.609059 | false |
emmanuelle/multi-diffusion | doc/conf.py | 1 | 8597 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# multidiff documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 5 15:11:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'multidiff'
copyright = '2017, Emmanuelle Gouillart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'multidiffdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'multidiff.tex', 'multidiff Documentation',
'Emmanuelle Gouillart', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'multidiff', 'multidiff Documentation',
['Emmanuelle Gouillart'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'multidiff', 'multidiff Documentation',
'Emmanuelle Gouillart', 'multidiff', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples'}
| bsd-3-clause | 6,489,047,847,471,528,000 | 30.606618 | 79 | 0.707921 | false |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/io/fits/hdu/nonstandard.py | 1 | 4066 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import io
from ..file import _File
from .base import NonstandardExtHDU
from .hdulist import HDUList
from ..header import Header, _pad_length
from ..util import fileobj_name
from ....extern.six import string_types
from ....utils import lazyproperty
class FitsHDU(NonstandardExtHDU):
"""
A non-standard extension HDU for encapsulating entire FITS files within a
single HDU of a container FITS file. These HDUs have an extension (that is
an XTENSION keyword) of FITS.
The FITS file contained in the HDU's data can be accessed by the `hdulist`
attribute which returns the contained FITS file as an `HDUList` object.
"""
_extension = 'FITS'
@lazyproperty
def hdulist(self):
self._file.seek(self._data_offset)
fileobj = io.BytesIO()
# Read the data into a BytesIO--reading directly from the file
# won't work (at least for gzipped files) due to problems deep
# within the gzip module that make it difficult to read gzip files
# embedded in another file
fileobj.write(self._file.read(self.size))
fileobj.seek(0)
if self._header['COMPRESS']:
fileobj = gzip.GzipFile(fileobj=fileobj)
return HDUList.fromfile(fileobj, mode='readonly')
@classmethod
def fromfile(cls, filename, compress=False):
"""
Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
disk.
Parameters
----------
filename : str
The path to the file to read into a FitsHDU
compress : bool, optional
Gzip compress the FITS file
"""
return cls.fromhdulist(HDUList.fromfile(filename), compress=compress)
@classmethod
def fromhdulist(cls, hdulist, compress=False):
"""
Creates a new FitsHDU from a given HDUList object.
Parameters
----------
hdulist : HDUList
            A valid HDUList object.
compress : bool, optional
Gzip compress the FITS file
"""
fileobj = bs = io.BytesIO()
if compress:
if hasattr(hdulist, '_file'):
name = fileobj_name(hdulist._file)
else:
name = None
fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs)
hdulist.writeto(fileobj)
if compress:
fileobj.close()
# A proper HDUList should still be padded out to a multiple of 2880
# technically speaking
padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii')
bs.write(padding)
bs.seek(0)
cards = [
('XTENSION', cls._extension, 'FITS extension'),
('BITPIX', 8, 'array data type'),
('NAXIS', 1, 'number of array dimensions'),
('NAXIS1', len(bs.getvalue()), 'Axis length'),
('PCOUNT', 0, 'number of parameters'),
('GCOUNT', 1, 'number of groups'),
]
# Add the XINDn keywords proposed by Perry, though nothing is done with
# these at the moment
if len(hdulist) > 1:
for idx, hdu in enumerate(hdulist[1:]):
cards.append(('XIND' + str(idx + 1), hdu._header_offset,
'byte offset of extension %d' % (idx + 1)))
cards.append(('COMPRESS', compress, 'Uses gzip compression'))
header = Header(cards)
return cls._readfrom_internal(_File(bs), header=header)
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, string_types):
xtension = xtension.rstrip()
return xtension == cls._extension
# TODO: Add header verification
def _summary(self):
# TODO: Perhaps make this more descriptive...
return (self.name, self.__class__.__name__, len(self._header))
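# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of embedding one FITS file inside another through
# FitsHDU and reading it back via the ``hdulist`` property. The file names
# below are hypothetical.
def _example_fitshdu_usage():
    from astropy.io import fits
    # Wrap an on-disk FITS file, gzip-compressing its bytes inside the HDU.
    wrapped = FitsHDU.fromfile('input.fits', compress=True)
    container = fits.HDUList([fits.PrimaryHDU(), wrapped])
    container.writeto('container.fits')
    # The embedded file is exposed as an HDUList through the lazy property.
    embedded = fits.open('container.fits')[1].hdulist
    embedded.info()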
| mit | 3,870,601,165,106,004,500 | 31.528 | 79 | 0.591982 | false |
sh-ft/mudwyrm_users | mudwyrm_users/admin/achaea/scripts/brain/combat.py | 1 | 8663 | from mudwyrm_users.admin.achaea import ScriptState
from mudwyrm_users.admin.achaea.action import Action, Outcome, EventOutcome
from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent, TriggerPack
from mudwyrm_users.admin.achaea.common import not_, traverse_scripts, AttrDict, partition_action
from mudwyrm_users.admin.achaea.database import Base
from mudwyrm_users.admin.achaea.scripts import char
from mudwyrm_users.admin.achaea.scripts.actions import all_actions as actions
import sqlalchemy as sa
p = None
s = ScriptState()
def init(processor):
assert processor is not None
global p
p = processor
s.loot = []
s.info_here = {}
s.state = 'inactive'
s.target = None
def think():
if s.state == 'inactive':
return
if not s.target:
return combat_echo("No target to fight.")
    if isinstance(s.target, int) and s.target not in char.room_objects:
s.state = 'inactive'
return combat_echo("Target has been lost. Given up on fighting.")
cure()
if s.state == 'attacking':
attack()
elif s.state == 'defending':
defend()
elif s.state == 'looting':
loot()
##################
def combat_echo(text):
p.echo("[Combat] %s" % text)
def choose_offensive_action(target):
if char.race == 'Dragon':
return (actions.gut, target)
elif char.class_ == 'Sylvan':
return (actions.thornrend if char.status('viridian')
else actions.firelash, target)
elif char.class_ == 'Serpent':
if char.skill_available('garrote'):
return (actions.garrote, target)
else:
assert char.skill_available('bite')
venom = 'sumac' if not char.skill_available('camus', 'venom') else 'camus'
return (actions.venom_bite, venom, target)
elif char.class_ == 'Shaman':
return (actions.curse, 'bleed', target)
elif char.class_ == 'Blademaster':
return (actions.drawslash, target)
elif char.class_ == 'Alchemist':
return (actions.educe_iron, target)
return None
def choose_defensive_action():
if char.skill_available('reflection'):
return (actions.reflection, 'me')
return None
def offensive_mode():
if s.state == 'defending':
s.state = 'attacking'
combat_echo("Switched to offensive mode.")
def defensive_mode():
if s.state == 'attacking':
s.state = 'defending'
combat_echo("Switched to defensive mode.")
def attack():
if char.health < char.defensive_health_level:
defensive_mode()
return
action, args = partition_action(choose_offensive_action(s.target))
if not action:
return combat_echo("No offensive action was set for this character, not attacking.")
if action.possible(*args) and not p.action_already_active(action, *args):
p.act(action, *args)
def defend():
action, args = partition_action(choose_defensive_action())
if not action:
offensive_mode()
return combat_echo("No defensive action was set for this character, not defending.")
if action.possible(*args) and not p.action_already_active(action, *args):
p.act(action, *args)
if char.health > char.offensive_health_level:
offensive_mode()
def loot():
if char.balance('balance') and char.balance('equilibrium'):
for item in s.loot:
p.send("get %s" % item)
s.loot = []
s.state = 'inactive'
combat_echo("Finished fighting.")
def cure():
if char.status('loki'):
if actions.diagnose.possible():
p.act(actions.diagnose)
##########################
@Alias(r'^(?:kill|k) (.+)$')
def combat_start(match):
target = match.group(1)
if s.state not in ['inactive', 'looting']:
return combat_echo("Already fighting someone.")
s.target = target
s.state = 'attacking'
combat_echo("Fighting %s" % s.target)
think()
@Alias(r'^(?:autokill|ak|k)$')
def autotarget_combat_start(match):
if s.state not in ['inactive', 'looting']:
return combat_echo("Already fighting someone.")
def find_target():
target_list = p.db.query(Target).all()
for obj in char.room_objects.itervalues():
for t in target_list:
if obj['name'].find(t.name) >= 0:
return obj
return None
target = find_target()
if not target:
return combat_echo("No target found.")
s.target = target['id']
s.state = 'attacking'
combat_echo("Target found: %s" % target['name'])
think()
@Alias(r'^(?:stopkill|sk)$')
def combat_stop(match):
if s.state not in ['inactive', 'looting']:
s.state = 'inactive'
combat_echo("Given up on fighting.")
else:
combat_echo("Already not fighting")
########################
@OnEvent('TargetNotFound')
def on_target_not_found():
if s.state in ['attacking', 'defending']:
s.state = 'inactive'
combat_echo("Target has been lost. Given up on fighting.")
p.notification("Combat", "Target has been lost.")
@OnEvent('CreatureSlain')
def on_creature_slain(name):
# TODO: check if a creature was the target.
if s.state in ['attacking', 'defending']:
combat_echo("Target has been slain.")
s.state = 'looting'
p.notification("Combat", "Target has been slain.")
@OnEvent('LootDropped')
def on_loot_dropped(name):
s.loot.append(name)
@Trigger(r'^You have slain (.+), retrieving the corpse\.$')
def creature_slain(match):
p.raise_event('CreatureSlain', name=match.group(1))
@Trigger(r'^A few golden sovereigns spill from the corpse\.$',
r'^A small quantity of sovereigns spills from the corpse\.$',
r'^A (?:tiny|small|large) pile of sovereigns spills from the corpse\.$',
r'^(?:Many|Numerous) golden sovereigns spill from the corpse\.$')
def gold_loot(match):
p.raise_event('LootDropped', name='money')
@Trigger(r'^A glistening iridescent pearl tumbles out of the corpse of a barnacle encrusted oyster\.$',
r'^A gleaming black pearl tumbles out of the corpse of a marsh ooze\.$')
def pearl_loot(match):
p.raise_event('LootDropped', name='pearl')
@Trigger(r'^A (?:chipped|jagged|smooth) iconic shard appears and clatters to the ground\.$')
def shard_loot(match):
p.raise_event('LootDropped', name='shard')
@Trigger(r'^The Mask of the Beast tumbles out of the corpse of a mysterious cloaked figure\.$')
def mask_of_the_beast_loot(match):
p.raise_event('LootDropped', name='mask')
class info_here(Action):
def start(action):
s.info_here.clear()
p.send("info here")
@Alias(r'^(ih|info here)$')
def aliases(match):
p.act(info_here)
@Trigger(r'^(\D+\d+)\s+(.*)$')
def info_here_line(match, action):
s.info_here[match.group(1)] = match.group(2)
@Outcome(r'^Number of objects: (\d+)$')
def info_here_end_line(match, action):
n = int(match.group(1))
p.raise_event('InfoHereUpdated')
if len(s.info_here) != n:
raise ScriptError("Warning: Number of objects captured from "
"'info here' doesn't match the actual number of objects.")
@OnEvent('InfoHereUpdated')
def ih_updated():
pass
class Target(Base):
__tablename__ = 'targets'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False, unique=True)
def __init__(self, name):
self.name = name
@Alias(r'^target_list$')
def target_list(match):
targets = p.db.query(Target).all()
if not targets:
p.echo("Target list is empty.")
else:
p.echo("Target list: %s." % ", ".join(t.name for t in targets))
@Alias(r'^target_add (.*)$')
def target_add(match):
target = Target(match.group(1))
p.db.add(target)
p.db.commit()
p.echo("%s has been added to the target list." % target.name)
@Alias(r'^target_remove (.*)$')
def target_remove(match):
name = match.group(1)
target = p.db.query(Target).filter(Target.name == name).first()
if not target:
return p.echo("Target list doesn't contain %s." % name)
p.db.delete(target)
p.db.commit()
p.echo("%s has been removed from the target list." % name)
| mit | -8,037,533,459,721,834,000 | 31.430769 | 103 | 0.594482 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/windows_benchmarks/diskspd_benchmark.py | 1 | 1623 | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run DiskSpd in a single VM."""
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker.windows_packages import diskspd
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'diskspd'
BENCHMARK_CONFIG = """
diskspd:
description: Run diskspd on a single machine
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 1
disk_spec: *default_500_gb
"""
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
vm = benchmark_spec.vms[0]
vm.Install('diskspd')
def Run(benchmark_spec):
"""Measure the disk performance in one VM.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects with the benchmark results.
"""
vm = benchmark_spec.vms[0]
results = []
results.extend(diskspd.RunDiskSpd(vm))
return results
def Cleanup(unused_benchmark_spec):
pass
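# --- Illustrative note (not part of the original module) ---
# A typical (hypothetical) invocation through the PerfKitBenchmarker CLI,
# assuming a Windows-capable cloud/os_type configuration:
#   ./pkb.py --benchmarks=diskspd --cloud=GCP
# The benchmark provisions one VM with the 500 GB data disk declared in
# BENCHMARK_CONFIG above, installs diskspd, and returns the samples produced
# by diskspd.RunDiskSpd().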
| apache-2.0 | -5,473,095,602,450,988,000 | 25.177419 | 74 | 0.736907 | false |
garnertb/fire-risk | fire_risk/backends/__init__.py | 1 | 2548 | import psycopg2
from .queries import ALL_RESIDENTIAL_FIRES
from psycopg2.extras import DictCursor
class Backend(object):
"""
Backend mixin that should be used to implement APIs to read data.
"""
def connect(self):
"""
Connect to the backend.
"""
raise NotImplementedError
def close_connection(self):
"""
Close the connection to the backend.
"""
raise NotImplementedError
def query(self):
"""
Query the backend.
"""
raise NotImplementedError
class FileBackend(Backend):
"""
Parse a set of NFIRS incident flat files for structure fires.
Args:
flatfiles (list): a list of file pathnames for files to be parsed.
    Returns:
        None. The firespread_count attributes are updated in place with the
        calculated values.
"""
pass
class PostgresBackend(Backend):
"""
The Postgres Backend.
"""
def __init__(self, connection_params):
self.connection_params = connection_params
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_connection()
def connect(self):
self.connection = psycopg2.connect(**self.connection_params)
return self.connection
def get_cursor(self):
return self.connection.cursor(cursor_factory=DictCursor)
def close_connection(self):
self.connection.close()
def query(self, query, query_params=()):
cursor = self.get_cursor()
cursor.execute(query, query_params)
return cursor
def get_firespread_counts(self, query=ALL_RESIDENTIAL_FIRES, query_params=()):
results = self.query(query=query, query_params=query_params).fetchall()
counts = dict(object_of_origin=0, room_of_origin=0, floor_of_origin=0, building_of_origin=0, beyond=0)
for result in results:
if result['fire_sprd'] == '1':
counts['object_of_origin'] += result['count']
if result['fire_sprd'] == '2':
counts['room_of_origin'] += result['count']
if result['fire_sprd'] == '3':
counts['floor_of_origin'] += result['count']
if result['fire_sprd'] == '4':
counts['building_of_origin'] += result['count']
if result['fire_sprd'] == '5':
counts['beyond'] += result['count']
return counts
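# --- Illustrative usage sketch (not part of the original module) ---
# The connection parameters below are hypothetical; any keyword arguments
# accepted by psycopg2.connect() may be supplied.
def _example_postgres_usage():
    params = {'host': 'localhost', 'dbname': 'nfirs', 'user': 'nfirs'}
    # PostgresBackend is a context manager: the connection is opened on entry
    # and closed again on exit.
    with PostgresBackend(params) as backend:
        counts = backend.get_firespread_counts()
        print(counts['room_of_origin'], counts['beyond'])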
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit | -6,737,203,091,711,348,000 | 24.737374 | 110 | 0.588305 | false |
gardir/Devilry_sort | sort_deliveries.py | 1 | 18439 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import time
import shutil
import glob
from rettescript import print_failed
class Devilry_Sort:
def __init__(self,
rootDir,
execute=True,
delete=False,
log=False,
rename=True,
unzip="false",
javacFlag=False,
verbose=False):
"""
Initializes the class
Parameters
----------
self : this
This class
rootDir : String
A string describing the path to root directory
execute : boolean
Execute means the primary function will be executed (default=True)
delete : boolean
If true it will delete all older deliveries (default=False)
log : boolean
If log is true a seperate log-file for what was done is created (default False)
rename : boolean
If renaming is false, the user-id directories will not be renamed to
contain only user-id (default=True)
        unzip : str
            Either "false" (skip unzipping), "true" (find and unzip the single
            .zip file in path), or the path to a specific .zip file to extract
            before sorting (default="false")
verbose : boolean
Be loud about what to do
"""
self.rootDir = rootDir
self.execute = execute
self.delete = delete
self.log = log
self.rename = rename
self.unzip = unzip
self.javacFlag = javacFlag
self.verbose = verbose
self.failed_javac = []
self.my_out = sys.stdout
self.my_err = sys.stderr
if log:
log_filename = os.path.join(rootDir, "log.txt")
self.log_file = open(log_filename, 'w')
self.log_file.close()
self.log_file = open(log_filename, 'a')
self.write_to_log("Log created")
self.my_out = self.log_file
self.my_err = self.log_file
elif not verbose:
self.null_out = open(os.devnull, 'w')
self.my_out = self.null_out
self.my_err = subprocess.STDOUT
def attempt_javac(self, path):
"""
Function inspired by rettescript.py written by Henrik Hillestad Løvold
"""
command = format("javac %s" % os.path.join(path, "*.java"))
if self.verbose:
print("%s:" % (command))
elif self.log:
self.write_to_log(format("%s:" % command))
try:
subprocess.check_call(command, shell=True, stdout=self.my_out, stderr=self.my_err)
except subprocess.CalledProcessError:
return 1
# No problem
return 0
def dive_delete(self, root_depth):
"""
"""
for dirpath, subdirList, fileList in os.walk(rootDir, topdown=False):
depthList = dirpath.split(os.path.sep)
depth = len(depthList) - root_depth
if depth == 1:
for subdir in subdirList:
path = os.path.join(dirpath, subdir).replace(" ", "\ ")
command = ["rm", "-r", path]
if self.verbose:
print("Recursive removing '%s'" % path)
elif self.log:
self.write_to_log(format("Recursive removing '%s'" % path))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
shutil.rmtree(path)
def dive_delete_dir(self, root_depth):
for dirpath, subdirList, fileList in os.walk(rootDir, topdown = False):
depth = len(dirpath.split(os.path.sep)) - root_depth
created = False
for subdir in subdirList:
folder = os.path.join(dirpath, subdir)
command = ['rm', '-d', folder]
try:
if self.verbose:
print("Trying to remove empty folder: %s" % folder)
elif self.log:
self.write_to_log(format("Trying to remove empty folder: %s" % folder))
#subprocess.check_call(command, stdout = self.my_out, stderr = self.my_err)
os.rmdir(folder)
#except subprocess.CalledProcessError:
except OSError:
if self.verbose:
print("Removing empty folder failed: %s" % folder)
elif self.log:
self.write_to_log(format("Removing empty folder failed: %s" % folder))
if depth == 1:
self.move(dirpath, subdir)
java_files_present = len(glob.glob(dirpath+os.path.sep+'*.java')) > 0
if java_files_present and self.attempt_javac(dirpath) != 0:
if self.verbose:
print("%s failed javac" % dirpath)
elif self.log:
self.write_to_log(format("%s failed javac" % dirpath))
self.failed_javac.append(dirpath)
def dive_move(self, root_depth):
for dirpath, subdirList, fileList in os.walk(rootDir, topdown=True):
depthList = dirpath.split(os.path.sep)
depth = len(depthList) - root_depth
# We only want last deadline and last delivery
if depth == 1 or depth == 2:
if (len(subdirList) > 1):
last = sorted(subdirList)[-1]
i = 0
max = len(subdirList)
while (i < max):
if (last != subdirList[i]):
del subdirList[i]
i-=1
max-=1
i+=1
#subdirList = sorted(subdirList)[-1:]
elif depth == 3:
from_path = dirpath
to_path = os.path.join(*from_path.split(os.path.sep)[:-2])
if self.verbose:
print("Moving all files in '%s' to '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format(
"Moving all files in '%s' to '%s'" % (from_path, to_path)))
for work_file in fileList:
file_path = os.path.join(from_path, work_file)
new_file_path = os.path.join(to_path, work_file)
if self.verbose:
print("Renaming '%s' to '%s'" % (file_path, new_file_path))
elif self.log:
self.write_to_log(format("Moved '%s' to '%s'" % (file_path, new_file_path)))
#shutil.move(file_path, new_file_path)
os.rename(file_path, new_file_path)
def move(self, root_path, folder):
from_path = os.path.join(root_path, folder)
to_path = os.path.join(root_path, "older")
command = ['mv', from_path, to_path]
if self.verbose:
print("Moving older files '%s' into '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format("Moving older files '%s' into '%s'" % (from_path, to_path)))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
try:
shutil.move(from_path, to_path)
except IOError as e:
if self.verbose:
print("ERROR: Could not move '%s' to '%s'" % (from_path, to_path))
print(e)
elif self.log:
self.write_to_log("ERROR: Could not move '%s' to '%s'\n%s" % (from_path, to_path, e))
def run(self):
root_depth = len(self.rootDir.split(os.path.sep))
if self.unzip != "false":
self.execute = self.unzip_execute(root_depth)
if self.execute:
if self.rename:
self.user_rename()
self.dive_move(root_depth)
self.dive_delete_dir(root_depth)
if self.delete:
self.dive_delete(root_depth)
if self.log:
self.log_file.close()
        elif not self.verbose:
self.null_out.close()
def unzip_execute(self, root_depth):
zipfile = self.unzip
if self.unzip == "true":
zipfile = self.find_zip_file(root_depth)
        # self.execute is False here if find_zip_file() did not find exactly one zip file.
if self.execute:
self.unzip_file(zipfile)
self.unzip_clean(root_depth, zipfile)
        return self.execute
def find_zip_file(self, root_depth):
files = ""
zipfiles = []
for dirpath, subdirs, filenames in os.walk(self.rootDir):
depth = len(dirpath.split(os.path.sep)) - root_depth
if depth == 0:
if self.verbose:
print("Looking for zip files.")
files = filenames;
for afile in files:
if afile[-4:] == ".zip":
if self.verbose:
print("Found zip-file: %s" % afile)
elif self.log:
self.write_to_log(format("Found zip-file: %s" % afile))
zipfiles.append(afile)
if len(zipfiles) > 1:
print("Please have only the zipfile from Devilry in folder")
self.execute = False
elif len(zipfiles) == 0:
print("No zipfiles were found in '%s%s'" % (rootDir, os.path.sep))
self.execute = False
break # out from os.walk() as only files from root needed
if len(zipfiles) > 0:
return zipfiles[0]
return ""
def unzip_file(self, zipfile):
# Unzip command
from_path = format("%s" % (zipfile))
to_path = self.rootDir
command = ['unzip',
from_path,
"-d",
to_path]
if self.verbose:
print("Unzipping file: %s" % from_path)
elif self.log:
self.write_to_log(format("Unzipping file '%s'" % (from_path)))
subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
def unzip_clean(self, root_depth, unzip_file):
for dirpath, subdirs, filenames in os.walk(self.rootDir):
# Finding current depth
if (dirpath[-1] == os.path.sep):
depth = len(dirpath[:-1].split(os.path.sep)) - root_depth
else:
depth = len(dirpath.split(os.path.sep)) - root_depth
# After unzipping, depth 1 is inside unzipped folder (based on Devilry)
if depth == 1:
if self.verbose:
print("Going through folders within '%s'" % dirpath)
elif self.log:
self.write_to_log(format("Going through folders within '%s'" % (dirpath)))
# Move all users/groups one directory down/back
for subdir in subdirs:
from_path = os.path.join(dirpath, subdir)
to_path = os.path.join(*dirpath.split(os.path.sep)[:-1])
if self.verbose:
print("Moving '%s' down to '%s'" % (from_path, to_path))
elif self.log:
self.write_to_log(format("Moving '%s' down to '%s'" % (from_path, to_path)))
shutil.move(from_path, to_path)
break # out from sub-folder created after zip. only these files needed moving
# Remove the now empty folder
unzipped_folder = unzip_file[unzip_file.rfind("/")+1:-4]
from_path = os.path.join(self.rootDir, unzipped_folder)
command = ["rm", "-d", from_path]
if self.verbose:
print("Removing empty folder: %s" % from_path)
elif self.log:
self.write_to_log(format("Removing empty folder: %s" % (from_path)))
#subprocess.call(command, stdout = self.my_out, stderr = self.my_err)
shutil.rmtree(from_path)
def user_rename(self):
for dirpath, subdirList, fileList in os.walk(rootDir):
for subdir in subdirList:
filepath = os.path.join(dirpath, subdir)
new_filepath = os.path.join(dirpath, (subdir[0:subdir.find('(')]).replace(" ", ""))
if self.verbose:
print("Renaming '%s' to '%s'" % (filepath, new_filepath))
elif self.log:
self.write_to_log(format("Renaming '%s' to '%s'" % (filepath, new_filepath)))
os.rename(filepath, new_filepath)
break
def write_to_log(self, text):
self.log_file.write(
format("%s-%s: %s\n" %
(time.strftime("%H:%M"),
time.strftime("%d/%m/%Y"),
text)))
def print_usage():
print("Usage: python sort_deliveries.py [options] path")
print("Mandatory: path")
print("%10s -- %-s" % ("path", "the mandatory argument which is the output folder to have all user directories within when script is done"))
print("Options: -b -c -d -D -h -l -v -z [zipfile]")
print("%10s -- %-s" % ("-b", "bare move, no rename of user folder"))
print("%10s -- %-s" % ("-c", "runs javac on each user, and prints those that fail"))
print("%10s -- %-s" % ("-d", "delete the other files and folders"))
print("%10s -- %-s" % ("-D", "DEBUG mode, program will not execute"))
print("%10s -- %-s" % ("-h", "shows this menu"))
print("%10s -- %-s" % ("-l", "creates a log file for what happens"))
print("%10s -- %-s" % ("-v", "loud about what happens"))
print("%10s -- %-s" % ("-z", "unzips the .zip file in path first (if only 1 is present)"))
print("%10s -- %-s" % ("-z zipfile", "unzipz the specified zip file in path first"))
print("Example usages")
print("python sort_deliveries -z ~/Downloads/deliveries.zip .")
print("Above command will first unzip the 'deliveries.zip' into current folder, and then sort all files")
print("--")
print("python sort_deliveries -z ~/Downloads/deliveries.zip ~/assignments/assignment1")
print("Above command will first unzip the 'deliveries.zip' into the folder at '$HOME/assignments/assignment1/' before sorting said directory")
print("--")
print("python sort_deliveries .")
print("Above command will sort deliveries from current directory - it should contain ALL the users folders - so it is NOT enough to just unzip the zip file and then run the sort script on subdirectory. It should be run on directory.")
print("Command executions example")
print("unzip ~/Downloads/deliveries.zip ## This will create a folder with the same name as zip-file in current working directory")
print("python sort_deliveries deliveries ## Assuming the name of folder is equal to the zip file, it should be included as 'path'")
if __name__=='__main__':
"""
TO BE DONE
# Argument Parser
parser = argparse.ArgumentParser(description="Usage:\npython sort_deliveries.py [options] pathProgram preprocesses a latex-file ('infile') and produces a new latex-file ('outfile') with additional functionality")
parser.add_argument("infile", help="Name of the latex-file you want preprocessed")
parser.add_argument("-o", "--outfile", nargs=1, help="Name of the new file (cannot be equal to infile)")
parser.add_argument("-f", "--fancy_verbatim", help="produces more fancy verbatim", action="store_true")
parser.add_argument("-v", "--verbosity", help="increase output verbosity", action="store_true")
args = parser.parse_args()
verbose = args.verbosity
fancy = args.fancy_verbatim
if len(sys.argv) < 2 or sys.argv[-1][0] == '-':
print_usage()
sys.exit()
# Quits
"""
rootDir = "."
execute = True
delete = False
rename = True
log = False
unzip = "false"
verbose = False
javacFlag = False
# Find correct path according to arguments
argc = 1 # 0 would be programname
argl = len(sys.argv)-1
# .py -> program not the only argument
# '-' -> last argument not an option
# .zip -> last argument not the zip-file
if argl < 1 or \
sys.argv[argl].find(".py") >= 0 or \
sys.argv[argl][0] == '-' or \
sys.argv[argl].find(".zip") >= 0:
print_usage()
sys.exit()
rootDir = os.path.join(rootDir, sys.argv[-1])[2:]
if (rootDir[-1] == os.path.sep):
rootDir = rootDir[:-1]
# Handle arguments
while argc < argl:
arg = sys.argv[argc]
options = list(arg)
for letter in options[1]:
if letter == 'z':
unzip = "true"
if argc+1 < argl and sys.argv[argc+1].find(".zip", len(sys.argv[argc+1])-4) != -1:
argc += 1
unzip = sys.argv[argc]
elif letter == "h":
print_usage()
execute = False
break
elif letter == "l":
log = True
elif letter == "v":
verbose = True
elif letter == "d":
delete = True
elif letter == "b":
rename = False
elif letter == "D":
execute = False
elif letter == "c":
javacFlag = True
argc += 1
# Execute if executable
if execute:
sorter = Devilry_Sort(rootDir, execute, delete, log, rename, unzip, javacFlag, verbose)
sorter.run()
if javacFlag and len(sorter.failed_javac) > 0:
print_failed(sorter.failed_javac)
elif javacFlag:
print("All students compiled")
| gpl-2.0 | 3,814,187,668,424,409,600 | 39.169935 | 238 | 0.507485 | false |
gurnec/HashCheck | UnitTests/get-sha-test-vectors.py | 1 | 3573 | #!/usr/bin/python3
#
# SHA test vector downloader & builder
# Copyright (C) 2016 Christopher Gurnee. All rights reserved.
#
# Please refer to readme.md for information about this source code.
# Please refer to license.txt for details about distribution and modification.
#
# Downloads/builds SHA1-3 test vectors from the NIST Cryptographic Algorithm Validation Program
import os, os.path, urllib.request, io, zipfile, glob, re
# Determine and if necessary create the output directory
test_vectors_dir = os.path.join(os.path.dirname(__file__), 'vectors\\')
if not os.path.isdir(test_vectors_dir):
os.mkdir(test_vectors_dir)
# Download and unzip the two NIST test vector "response" files
for sha_url in ('http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip',
'http://csrc.nist.gov/groups/STM/cavp/documents/sha3/sha-3bytetestvectors.zip'):
print('downloading and extracting', sha_url)
with urllib.request.urlopen(sha_url) as sha_downloading: # open connection to the download url;
with io.BytesIO(sha_downloading.read()) as sha_downloaded_zip: # download entirely into ram;
with zipfile.ZipFile(sha_downloaded_zip) as sha_zipcontents: # open the zip file from ram;
sha_zipcontents.extractall(test_vectors_dir) # extract the zip file into the output dir
# Convert each response file into a set of test vector files and a single expected .sha* file
print('creating test vector files and expected .sha* files from NIST response files')
rsp_filename_re = re.compile(r'\bSHA([\d_]+)(?:Short|Long)Msg.rsp$', re.IGNORECASE)
for rsp_filename in glob.iglob(test_vectors_dir + '*.rsp'):
rsp_filename_match = rsp_filename_re.search(rsp_filename)
if not rsp_filename_match: # ignore the Monte Carlo simulation files
continue
print(' processing', rsp_filename_match.group(0))
with open(rsp_filename) as rsp_file:
# Create the expected .sha file which covers this set of test vector files
with open(rsp_filename + '.sha' + rsp_filename_match.group(1).replace('_', '-'), 'w', encoding='utf8') as sha_file:
dat_filenum = 0
for line in rsp_file:
# The "Len" line, specifies the length of the following test vector in bits
if line.startswith('Len ='):
dat_filelen = int(line[5:].strip())
dat_filelen, dat_filelenmod = divmod(dat_filelen, 8)
if dat_filelenmod != 0:
raise ValueError('unexpected bit length encountered (not divisible by 8)')
# The "Msg" line, specifies the test vector encoded in hex
elif line.startswith('Msg ='):
dat_filename = rsp_filename + '-{:04}.dat'.format(dat_filenum)
dat_filenum += 1
# Create the test vector file
with open(dat_filename, 'wb') as dat_file:
dat_file.write(bytes.fromhex(line[5:].strip()[:2*dat_filelen]))
del dat_filelen
# The "MD" line, specifies the expected hash encoded in hex
elif line.startswith('MD ='):
# Write the expected hash to the .sha file which covers this test vector file
print(line[4:].strip(), '*' + os.path.basename(dat_filename), file=sha_file)
del dat_filename
print("done")
| bsd-3-clause | -4,253,490,971,588,409,000 | 46.283784 | 123 | 0.617128 | false |
NINAnor/QGIS | python/plugins/processing/gui/AlgorithmDialogBase.py | 1 | 6211 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmDialogBase.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import webbrowser
from PyQt4 import uic
from PyQt4.QtCore import QCoreApplication, QSettings, QByteArray, SIGNAL, QUrl
from PyQt4.QtGui import QApplication, QDialogButtonBox, QDesktopWidget
from qgis.utils import iface
from qgis.core import *
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui import AlgorithmClassification
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgAlgorithmBase.ui'))
class AlgorithmDialogBase(BASE, WIDGET):
class InvalidParameterValue(Exception):
def __init__(self, param, widget):
(self.parameter, self.widget) = (param, widget)
def __init__(self, alg):
super(AlgorithmDialogBase, self).__init__(iface.mainWindow())
self.setupUi(self)
self.settings = QSettings()
self.restoreGeometry(self.settings.value("/Processing/dialogBase", QByteArray()))
self.executed = False
self.mainWidget = None
self.alg = alg
# Rename OK button to Run
self.btnRun = self.buttonBox.button(QDialogButtonBox.Ok)
self.btnRun.setText(self.tr('Run'))
self.btnClose = self.buttonBox.button(QDialogButtonBox.Close)
self.setWindowTitle(AlgorithmClassification.getDisplayName(self.alg))
desktop = QDesktopWidget()
if desktop.physicalDpiX() > 96:
self.textHelp.setZoomFactor(desktop.physicalDpiX() / 96)
algHelp = self.alg.shortHelp()
if algHelp is None:
self.textShortHelp.setVisible(False)
else:
self.textShortHelp.document().setDefaultStyleSheet('''.summary { margin-left: 10px; margin-right: 10px; }
h2 { color: #555555; padding-bottom: 15px; }
a { text-decoration: none; color: #3498db; font-weight: bold; }
p { color: #666666; }
b { color: #333333; }
dl dd { margin-bottom: 5px; }''')
self.textShortHelp.setHtml(algHelp)
self.textShortHelp.setOpenLinks(False)
def linkClicked(url):
webbrowser.open(url.toString())
self.textShortHelp.connect(self.textShortHelp, SIGNAL("anchorClicked(const QUrl&)"), linkClicked)
self.textHelp.page().setNetworkAccessManager(QgsNetworkAccessManager.instance())
isText, algHelp = self.alg.help()
if algHelp is not None:
algHelp = algHelp if isText else QUrl(algHelp)
try:
if isText:
self.textHelp.setHtml(algHelp)
else:
self.textHelp.settings().clearMemoryCaches()
self.textHelp.load(algHelp)
except:
self.tabWidget.removeTab(2)
else:
self.tabWidget.removeTab(2)
self.showDebug = ProcessingConfig.getSetting(
ProcessingConfig.SHOW_DEBUG_IN_DIALOG)
def closeEvent(self, evt):
self.settings.setValue("/Processing/dialogBase", self.saveGeometry())
super(AlgorithmDialogBase, self).closeEvent(evt)
def setMainWidget(self):
self.tabWidget.widget(0).layout().addWidget(self.mainWidget)
def error(self, msg):
QApplication.restoreOverrideCursor()
self.setInfo(msg, True)
self.resetGUI()
self.tabWidget.setCurrentIndex(1)
def resetGUI(self):
QApplication.restoreOverrideCursor()
self.lblProgress.setText('')
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
self.btnRun.setEnabled(True)
self.btnClose.setEnabled(True)
def setInfo(self, msg, error=False):
if error:
self.txtLog.append('<span style="color:red"><br>%s<br></span>' % msg)
else:
self.txtLog.append(msg)
QCoreApplication.processEvents()
def setCommand(self, cmd):
if self.showDebug:
            self.setInfo('<code>%s</code>' % cmd)
QCoreApplication.processEvents()
def setDebugInfo(self, msg):
if self.showDebug:
self.setInfo('<span style="color:blue">%s</span>' % msg)
QCoreApplication.processEvents()
def setConsoleInfo(self, msg):
if self.showDebug:
self.setCommand('<span style="color:darkgray">%s</span>' % msg)
QCoreApplication.processEvents()
def setPercentage(self, value):
if self.progressBar.maximum() == 0:
self.progressBar.setMaximum(100)
self.progressBar.setValue(value)
QCoreApplication.processEvents()
def setText(self, text):
self.lblProgress.setText(text)
self.setInfo(text, False)
QCoreApplication.processEvents()
def setParamValues(self):
pass
def setParamValue(self, param, widget, alg=None):
pass
def accept(self):
pass
def finish(self):
pass
| gpl-2.0 | -3,842,399,775,241,946,000 | 34.090395 | 117 | 0.566736 | false |
nathanielvarona/airflow | airflow/providers/apache/sqoop/hooks/sqoop.py | 1 | 15515 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a sqoop 1.x hook"""
import subprocess
from copy import deepcopy
from typing import Any, Dict, List, Optional
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class SqoopHook(BaseHook):
"""
This hook is a wrapper around the sqoop 1 binary. To be able to use the hook
it is required that "sqoop" is in the PATH.
Additional arguments that can be passed via the 'extra' JSON field of the
sqoop connection:
* ``job_tracker``: Job tracker local|jobtracker:port.
* ``namenode``: Namenode.
    * ``libjars``: Comma separated jar files to include in the classpath.
* ``files``: Comma separated files to be copied to the map reduce cluster.
* ``archives``: Comma separated archives to be unarchived on the compute
machines.
* ``password_file``: Path to file containing the password.
:param conn_id: Reference to the sqoop connection.
:type conn_id: str
:param verbose: Set sqoop to verbose.
:type verbose: bool
:param num_mappers: Number of map tasks to import in parallel.
:type num_mappers: int
:param properties: Properties to set via the -D argument
:type properties: dict
"""
conn_name_attr = 'conn_id'
default_conn_name = 'sqoop_default'
conn_type = 'sqoop'
hook_name = 'Sqoop'
def __init__(
self,
conn_id: str = default_conn_name,
verbose: bool = False,
num_mappers: Optional[int] = None,
hcatalog_database: Optional[str] = None,
hcatalog_table: Optional[str] = None,
properties: Optional[Dict[str, Any]] = None,
) -> None:
# No mutable types in the default parameters
super().__init__()
self.conn = self.get_connection(conn_id)
connection_parameters = self.conn.extra_dejson
self.job_tracker = connection_parameters.get('job_tracker', None)
self.namenode = connection_parameters.get('namenode', None)
self.libjars = connection_parameters.get('libjars', None)
self.files = connection_parameters.get('files', None)
self.archives = connection_parameters.get('archives', None)
self.password_file = connection_parameters.get('password_file', None)
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.verbose = verbose
self.num_mappers = num_mappers
self.properties = properties or {}
self.log.info("Using connection to: %s:%s/%s", self.conn.host, self.conn.port, self.conn.schema)
def get_conn(self) -> Any:
return self.conn
def cmd_mask_password(self, cmd_orig: List[str]) -> List[str]:
"""Mask command password for safety"""
cmd = deepcopy(cmd_orig)
try:
password_index = cmd.index('--password')
cmd[password_index + 1] = 'MASKED'
except ValueError:
self.log.debug("No password in sqoop cmd")
return cmd
def popen(self, cmd: List[str], **kwargs: Any) -> None:
"""
Remote Popen
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
:return: handle to subprocess
"""
masked_cmd = ' '.join(self.cmd_mask_password(cmd))
self.log.info("Executing command: %s", masked_cmd)
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as sub_process:
for line in iter(sub_process.stdout): # type: ignore
self.log.info(line.strip())
sub_process.wait()
self.log.info("Command exited with return code %s", sub_process.returncode)
if sub_process.returncode:
raise AirflowException(f"Sqoop command failed: {masked_cmd}")
def _prepare_command(self, export: bool = False) -> List[str]:
sqoop_cmd_type = "export" if export else "import"
connection_cmd = ["sqoop", sqoop_cmd_type]
for key, value in self.properties.items():
connection_cmd += ["-D", f"{key}={value}"]
if self.namenode:
connection_cmd += ["-fs", self.namenode]
if self.job_tracker:
connection_cmd += ["-jt", self.job_tracker]
if self.libjars:
connection_cmd += ["-libjars", self.libjars]
if self.files:
connection_cmd += ["-files", self.files]
if self.archives:
connection_cmd += ["-archives", self.archives]
if self.conn.login:
connection_cmd += ["--username", self.conn.login]
if self.conn.password:
connection_cmd += ["--password", self.conn.password]
if self.password_file:
connection_cmd += ["--password-file", self.password_file]
if self.verbose:
connection_cmd += ["--verbose"]
if self.num_mappers:
connection_cmd += ["--num-mappers", str(self.num_mappers)]
if self.hcatalog_database:
connection_cmd += ["--hcatalog-database", self.hcatalog_database]
if self.hcatalog_table:
connection_cmd += ["--hcatalog-table", self.hcatalog_table]
connect_str = self.conn.host
if self.conn.port:
connect_str += f":{self.conn.port}"
if self.conn.schema:
connect_str += f"/{self.conn.schema}"
connection_cmd += ["--connect", connect_str]
return connection_cmd
@staticmethod
def _get_export_format_argument(file_type: str = 'text') -> List[str]:
if file_type == "avro":
return ["--as-avrodatafile"]
elif file_type == "sequence":
return ["--as-sequencefile"]
elif file_type == "parquet":
return ["--as-parquetfile"]
elif file_type == "text":
return ["--as-textfile"]
else:
raise AirflowException("Argument file_type should be 'avro', 'sequence', 'parquet' or 'text'.")
def _import_cmd(
self,
target_dir: Optional[str],
append: bool,
file_type: str,
split_by: Optional[str],
direct: Optional[bool],
driver: Any,
extra_import_options: Any,
) -> List[str]:
cmd = self._prepare_command(export=False)
if target_dir:
cmd += ["--target-dir", target_dir]
if append:
cmd += ["--append"]
cmd += self._get_export_format_argument(file_type)
if split_by:
cmd += ["--split-by", split_by]
if direct:
cmd += ["--direct"]
if driver:
cmd += ["--driver", driver]
if extra_import_options:
for key, value in extra_import_options.items():
cmd += [f'--{key}']
if value:
cmd += [str(value)]
return cmd
# pylint: disable=too-many-arguments
def import_table(
self,
table: str,
target_dir: Optional[str] = None,
append: bool = False,
file_type: str = "text",
columns: Optional[str] = None,
split_by: Optional[str] = None,
where: Optional[str] = None,
direct: bool = False,
driver: Any = None,
extra_import_options: Optional[Dict[str, Any]] = None,
) -> Any:
"""
Imports table from remote location to target dir. Arguments are
copies of direct sqoop command line arguments
:param table: Table to read
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet".
            Imports data into the specified format. Defaults to text.
:param columns: <col,col,col…> Columns to import from table
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param direct: Use direct connector if exists for the database
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--table", table]
if columns:
cmd += ["--columns", columns]
if where:
cmd += ["--where", where]
self.popen(cmd)
def import_query(
self,
query: str,
target_dir: Optional[str] = None,
append: bool = False,
file_type: str = "text",
split_by: Optional[str] = None,
direct: Optional[bool] = None,
driver: Optional[Any] = None,
extra_import_options: Optional[Dict[str, Any]] = None,
) -> Any:
"""
Imports a specific query from the rdbms to hdfs
:param query: Free format query to run
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet"
Imports data to hdfs into the specified format. Defaults to text.
:param split_by: Column of the table used to split work units
:param direct: Use direct import fast path
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--query", query]
self.popen(cmd)
# pylint: disable=too-many-arguments
def _export_cmd(
self,
table: str,
export_dir: Optional[str] = None,
input_null_string: Optional[str] = None,
input_null_non_string: Optional[str] = None,
staging_table: Optional[str] = None,
clear_staging_table: bool = False,
enclosed_by: Optional[str] = None,
escaped_by: Optional[str] = None,
input_fields_terminated_by: Optional[str] = None,
input_lines_terminated_by: Optional[str] = None,
input_optionally_enclosed_by: Optional[str] = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: Optional[Dict[str, Any]] = None,
) -> List[str]:
cmd = self._prepare_command(export=True)
if input_null_string:
cmd += ["--input-null-string", input_null_string]
if input_null_non_string:
cmd += ["--input-null-non-string", input_null_non_string]
if staging_table:
cmd += ["--staging-table", staging_table]
if clear_staging_table:
cmd += ["--clear-staging-table"]
if enclosed_by:
cmd += ["--enclosed-by", enclosed_by]
if escaped_by:
cmd += ["--escaped-by", escaped_by]
if input_fields_terminated_by:
cmd += ["--input-fields-terminated-by", input_fields_terminated_by]
if input_lines_terminated_by:
cmd += ["--input-lines-terminated-by", input_lines_terminated_by]
if input_optionally_enclosed_by:
cmd += ["--input-optionally-enclosed-by", input_optionally_enclosed_by]
if batch:
cmd += ["--batch"]
if relaxed_isolation:
cmd += ["--relaxed-isolation"]
if export_dir:
cmd += ["--export-dir", export_dir]
if extra_export_options:
for key, value in extra_export_options.items():
cmd += [f'--{key}']
if value:
cmd += [str(value)]
# The required option
cmd += ["--table", table]
return cmd
# pylint: disable=too-many-arguments
def export_table(
self,
table: str,
export_dir: Optional[str] = None,
input_null_string: Optional[str] = None,
input_null_non_string: Optional[str] = None,
staging_table: Optional[str] = None,
clear_staging_table: bool = False,
enclosed_by: Optional[str] = None,
escaped_by: Optional[str] = None,
input_fields_terminated_by: Optional[str] = None,
input_lines_terminated_by: Optional[str] = None,
input_optionally_enclosed_by: Optional[str] = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: Optional[Dict[str, Any]] = None,
) -> None:
"""
Exports Hive table to remote location. Arguments are copies of direct
sqoop command line Arguments
:param table: Table remote destination
:param export_dir: Hive table to export
:param input_null_string: The string to be interpreted as null for
string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the field separator character
:param input_lines_terminated_by: Sets the end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param relaxed_isolation: Transaction isolation to read uncommitted
for the mappers
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._export_cmd(
table,
export_dir,
input_null_string,
input_null_non_string,
staging_table,
clear_staging_table,
enclosed_by,
escaped_by,
input_fields_terminated_by,
input_lines_terminated_by,
input_optionally_enclosed_by,
batch,
relaxed_isolation,
extra_export_options,
)
self.popen(cmd)
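# --- Illustrative usage sketch (not part of the original module) ---
# The connection id, table names and HDFS paths below are hypothetical; the
# hook only assumes that the "sqoop" binary is on PATH and that the referenced
# Airflow connection exists.
def _example_sqoop_usage():
    hook = SqoopHook(conn_id='sqoop_default', verbose=True, num_mappers=4)
    # Import a table into HDFS as Parquet files, splitting work units on "id".
    hook.import_table(table='customers', target_dir='/staging/customers',
                      file_type='parquet', split_by='id')
    # Export an HDFS directory back into an RDBMS table.
    hook.export_table(table='customers_export', export_dir='/staging/customers')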
| apache-2.0 | 389,434,711,630,519,940 | 36.652913 | 110 | 0.597499 | false |
andywalz/PyFileMaker | setup.py | 1 | 1145 | #!/usr/bin/env python
from setuptools import setup
from PyFileMaker import __version__
setup(
name='PyFileMaker',
version=__version__,
description='Python Object Wrapper for FileMaker Server XML Interface',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['FileMaker'],
author='Klokan Petr Pridal, Pieter Claerhout, Marcin Kawa',
author_email='[email protected], [email protected], [email protected]',
url='https://github.com/aeguana/PyFileMaker',
download_url='https://github.com/aeguana/PyFileMaker/releases',
license='http://www.opensource.org/licenses/bsd-license.php',
platforms = ['any'],
packages=['PyFileMaker'],
install_requires=['requests'],
)
| bsd-3-clause | -9,189,683,703,606,496,000 | 37.166667 | 80 | 0.655022 | false |
jpvanhal/cloudsizzle | cloudsizzle/scrapers/oodi/items.py | 1 | 1589 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 CloudSizzle Team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
The models for scraped items.
See documentation in:
http://doc.scrapy.org/topics/items.html
"""
from scrapy.item import Item, Field
from cloudsizzle.scrapers.items import DateField
class CompletedCourseItem(Item):
name = Field()
code = Field()
cr = Field()
ocr = Field()
grade = Field()
date = DateField('%d.%m.%Y')
teacher = Field()
module = Field()
class ModuleItem(Item):
name = Field()
code = Field()
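# --- Illustrative usage sketch (not part of the original module) ---
# The field values below are hypothetical; the date string follows the
# '%d.%m.%Y' format declared on the DateField above.
def _example_completed_course_item():
    course = CompletedCourseItem()
    course['name'] = 'Introduction to Programming'
    course['code'] = 'T-106.1001'
    course['cr'] = '5'
    course['grade'] = '4'
    course['date'] = '15.12.2009'
    return course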
| mit | 7,193,697,621,543,712,000 | 30.156863 | 67 | 0.730648 | false |
srluge/SickRage | sickbeard/versionChecker.py | 1 | 33707 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import subprocess
import re
import tarfile
import stat
import traceback
import time
import datetime
import requests
import sickbeard
from sickbeard import db
from sickbeard import ui
from sickbeard import notifiers
from sickbeard import logger, helpers
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickbeard.helpers import removetree
class CheckVersion(object):
"""
Version check class meant to run as a thread object with the sr scheduler.
"""
def __init__(self):
self.updater = None
self.install_type = None
self.amActive = False
if sickbeard.gh:
self.install_type = self.find_install_type()
if self.install_type == 'git':
self.updater = GitUpdateManager()
elif self.install_type == 'source':
self.updater = SourceUpdateManager()
self.session = requests.Session()
def run(self, force=False):
self.amActive = True
if self.updater:
# set current branch version
sickbeard.BRANCH = self.get_branch()
if self.check_for_new_version(force):
if sickbeard.AUTO_UPDATE:
logger.log(u"New update found for SickRage, starting auto-updater ...")
ui.notifications.message('New update found for SickRage, starting auto-updater')
if self.run_backup_if_safe() is True:
if sickbeard.versionCheckScheduler.action.update():
logger.log(u"Update was successful!")
ui.notifications.message('Update was successful')
sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
else:
logger.log(u"Update failed!")
ui.notifications.message('Update failed!')
self.check_for_new_news(force)
self.amActive = False
def run_backup_if_safe(self):
return self.safe_to_update() is True and self._runbackup() is True
def _runbackup(self):
# Do a system backup before update
logger.log(u"Config backup in progress...")
ui.notifications.message('Backup', 'Config backup in progress...')
try:
backupDir = ek(os.path.join, sickbeard.DATA_DIR, 'backup')
if not ek(os.path.isdir,backupDir):
ek(os.mkdir, backupDir)
if self._keeplatestbackup(backupDir) and self._backup(backupDir):
logger.log(u"Config backup successful, updating...")
ui.notifications.message('Backup', 'Config backup successful, updating...')
return True
else:
logger.log(u"Config backup failed, aborting update", logger.ERROR)
ui.notifications.message('Backup', 'Config backup failed, aborting update')
return False
except Exception as e:
logger.log(u'Update: Config backup failed. Error: %s' % ex(e), logger.ERROR)
ui.notifications.message('Backup', 'Config backup failed, aborting update')
return False
@staticmethod
def _keeplatestbackup(backupDir=None):
if not backupDir:
return False
import glob
files = glob.glob(ek(os.path.join, backupDir, '*.zip'))
if not files:
return True
now = time.time()
newest = files[0], now - ek(os.path.getctime, files[0])
for f in files[1:]:
age = now - ek(os.path.getctime, f)
if age < newest[1]:
newest = f, age
files.remove(newest[0])
for f in files:
ek(os.remove, f)
return True
# TODO: Merge with backup in helpers
@staticmethod
def _backup(backupDir=None):
if not backupDir:
return False
source = [ek(os.path.join, sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE]
source.append(ek(os.path.join, sickbeard.DATA_DIR, 'failed.db'))
source.append(ek(os.path.join, sickbeard.DATA_DIR, 'cache.db'))
target = ek(os.path.join, backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip')
for (path, dirs, files) in ek(os.walk, sickbeard.CACHE_DIR, topdown=True):
for dirname in dirs:
if path == sickbeard.CACHE_DIR and dirname not in ['images']:
dirs.remove(dirname)
for filename in files:
source.append(ek(os.path.join, path, filename))
return helpers.backupConfigZip(source, target, sickbeard.DATA_DIR)
def getDBcompare(self):
try:
self.updater.need_update()
cur_hash = str(self.updater.get_newest_commit_hash())
            assert len(cur_hash) == 40, "Commit hash wrong length: %s hash: %s" % (len(cur_hash), cur_hash)
check_url = "http://cdn.rawgit.com/%s/%s/%s/sickbeard/databases/mainDB.py" % (sickbeard.GIT_ORG, sickbeard.GIT_REPO, cur_hash)
response = helpers.getURL(check_url, session=self.session)
assert response, "Empty response from %s" % check_url
match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})", response)
branchDestDBversion = int(match.group('version'))
myDB = db.DBConnection()
branchCurrDBversion = myDB.checkDBVersion()
if branchDestDBversion > branchCurrDBversion:
return 'upgrade'
elif branchDestDBversion == branchCurrDBversion:
return 'equal'
else:
return 'downgrade'
except Exception as e:
raise
def safe_to_update(self):
def db_safe():
try:
result = self.getDBcompare()
if result == 'equal':
logger.log(u"We can proceed with the update. New update has same DB version", logger.DEBUG)
return True
elif result == 'upgrade':
logger.log(
u"We can't proceed with the update. New update has a new DB version. Please manually update",
logger.WARNING)
return False
elif result == 'downgrade':
logger.log(
u"We can't proceed with the update. New update has a old DB version. It's not possible to downgrade",
logger.ERROR)
return False
except Exception as e:
logger.log(u"We can't proceed with the update. Unable to compare DB version. Error: %s" % repr(e),
logger.ERROR)
def postprocessor_safe():
if not sickbeard.autoPostProcesserScheduler.action.amActive:
logger.log(u"We can proceed with the update. Post-Processor is not running", logger.DEBUG)
return True
else:
logger.log(u"We can't proceed with the update. Post-Processor is running", logger.DEBUG)
return False
def showupdate_safe():
if not sickbeard.showUpdateScheduler.action.amActive:
logger.log(u"We can proceed with the update. Shows are not being updated", logger.DEBUG)
return True
else:
logger.log(u"We can't proceed with the update. Shows are being updated", logger.DEBUG)
return False
        if all([db_safe(), postprocessor_safe(), showupdate_safe()]):
logger.log(u"Proceeding with auto update", logger.DEBUG)
return True
else:
logger.log(u"Auto update aborted", logger.DEBUG)
return False
@staticmethod
def find_install_type():
"""
Determines how this copy of sr was installed.
returns: type of installation. Possible values are:
'win': any compiled windows build
'git': running from source using git
'source': running from source without git
"""
# check if we're a windows build
if sickbeard.BRANCH.startswith('build '):
install_type = 'win'
elif ek(os.path.isdir,ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
install_type = 'git'
else:
install_type = 'source'
return install_type
def check_for_new_version(self, force=False):
"""
Checks the internet for a newer version.
returns: bool, True for new version or False for no new version.
force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
"""
if not self.updater or (not sickbeard.VERSION_NOTIFY and not sickbeard.AUTO_UPDATE and not force):
logger.log(u"Version checking is disabled, not checking for the newest version")
return False
# checking for updates
if not sickbeard.AUTO_UPDATE:
logger.log(u"Checking for updates using " + self.install_type.upper())
if not self.updater.need_update():
sickbeard.NEWEST_VERSION_STRING = None
if force:
ui.notifications.message('No update needed')
logger.log(u"No update needed")
# no updates needed
return False
# found updates
self.updater.set_newest_text()
return True
def check_for_new_news(self, force=False):
"""
Checks GitHub for the latest news.
returns: unicode, a copy of the news
force: ignored
"""
# Grab a copy of the news
logger.log(u'check_for_new_news: Checking GitHub for latest news.', logger.DEBUG)
try:
news = helpers.getURL(sickbeard.NEWS_URL, session=self.session)
except Exception:
logger.log(u'check_for_new_news: Could not load news from repo.', logger.WARNING)
news = ''
if not news:
return ''
        dates = list(re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', news, re.M))
        if not dates:
            return news or ''
try:
last_read = datetime.datetime.strptime(sickbeard.NEWS_LAST_READ, '%Y-%m-%d')
except Exception:
            last_read = datetime.datetime.min
sickbeard.NEWS_UNREAD = 0
gotLatest = False
for match in dates:
if not gotLatest:
gotLatest = True
sickbeard.NEWS_LATEST = match.group(1)
try:
if datetime.datetime.strptime(match.group(1), '%Y-%m-%d') > last_read:
sickbeard.NEWS_UNREAD += 1
except Exception:
pass
return news
def update(self):
if self.updater:
# update branch with current config branch value
self.updater.branch = sickbeard.BRANCH
# check for updates
if self.updater.need_update():
return self.updater.update()
def list_remote_branches(self):
if self.updater:
return self.updater.list_remote_branches()
def get_branch(self):
if self.updater:
return self.updater.branch
class UpdateManager(object):
@staticmethod
def get_github_org():
return sickbeard.GIT_ORG
@staticmethod
def get_github_repo():
return sickbeard.GIT_REPO
@staticmethod
def get_update_url():
return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID)
class GitUpdateManager(UpdateManager):
def __init__(self):
self._git_path = self._find_working_git()
self.github_org = self.get_github_org()
self.github_repo = self.get_github_repo()
self.branch = sickbeard.BRANCH = self._find_installed_branch()
self._cur_commit_hash = None
self._newest_commit_hash = None
self._num_commits_behind = 0
self._num_commits_ahead = 0
def get_cur_commit_hash(self):
return self._cur_commit_hash
def get_newest_commit_hash(self):
return self._newest_commit_hash
def get_cur_version(self):
return self._run_git(self._git_path, "describe --abbrev=0 " + self._cur_commit_hash)[0]
def get_newest_version(self):
return self._run_git(self._git_path, "describe --abbrev=0 " + self._newest_commit_hash)[0]
def get_num_commits_behind(self):
return self._num_commits_behind
@staticmethod
def _git_error():
error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
sickbeard.NEWEST_VERSION_STRING = error_message
def _find_working_git(self):
test_cmd = 'version'
if sickbeard.GIT_PATH:
main_git = '"' + sickbeard.GIT_PATH + '"'
else:
main_git = 'git'
logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG)
_, _, exit_status = self._run_git(main_git, test_cmd)
if exit_status == 0:
logger.log(u"Using: " + main_git, logger.DEBUG)
return main_git
else:
logger.log(u"Not using: " + main_git, logger.DEBUG)
# trying alternatives
alternative_git = []
# osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_git.append('/usr/local/git/bin/git')
if platform.system().lower() == 'windows':
if main_git != main_git.lower():
alternative_git.append(main_git.lower())
if alternative_git:
logger.log(u"Trying known alternative git locations", logger.DEBUG)
for cur_git in alternative_git:
logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG)
_, _, exit_status = self._run_git(cur_git, test_cmd)
if exit_status == 0:
logger.log(u"Using: " + cur_git, logger.DEBUG)
return cur_git
else:
logger.log(u"Not using: " + cur_git, logger.DEBUG)
# Still haven't found a working git
error_message = 'Unable to find your git executable - Shutdown SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
sickbeard.NEWEST_VERSION_STRING = error_message
return None
@staticmethod
def _run_git(git_path, args):
output = err = exit_status = None
if not git_path:
logger.log(u"No git specified, can't use git commands", logger.WARNING)
exit_status = 1
return (output, err, exit_status)
cmd = git_path + ' ' + args
try:
logger.log(u"Executing " + cmd + " with your shell in " + sickbeard.PROG_DIR, logger.DEBUG)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=sickbeard.PROG_DIR)
output, err = p.communicate()
exit_status = p.returncode
if output:
output = output.strip()
except OSError:
logger.log(u"Command " + cmd + " didn't work")
exit_status = 1
if exit_status == 0:
logger.log(cmd + u" : returned successful", logger.DEBUG)
exit_status = 0
elif exit_status == 1:
            if output and 'stash' in output:
logger.log(u"Please enable 'git reset' in settings or stash your changes in local files", logger.WARNING)
else:
logger.log(cmd + u" returned : " + str(output), logger.ERROR)
exit_status = 1
elif exit_status == 128 or 'fatal:' in output or err:
logger.log(cmd + u" returned : " + str(output), logger.WARNING)
exit_status = 128
else:
logger.log(cmd + u" returned : " + str(output) + u", treat as error for now", logger.ERROR)
exit_status = 1
return (output, err, exit_status)
def _find_installed_version(self):
"""
Attempts to find the currently installed version of SickRage.
Uses git show to get commit version.
Returns: True for success or False for failure
"""
output, _, exit_status = self._run_git(self._git_path, 'rev-parse HEAD') # @UnusedVariable
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR)
return False
self._cur_commit_hash = cur_commit_hash
sickbeard.CUR_COMMIT_HASH = str(cur_commit_hash)
return True
else:
return False
def _find_installed_branch(self):
branch_info, _, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD') # @UnusedVariable
if exit_status == 0 and branch_info:
branch = branch_info.strip().replace('refs/heads/', '', 1)
if branch:
sickbeard.BRANCH = branch
return branch
return ""
def _check_github_for_update(self):
"""
Uses git commands to check if there is a newer version that the provided
commit hash. If there is a newer version it sets _num_commits_behind.
"""
self._num_commits_behind = 0
self._num_commits_ahead = 0
# update remote origin url
self.update_remote_origin()
# get all new info from github
output, _, exit_status = self._run_git(self._git_path, 'fetch %s' % sickbeard.GIT_REMOTE)
if not exit_status == 0:
logger.log(u"Unable to contact github, can't check for update", logger.WARNING)
return
# get latest commit_hash from remote
output, _, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet "@{upstream}"')
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.log(u"Output doesn't look like a hash, not using it", logger.DEBUG)
return
else:
self._newest_commit_hash = cur_commit_hash
else:
logger.log(u"git didn't return newest commit hash", logger.DEBUG)
return
# get number of commits behind and ahead (option --count not supported git < 1.7.2)
output, _, exit_status = self._run_git(self._git_path, 'rev-list --left-right "@{upstream}"...HEAD')
if exit_status == 0 and output:
try:
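                # '<' marks commits only in the upstream (behind),
                # '>' marks commits only in HEAD (ahead).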
self._num_commits_behind = int(output.count("<"))
self._num_commits_ahead = int(output.count(">"))
except Exception:
logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG)
return
logger.log(u"cur_commit = %s, newest_commit = %s, num_commits_behind = %s, num_commits_ahead = %s" %
(self._cur_commit_hash, self._newest_commit_hash, self._num_commits_behind, self._num_commits_ahead), logger.DEBUG)
def set_newest_text(self):
# if we're up to date then don't set this
sickbeard.NEWEST_VERSION_STRING = None
if self._num_commits_ahead:
logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.WARNING)
newest_text = "Local branch is ahead of " + self.branch + ". Automatic update not possible."
elif self._num_commits_behind > 0:
base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
if self._newest_commit_hash:
url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
else:
url = base_url + '/commits/'
newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a> '
newest_text += " (you're " + str(self._num_commits_behind) + " commit"
if self._num_commits_behind > 1:
newest_text += 's'
newest_text += ' behind)' + "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
else:
return
sickbeard.NEWEST_VERSION_STRING = newest_text
def need_update(self):
if self.branch != self._find_installed_branch():
logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
return True
self._find_installed_version()
if not self._cur_commit_hash:
return True
else:
try:
self._check_github_for_update()
except Exception as e:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.WARNING)
return False
if self._num_commits_behind > 0:
return True
return False
def update(self):
"""
Calls git pull origin <branch> in order to update SickRage. Returns a bool depending
on the call's success.
"""
# update remote origin url
self.update_remote_origin()
# remove untracked files and performs a hard reset on git branch to avoid update issues
if sickbeard.GIT_RESET:
# self.clean() # This is removing user data and backups
self.reset()
if self.branch == self._find_installed_branch():
_, _, exit_status = self._run_git(self._git_path, 'pull -f %s %s' % (sickbeard.GIT_REMOTE, self.branch)) # @UnusedVariable
else:
_, _, exit_status = self._run_git(self._git_path, 'checkout -f ' + self.branch) # @UnusedVariable
if exit_status == 0:
_, _, exit_status = self._run_git(self._git_path, 'submodule update --init --recursive')
if exit_status == 0:
self._find_installed_version()
sickbeard.GIT_NEWVER = True
# Notify update successful
if sickbeard.NOTIFY_ON_UPDATE:
notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH if sickbeard.CUR_COMMIT_HASH else "")
return True
else:
return False
else:
return False
def clean(self):
"""
Calls git clean to remove all untracked files. Returns a bool depending
on the call's success.
"""
_, _, exit_status = self._run_git(self._git_path, 'clean -df ""') # @UnusedVariable
if exit_status == 0:
return True
def reset(self):
"""
Calls git reset --hard to perform a hard reset. Returns a bool depending
on the call's success.
"""
_, _, exit_status = self._run_git(self._git_path, 'reset --hard') # @UnusedVariable
if exit_status == 0:
return True
def list_remote_branches(self):
# update remote origin url
self.update_remote_origin()
sickbeard.BRANCH = self._find_installed_branch()
branches, _, exit_status = self._run_git(self._git_path, 'ls-remote --heads %s' % sickbeard.GIT_REMOTE) # @UnusedVariable
        if exit_status == 0 and branches:
            return re.findall(r'refs/heads/(.*)', branches)
        return []
def update_remote_origin(self):
self._run_git(self._git_path, 'config remote.%s.url %s' % (sickbeard.GIT_REMOTE, sickbeard.GIT_REMOTE_URL))
if sickbeard.GIT_USERNAME:
self._run_git(self._git_path, 'config remote.%s.pushurl %s' % (sickbeard.GIT_REMOTE, sickbeard.GIT_REMOTE_URL.replace(sickbeard.GIT_ORG, sickbeard.GIT_USERNAME)))
class SourceUpdateManager(UpdateManager):
def __init__(self):
self.github_org = self.get_github_org()
self.github_repo = self.get_github_repo()
self.branch = sickbeard.BRANCH
if sickbeard.BRANCH == '':
self.branch = self._find_installed_branch()
self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
self._newest_commit_hash = None
self._num_commits_behind = 0
self.session = requests.Session()
@staticmethod
def _find_installed_branch():
return sickbeard.CUR_COMMIT_BRANCH if sickbeard.CUR_COMMIT_BRANCH else "master"
def get_cur_commit_hash(self):
return self._cur_commit_hash
def get_newest_commit_hash(self):
return self._newest_commit_hash
@staticmethod
def get_cur_version():
return ""
@staticmethod
def get_newest_version():
return ""
def get_num_commits_behind(self):
return self._num_commits_behind
def need_update(self):
# need this to run first to set self._newest_commit_hash
try:
self._check_github_for_update()
except Exception as e:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.WARNING)
return False
if self.branch != self._find_installed_branch():
logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
return True
if not self._cur_commit_hash or self._num_commits_behind > 0:
return True
return False
def _check_github_for_update(self):
"""
Uses pygithub to ask github if there is a newer version that the provided
commit hash. If there is a newer version it sets SickRage's version text.
commit_hash: hash that we're checking against
"""
self._num_commits_behind = 0
self._newest_commit_hash = None
# try to get newest commit hash and commits behind directly by comparing branch and current commit
if self._cur_commit_hash:
branch_compared = sickbeard.gh.compare(base=self.branch, head=self._cur_commit_hash)
self._newest_commit_hash = branch_compared.base_commit.sha
self._num_commits_behind = branch_compared.behind_by
# fall back and iterate over last 100 (items per page in gh_api) commits
if not self._newest_commit_hash:
for curCommit in sickbeard.gh.get_commits():
if not self._newest_commit_hash:
self._newest_commit_hash = curCommit.sha
if not self._cur_commit_hash or curCommit.sha == self._cur_commit_hash:
break
# when _cur_commit_hash doesn't match anything _num_commits_behind == 100
self._num_commits_behind += 1
logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
+ u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG)
def set_newest_text(self):
# if we're up to date then don't set this
sickbeard.NEWEST_VERSION_STRING = None
if not self._cur_commit_hash:
logger.log(u"Unknown current version number, don't know if we should update or not", logger.DEBUG)
newest_text = "Unknown current version number: If you've never used the SickRage upgrade system before then current version is not set."
newest_text += "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
elif self._num_commits_behind > 0:
base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
if self._newest_commit_hash:
url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
else:
url = base_url + '/commits/'
newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a>'
newest_text += " (you're " + str(self._num_commits_behind) + " commit"
if self._num_commits_behind > 1:
newest_text += "s"
newest_text += " behind)" + "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
else:
return
sickbeard.NEWEST_VERSION_STRING = newest_text
def update(self):
"""
Downloads the latest source tarball from github and installs it over the existing version.
"""
tar_download_url = 'http://github.com/' + self.github_org + '/' + self.github_repo + '/tarball/' + self.branch
try:
# prepare the update dir
sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')
if ek(os.path.isdir,sr_update_dir):
logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
ek(removetree,sr_update_dir)
logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
ek(os.makedirs, sr_update_dir)
# retrieve file
logger.log(u"Downloading update from " + repr(tar_download_url))
tar_download_path = ek(os.path.join, sr_update_dir, u'sr-update.tar')
helpers.download_file(tar_download_url, tar_download_path, session=self.session)
if not ek(os.path.isfile,tar_download_path):
logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING)
return False
if not ek(tarfile.is_tarfile, tar_download_path):
logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
return False
# extract to sr-update dir
logger.log(u"Extracting file " + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(sr_update_dir)
tar.close()
# delete .tar.gz
logger.log(u"Deleting file " + tar_download_path)
ek(os.remove, tar_download_path)
# find update dir name
update_dir_contents = [x for x in ek(os.listdir, sr_update_dir) if
ek(os.path.isdir,ek(os.path.join, sr_update_dir, x))]
if len(update_dir_contents) != 1:
logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
return False
content_dir = ek(os.path.join, sr_update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
for dirname, _, filenames in ek(os.walk, content_dir): # @UnusedVariable
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = ek(os.path.join, content_dir, dirname, curfile)
new_path = ek(os.path.join, sickbeard.PROG_DIR, dirname, curfile)
# Avoid DLL access problem on WIN32/64
# These files needing to be updated manually
# or find a way to kill the access from memory
if curfile in ('unrar.dll', 'unrar64.dll'):
try:
ek(os.chmod, new_path, stat.S_IWRITE)
ek(os.remove, new_path)
ek(os.renames, old_path, new_path)
except Exception as e:
logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
ek(os.remove, old_path) # Trash the updated file without moving in new path
continue
if ek(os.path.isfile,new_path):
ek(os.remove, new_path)
ek(os.renames, old_path, new_path)
sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
sickbeard.CUR_COMMIT_BRANCH = self.branch
except Exception as e:
logger.log(u"Error while trying to update: {}".format(ex(e)), logger.ERROR)
logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
return False
# Notify update successful
notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
return True
@staticmethod
def list_remote_branches():
return [x.name for x in sickbeard.gh.get_branches() if x]
| gpl-3.0 | -3,693,652,447,685,165,000 | 37.346985 | 189 | 0.575696 | false |
Jigsaw-Code/net-analysis | netanalysis/traffic/data/api_repository.py | 1 | 3175 | #!/usr/bin/python
#
# Copyright 2019 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Library to access Google's traffic data from its Transparency Report
"""
import datetime
import json
import ssl
import time
from urllib.request import urlopen, Request
from urllib.parse import urlencode, quote
import certifi
import pandas as pd
from netanalysis.traffic.data import model
def _to_timestamp(time_point: datetime.datetime):
return time.mktime(time_point.timetuple())
_SSL_CONTEXT = ssl.create_default_context(cafile=certifi.where())
class ApiTrafficRepository(model.TrafficRepository):
"""TrafficRepository that reads the traffic data from Google's Transparency Report."""
def _query_api(self, endpoint, params=None):
query_url = "https://www.google.com/transparencyreport/api/v3/traffic/" + \
quote(endpoint)
if params:
query_url = query_url + "?" + urlencode(params)
try:
request = Request(query_url)
request.add_header("User-Agent", "Jigsaw-Code/netanalysis")
with urlopen(request, context=_SSL_CONTEXT) as response:
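                # The first six bytes of the response are presumably an
                # anti-JSON-hijacking prefix; strip them before parsing.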
return json.loads(response.read()[6:].decode("utf8"))
except Exception as error:
raise Exception("Failed to query url %s" % query_url, error)
def list_regions(self):
response_proto = self._query_api("regionlist")
return sorted([e[0] for e in response_proto[0][1]])
def get_traffic(self, region_code: str, product_id: model.ProductId,
start: datetime.datetime = None, end: datetime.datetime = None):
DEFAULT_INTERVAL_DAYS = 2 * 365
POINTS_PER_DAY = 48
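        # 48 points per day corresponds to one sample every 30 minutes.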
if not end:
end = datetime.datetime.now()
if not start:
start = end - datetime.timedelta(days=DEFAULT_INTERVAL_DAYS)
number_of_days = (end - start).days
total_points = int(number_of_days * POINTS_PER_DAY)
entries = []
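        # The API expects the start/end parameters as millisecond timestamps.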
params = [
("start", int(_to_timestamp(start) * 1000)),
("end", int(_to_timestamp(end) * 1000)),
("width", total_points),
("product", product_id.value),
("region", region_code)]
response_proto = self._query_api("fraction", params)
entry_list_proto = response_proto[0][1]
for entry_proto in entry_list_proto:
timestamp = datetime.datetime.utcfromtimestamp(
entry_proto[0] / 1000)
value = entry_proto[1][0][1]
entries.append((timestamp, value / POINTS_PER_DAY / 2))
dates, traffic = zip(*entries)
return pd.Series(traffic, index=dates)
| apache-2.0 | -1,836,266,151,497,937,000 | 36.352941 | 90 | 0.647874 | false |
openstate/yournextrepresentative | candidates/diffs.py | 1 | 8701 | # The functions in this file are to help produce human readable diffs
# between our JSON representation of candidates.
import re
from django.conf import settings
from django.utils.translation import ugettext as _
import jsonpatch
import jsonpointer
def get_descriptive_value(election, attribute, value, leaf):
"""Get a sentence fragment describing someone's status in a particular year
'attribute' is either "standing_in" or "party_membership", 'election'
is one of the keys from settings.ELECTIONS, and 'value' is what would
be under that year in the 'standing_in' or 'party_memberships'
dictionary (see the comment at the top of update.py)."""
election_data = settings.ELECTIONS[election]
current_election = election_data.get('current')
election_name = election_data['name']
if attribute == 'party_memberships':
if leaf:
# In that case, there's only a particular value in the
# dictionary that's changed:
if leaf == 'name':
if current_election:
message = _(u"is known to be standing for the party '{party}' in the {election}")
else:
message = _(u"was known to be standing for the party '{party}' in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'id':
if current_election:
message = _(u'is known to be standing for the party with ID {party} in the {election}')
else:
message = _(u'was known to be standing for the party with ID {party} in the {election}')
return message.format(party=value, election=election_name)
else:
message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}")
raise Exception, message.format(
leaf, attribute, election
)
else:
if current_election:
message = _(u'is known to be standing for the party "{party}" in the {election}')
else:
message = _(u'was known to be standing for the party "{party}" in the {election}')
return message.format(party=value['name'], election=election_name)
elif attribute == 'standing_in':
if value is None:
if current_election:
message = _(u'is known not to be standing in the {election}')
else:
message = _(u'was known not to be standing in the {election}')
return message.format(election=election_name)
else:
if leaf:
if leaf == 'post_id':
if current_election:
message = _("is known to be standing for the post with ID {party} in the {election}")
else:
message = _("was known to be standing for the post with ID {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'mapit_url':
if current_election:
message = _("is known to be standing in the constituency with MapIt URL {party} in the {election}")
else:
message = _("was known to be standing in the constituency with MapIt URL {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'name':
if current_election:
message = _("is known to be standing in {party} in the {election}")
else:
message = _("was known to be standing in {party} in the {election}")
return message.format(party=value, election=election_name)
elif leaf == 'elected':
if value:
return _("was elected in the {election}").format(election=election_name)
else:
return _("was not elected in the {election}").format(election=election_name)
else:
message = _(u"Unexpected leaf {0} (attribute: {1}, election: {2}")
raise Exception, message.format(
leaf, attribute, election
)
else:
if current_election:
message = _(u'is known to be standing in {party} in the {election}')
else:
message = _(u'was known to be standing in {party} in the {election}')
return message.format(party=value['name'], election=election_name)
def explain_standing_in_and_party_memberships(operation, attribute, election, leaf):
"""Set 'value' and 'previous_value' in operation to a readable explanation
'attribute' is one of 'standing_in' or 'party_memberships'."""
for key in ('previous_value', 'value'):
if key not in operation:
continue
if election:
operation[key] = get_descriptive_value(
election,
attribute,
operation[key],
leaf,
)
else:
clauses = []
for election, value in (operation[key] or {}).items():
clauses.append(get_descriptive_value(
election,
attribute,
value,
leaf,
))
operation[key] = _(u' and ').join(clauses)
def get_version_diff(from_data, to_data):
"""Calculate the diff (a mangled JSON patch) between from_data and to_data"""
basic_patch = jsonpatch.make_patch(from_data, to_data)
result = []
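    # Augment each raw patch operation with its previous value and, for
    # candidacy and party membership changes, a human-readable description.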
for operation in basic_patch:
op = operation['op']
# We deal with standing_in and party_memberships slightly
# differently so they can be presented in human-readable form,
# so match those cases first:
m = re.search(
r'(standing_in|party_memberships)(?:/([-_A-Za-z0-9]+))?(?:/(\w+))?',
operation['path'],
)
if op in ('replace', 'remove'):
operation['previous_value'] = \
jsonpointer.resolve_pointer(
from_data,
operation['path']
)
attribute, election, leaf = m.groups() if m else (None, None, None)
if attribute:
explain_standing_in_and_party_memberships(operation, attribute, election, leaf)
if op in ('replace', 'remove'):
# Ignore replacing no data with no data:
if op == 'replace' and \
not operation['previous_value'] and \
not operation['value']:
continue
if op == 'replace' and not operation['previous_value']:
operation['op'] = 'add'
elif op == 'add':
# It's important that we don't skip the case where a
# standing_in value is being set to None, because that's
# saying 'we *know* they're not standing then'
if (not operation['value']) and (attribute != 'standing_in'):
continue
operation['path'] = re.sub(r'^/', '', operation['path'])
result.append(operation)
result.sort(key=lambda o: (o['op'], o['path']))
return result
def clean_version_data(data):
# We're not interested in changes of these IDs:
for i in data.get('identifiers', []):
i.pop('id', None)
for on in data.get('other_names', []):
on.pop('id', None)
data.pop('last_party', None)
data.pop('proxy_image', None)
data.pop('date_of_birth', None)
def get_version_diffs(versions):
"""Add a diff to each of an array of version dicts
The first version is the most recent; the last is the original
version."""
result = []
n = len(versions)
for i, v in enumerate(versions):
# to_version_data = replace_empty_with_none(
# versions[i]['data']
# )
to_version_data = versions[i]['data']
if i == (n - 1):
from_version_data = {}
else:
# from_version_data = replace_empty_with_none(
# versions[i + 1]['data']
# )
from_version_data = versions[i + 1]['data']
clean_version_data(to_version_data)
clean_version_data(from_version_data)
version_with_diff = versions[i].copy()
version_with_diff['diff'] = \
get_version_diff(from_version_data, to_version_data)
result.append(version_with_diff)
return result
| agpl-3.0 | -1,124,514,147,237,027,800 | 42.288557 | 124 | 0.54488 | false |
iwm911/plaso | plaso/classifier/scanner.py | 1 | 24473 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the classes for a scan tree-based format scanner."""
import logging
import os
from plaso.classifier import patterns
from plaso.classifier import range_list
from plaso.classifier import scan_tree
class _ScanMatch(object):
"""Class that implements a scan match."""
def __init__(self, total_data_offset, pattern):
"""Initializes the scan result.
Args:
total_data_offset: the offset of the resulting match relative
to the start of the total data scanned.
pattern: the pattern matched.
"""
super(_ScanMatch, self).__init__()
self.total_data_offset = total_data_offset
self.pattern = pattern
@property
def specification(self):
"""The specification."""
return self.pattern.specification
class _ScanResult(object):
"""Class that implements a scan result."""
def __init__(self, specification):
"""Initializes the scan result.
Args:
      specification: the format specification.
"""
super(_ScanResult, self).__init__()
self.specification = specification
self.scan_matches = []
@property
def identifier(self):
"""The specification identifier."""
return self.specification.identifier
class ScanState(object):
"""Class that implements a scan state."""
# The state definitions.
_SCAN_STATE_START = 1
_SCAN_STATE_SCANNING = 2
_SCAN_STATE_STOP = 3
def __init__(self, scan_tree_node, total_data_size=None):
"""Initializes the scan state.
Args:
scan_tree_node: the corresponding scan tree node or None.
total_data_size: optional value to indicate the total data size.
The default is None.
"""
super(ScanState, self).__init__()
self._matches = []
self.remaining_data = None
self.remaining_data_size = 0
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_START
self.total_data_offset = 0
self.total_data_size = total_data_size
def AddMatch(self, total_data_offset, pattern):
"""Adds a result to the state to scanning.
Args:
total_data_offset: the offset of the resulting match relative
to the start total data scanned.
pattern: the pattern matched.
Raises:
RuntimeError: when a unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self._matches.append(_ScanMatch(total_data_offset, pattern))
def GetMatches(self):
"""Retrieves a list containing the results.
Returns:
A list of scan matches (instances of _ScanMatch).
Raises:
RuntimeError: when a unsupported state is encountered.
"""
if self.state != self._SCAN_STATE_STOP:
raise RuntimeError(u'Unsupported scan state.')
return self._matches
def Reset(self, scan_tree_node):
"""Resets the state to start.
This function will clear the remaining data.
Args:
scan_tree_node: the corresponding scan tree node or None.
Raises:
RuntimeError: when a unsupported state is encountered.
"""
if self.state != self._SCAN_STATE_STOP:
raise RuntimeError(u'Unsupported scan state.')
self.remaining_data = None
self.remaining_data_size = 0
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_START
def Scanning(self, scan_tree_node, total_data_offset):
"""Sets the state to scanning.
Args:
scan_tree_node: the active scan tree node.
total_data_offset: the offset of the resulting match relative
to the start of the total data scanned.
Raises:
RuntimeError: when a unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self.scan_tree_node = scan_tree_node
self.state = self._SCAN_STATE_SCANNING
self.total_data_offset = total_data_offset
def Stop(self):
"""Sets the state to stop.
Raises:
RuntimeError: when a unsupported state is encountered.
"""
if (self.state != self._SCAN_STATE_START and
self.state != self._SCAN_STATE_SCANNING):
raise RuntimeError(u'Unsupported scan state.')
self.scan_tree_node = None
self.state = self._SCAN_STATE_STOP
class ScanTreeScannerBase(object):
"""Class that implements a scan tree-based scanner base."""
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(ScanTreeScannerBase, self).__init__()
self._scan_tree = None
self._specification_store = specification_store
def _ScanBufferScanState(
self, scan_tree_object, scan_state, data, data_size, total_data_offset,
total_data_size=None):
"""Scans a buffer using the scan tree.
This function implements a Boyer–Moore–Horspool equivalent approach
in combination with the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
total_data_offset: the offset of the data relative to the start of
the total data scanned.
total_data_size: optional value to indicate the total data size.
The default is None.
Raises:
RuntimeError: if the total data offset, total data size or the last
pattern offset value is out of bounds
"""
if total_data_size is not None and total_data_size < 0:
raise RuntimeError(u'Invalid total data size, value out of bounds.')
if total_data_offset < 0 or (
total_data_size is not None and total_data_offset >= total_data_size):
raise RuntimeError(u'Invalid total data offset, value out of bounds.')
data_offset = 0
scan_tree_node = scan_state.scan_tree_node
if scan_state.remaining_data:
      # str.join() should be more efficient than concatenation by +.
data = ''.join([scan_state.remaining_data, data])
data_size += scan_state.remaining_data_size
scan_state.remaining_data = None
scan_state.remaining_data_size = 0
if (total_data_size is not None and
total_data_offset + data_size >= total_data_size):
match_on_boundary = True
else:
match_on_boundary = False
while data_offset < data_size:
if (not match_on_boundary and
data_offset + scan_tree_object.largest_length >= data_size):
break
found_match = False
scan_done = False
while not scan_done:
scan_object = scan_tree_node.CompareByteValue(
data, data_offset, data_size, total_data_offset,
total_data_size=total_data_size)
if isinstance(scan_object, scan_tree.ScanTreeNode):
scan_tree_node = scan_object
else:
scan_done = True
if isinstance(scan_object, patterns.Pattern):
pattern_length = len(scan_object.signature.expression)
data_last_offset = data_offset + pattern_length
if cmp(scan_object.signature.expression,
data[data_offset:data_last_offset]) == 0:
if (not scan_object.signature.is_bound or
scan_object.signature.offset == data_offset):
found_match = True
logging.debug(
u'Signature match at data offset: 0x{0:08x}.'.format(
data_offset))
scan_state.AddMatch(total_data_offset + data_offset, scan_object)
if found_match:
skip_value = len(scan_object.signature.expression)
scan_tree_node = scan_tree_object.root_node
else:
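        # No match at this offset: advance by a Boyer-Moore-Horspool style skip,
        # scanning the current window right-to-left for the first non-zero skip value.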
last_pattern_offset = (
scan_tree_object.skip_table.skip_pattern_length - 1)
if data_offset + last_pattern_offset >= data_size:
raise RuntimeError(
u'Invalid last pattern offset, value out of bounds.')
skip_value = 0
while last_pattern_offset >= 0 and not skip_value:
last_data_offset = data_offset + last_pattern_offset
byte_value = ord(data[last_data_offset])
skip_value = scan_tree_object.skip_table[byte_value]
last_pattern_offset -= 1
if not skip_value:
skip_value = 1
scan_tree_node = scan_tree_object.root_node
data_offset += skip_value
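    # Preserve any unscanned tail so signatures spanning buffer boundaries can
    # still be matched when the next buffer is scanned.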
if not match_on_boundary and data_offset < data_size:
scan_state.remaining_data = data[data_offset:data_size]
scan_state.remaining_data_size = data_size - data_offset
scan_state.Scanning(scan_tree_node, total_data_offset + data_offset)
def _ScanBufferScanStateFinal(self, scan_tree_object, scan_state):
"""Scans the remaining data in the scan state using the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
"""
if scan_state.remaining_data:
data = scan_state.remaining_data
data_size = scan_state.remaining_data_size
scan_state.remaining_data = None
scan_state.remaining_data_size = 0
# Setting the total data size will make boundary matches are returned
# in this scanning pass.
total_data_size = scan_state.total_data_size
if total_data_size is None:
total_data_size = scan_state.total_data_offset + data_size
self._ScanBufferScanState(
scan_tree_object, scan_state, data, data_size,
scan_state.total_data_offset, total_data_size=total_data_size)
scan_state.Stop()
def GetScanResults(self, scan_state):
"""Retrieves the scan results.
Args:
scan_state: the scan state (instance of ScanState).
Return:
A list of scan results (instances of _ScanResult).
"""
scan_results = {}
for scan_match in scan_state.GetMatches():
specification = scan_match.specification
identifier = specification.identifier
logging.debug(
u'Scan match at offset: 0x{0:08x} specification: {1:s}'.format(
scan_match.total_data_offset, identifier))
if identifier not in scan_results:
scan_results[identifier] = _ScanResult(specification)
scan_results[identifier].scan_matches.append(scan_match)
return scan_results.values()
class Scanner(ScanTreeScannerBase):
"""Class that implements a scan tree-based scanner."""
_READ_BUFFER_SIZE = 512
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(Scanner, self).__init__(specification_store)
def ScanBuffer(self, scan_state, data, data_size):
"""Scans a buffer.
Args:
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
"""
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size,
scan_state.total_data_offset,
total_data_size=scan_state.total_data_size)
def ScanFileObject(self, file_object):
"""Scans a file-like object.
Args:
file_object: a file-like object.
Returns:
A list of scan results (instances of ScanResult).
"""
file_offset = 0
if hasattr(file_object, 'get_size'):
file_size = file_object.get_size()
else:
file_object.seek(0, os.SEEK_END)
file_size = file_object.tell()
scan_state = self.StartScan(total_data_size=file_size)
file_object.seek(file_offset, os.SEEK_SET)
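    # Scan the file in fixed-size chunks; the scan state carries any remaining
    # data across reads so matches on chunk boundaries are not lost.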
while file_offset < file_size:
data = file_object.read(self._READ_BUFFER_SIZE)
data_size = len(data)
if data_size == 0:
break
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size, file_offset,
total_data_size=file_size)
file_offset += data_size
self.StopScan(scan_state)
return self.GetScanResults(scan_state)
def StartScan(self, total_data_size=None):
"""Starts a scan.
The function sets up the scanning related structures if necessary.
Args:
total_data_size: optional value to indicate the total data size.
The default is None.
Returns:
A scan state (instance of ScanState).
Raises:
RuntimeError: when total data size is invalid.
"""
if total_data_size is not None and total_data_size < 0:
raise RuntimeError(u'Invalid total data size.')
if self._scan_tree is None:
self._scan_tree = scan_tree.ScanTree(
self._specification_store, None)
return ScanState(self._scan_tree.root_node, total_data_size=total_data_size)
def StopScan(self, scan_state):
"""Stops a scan.
Args:
scan_state: the scan state (instance of ScanState).
"""
self._ScanBufferScanStateFinal(self._scan_tree, scan_state)
class OffsetBoundScanner(ScanTreeScannerBase):
"""Class that implements an offset-bound scan tree-based scanner."""
_READ_BUFFER_SIZE = 512
def __init__(self, specification_store):
"""Initializes the scanner.
Args:
specification_store: the specification store (instance of
SpecificationStore) that contains the format
specifications.
"""
super(OffsetBoundScanner, self).__init__(specification_store)
self._footer_scan_tree = None
self._footer_spanning_range = None
self._header_scan_tree = None
self._header_spanning_range = None
def _GetFooterRange(self, total_data_size):
"""Retrieves the read buffer aligned footer range.
Args:
      total_data_size: the total data size.
Returns:
A range (instance of Range).
"""
# The actual footer range is in reverse since the spanning footer range
# is based on positive offsets, where 0 is the end of file.
if self._footer_spanning_range.end_offset < total_data_size:
footer_range_start_offset = (
total_data_size - self._footer_spanning_range.end_offset)
else:
footer_range_start_offset = 0
# Calculate the lower bound modulus of the footer range start offset
# in increments of the read buffer size.
footer_range_start_offset /= self._READ_BUFFER_SIZE
footer_range_start_offset *= self._READ_BUFFER_SIZE
# Calculate the upper bound modulus of the footer range size
# in increments of the read buffer size.
footer_range_size = self._footer_spanning_range.size
remainder = footer_range_size % self._READ_BUFFER_SIZE
footer_range_size /= self._READ_BUFFER_SIZE
if remainder > 0:
footer_range_size += 1
footer_range_size *= self._READ_BUFFER_SIZE
return range_list.Range(footer_range_start_offset, footer_range_size)
def _GetHeaderRange(self):
"""Retrieves the read buffer aligned header range.
Returns:
A range (instance of Range).
"""
# Calculate the lower bound modulus of the header range start offset
# in increments of the read buffer size.
header_range_start_offset = self._header_spanning_range.start_offset
header_range_start_offset /= self._READ_BUFFER_SIZE
header_range_start_offset *= self._READ_BUFFER_SIZE
# Calculate the upper bound modulus of the header range size
# in increments of the read buffer size.
header_range_size = self._header_spanning_range.size
remainder = header_range_size % self._READ_BUFFER_SIZE
header_range_size /= self._READ_BUFFER_SIZE
if remainder > 0:
header_range_size += 1
header_range_size *= self._READ_BUFFER_SIZE
return range_list.Range(header_range_start_offset, header_range_size)
def _ScanBufferScanState(
self, scan_tree_object, scan_state, data, data_size, total_data_offset,
total_data_size=None):
"""Scans a buffer using the scan tree.
This function implements a Boyer–Moore–Horspool equivalent approach
in combination with the scan tree.
Args:
scan_tree_object: the scan tree (instance of ScanTree).
scan_state: the scan state (instance of ScanState).
data: a buffer containing raw data.
data_size: the size of the raw data in the buffer.
total_data_offset: the offset of the data relative to the start of
the total data scanned.
total_data_size: optional value to indicate the total data size.
The default is None.
"""
scan_done = False
scan_tree_node = scan_tree_object.root_node
while not scan_done:
data_offset = 0
scan_object = scan_tree_node.CompareByteValue(
data, data_offset, data_size, total_data_offset,
total_data_size=total_data_size)
if isinstance(scan_object, scan_tree.ScanTreeNode):
scan_tree_node = scan_object
else:
scan_done = True
if isinstance(scan_object, patterns.Pattern):
pattern_length = len(scan_object.signature.expression)
pattern_start_offset = scan_object.signature.offset
pattern_end_offset = pattern_start_offset + pattern_length
if cmp(scan_object.signature.expression,
data[pattern_start_offset:pattern_end_offset]) == 0:
scan_state.AddMatch(
total_data_offset + scan_object.signature.offset, scan_object)
logging.debug(
u'Signature match at data offset: 0x{0:08x}.'.format(data_offset))
# TODO: implement.
# def ScanBuffer(self, scan_state, data, data_size):
# """Scans a buffer.
# Args:
# scan_state: the scan state (instance of ScanState).
# data: a buffer containing raw data.
# data_size: the size of the raw data in the buffer.
# """
# # TODO: fix footer scanning logic.
# # need to know the file size here for the footers.
# # TODO: check for clashing ranges?
# header_range = self._GetHeaderRange()
# footer_range = self._GetFooterRange(scan_state.total_data_size)
# if self._scan_tree == self._header_scan_tree:
# if (scan_state.total_data_offset >= header_range.start_offset and
# scan_state.total_data_offset < header_range.end_offset):
# self._ScanBufferScanState(
# self._scan_tree, scan_state, data, data_size,
# scan_state.total_data_offset,
# total_data_size=scan_state.total_data_size)
# elif scan_state.total_data_offset > header_range.end_offset:
# # TODO: implement.
# pass
# if self._scan_tree == self._footer_scan_tree:
# if (scan_state.total_data_offset >= footer_range.start_offset and
# scan_state.total_data_offset < footer_range.end_offset):
# self._ScanBufferScanState(
# self._scan_tree, scan_state, data, data_size,
# scan_state.total_data_offset,
# total_data_size=scan_state.total_data_size)
def ScanFileObject(self, file_object):
"""Scans a file-like object.
Args:
file_object: a file-like object.
Returns:
      A list of scan results (instances of _ScanResult).
"""
# TODO: add support for fixed size block-based reads.
if hasattr(file_object, 'get_size'):
file_size = file_object.get_size()
else:
file_object.seek(0, os.SEEK_END)
file_size = file_object.tell()
file_offset = 0
scan_state = self.StartScan(total_data_size=file_size)
if self._header_scan_tree.root_node is not None:
header_range = self._GetHeaderRange()
# TODO: optimize the read by supporting fixed size block-based reads.
# if file_offset < header_range.start_offset:
# file_offset = header_range.start_offset
file_object.seek(file_offset, os.SEEK_SET)
# TODO: optimize the read by supporting fixed size block-based reads.
# data = file_object.read(header_range.size)
data = file_object.read(header_range.end_offset)
data_size = len(data)
if data_size > 0:
self._ScanBufferScanState(
self._scan_tree, scan_state, data, data_size, file_offset,
total_data_size=file_size)
file_offset += data_size
if self._footer_scan_tree.root_node is not None:
self.StopScan(scan_state)
self._scan_tree = self._footer_scan_tree
scan_state.Reset(self._scan_tree.root_node)
if self._footer_scan_tree.root_node is not None:
footer_range = self._GetFooterRange(file_size)
# Note that the offset in the footer scan tree start with 0. Make sure
# the data offset of the data being scanned is aligned with the offset
# in the scan tree.
if footer_range.start_offset < self._footer_spanning_range.end_offset:
data_offset = (
self._footer_spanning_range.end_offset - footer_range.start_offset)
else:
data_offset = 0
if file_offset < footer_range.start_offset:
file_offset = footer_range.start_offset
file_object.seek(file_offset, os.SEEK_SET)
data = file_object.read(self._READ_BUFFER_SIZE)
data_size = len(data)
if data_size > 0:
self._ScanBufferScanState(
self._scan_tree, scan_state, data[data_offset:],
data_size - data_offset, file_offset + data_offset,
total_data_size=file_size)
file_offset += data_size
self.StopScan(scan_state)
return self.GetScanResults(scan_state)
def StartScan(self, total_data_size=None):
"""Starts a scan.
The function sets up the scanning related structures if necessary.
Args:
total_data_size: optional value to indicate the total data size.
The default is None.
Returns:
      A scan state (instance of ScanState).
Raises:
RuntimeError: when total data size is invalid.
"""
if total_data_size is None or total_data_size < 0:
raise RuntimeError(u'Invalid total data size.')
if self._header_scan_tree is None:
self._header_scan_tree = scan_tree.ScanTree(
self._specification_store, True,
offset_mode=scan_tree.ScanTree.OFFSET_MODE_POSITIVE)
if self._header_spanning_range is None:
spanning_range = self._header_scan_tree.range_list.GetSpanningRange()
self._header_spanning_range = spanning_range
if self._footer_scan_tree is None:
self._footer_scan_tree = scan_tree.ScanTree(
self._specification_store, True,
offset_mode=scan_tree.ScanTree.OFFSET_MODE_NEGATIVE)
if self._footer_spanning_range is None:
spanning_range = self._footer_scan_tree.range_list.GetSpanningRange()
self._footer_spanning_range = spanning_range
if self._header_scan_tree.root_node is not None:
self._scan_tree = self._header_scan_tree
elif self._footer_scan_tree.root_node is not None:
self._scan_tree = self._footer_scan_tree
else:
self._scan_tree = None
if self._scan_tree is not None:
root_node = self._scan_tree.root_node
else:
root_node = None
return ScanState(root_node, total_data_size=total_data_size)
def StopScan(self, scan_state):
"""Stops a scan.
Args:
scan_state: the scan state (instance of ScanState).
"""
self._ScanBufferScanStateFinal(self._scan_tree, scan_state)
self._scan_tree = None
| apache-2.0 | -7,426,356,127,187,481,000 | 31.576565 | 80 | 0.651707 | false |