repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
vmax-feihu/hue | desktop/core/ext-py/Django-1.6.10/django/utils/termcolors.py | 117 | 6948 | """
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in six.iteritems(kwargs):
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print(bold_red('hello'))
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
},
DARK_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
'NOTICE': { 'fg': 'red' },
'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
'SQL_COLTYPE': { 'fg': 'green' },
'SQL_KEYWORD': { 'fg': 'yellow' },
'SQL_TABLE': { 'opts': ('bold',) },
'HTTP_INFO': { 'opts': ('bold',) },
'HTTP_SUCCESS': { },
'HTTP_REDIRECT': { 'fg': 'green' },
'HTTP_NOT_MODIFIED': { 'fg': 'cyan' },
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'yellow' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
},
LIGHT_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
'NOTICE': { 'fg': 'red' },
'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
'SQL_COLTYPE': { 'fg': 'green' },
'SQL_KEYWORD': { 'fg': 'blue' },
'SQL_TABLE': { 'opts': ('bold',) },
'HTTP_INFO': { 'opts': ('bold',) },
'HTTP_SUCCESS': { },
'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) },
'HTTP_NOT_MODIFIED': { 'fg': 'green' },
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'red' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
The general form of a palette definition is:
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
where:
palette is a named palette; one of 'light', 'dark', or 'nocolor'.
role is a named style used by Django
fg is a foreground color.
bg is a background color.
option is a display option.
Specifying a named palette is the same as manually specifying the individual
definitions for each role. Any individual definitions following the palette
definition will augment the base palette definition.
Valid roles:
'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
'http_info', 'http_success', 'http_redirect', 'http_bad_request',
'http_not_found', 'http_server_error'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal'
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict.keys())
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
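# Illustrative usage sketch (not part of the original module): a DJANGO_COLORS-style
# string combines a named palette with per-role overrides, and parsing it yields a
# dict keyed by upper-cased role names:
#
#     palette = parse_color_setting('light;error=yellow/blue,blink;notice=magenta')
#     # palette['ERROR']  -> {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}
#     # palette['NOTICE'] -> {'fg': 'magenta'}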
| apache-2.0 |
kaplun/invenio | modules/miscutil/lib/dateutils.py | 11 | 18834 | # -*- coding: utf-8 -*-
##
## Some functions about dates
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
API for date conversion and date related GUI creation.
Lexicon
datetext:
textual format => 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND'
e.g. '2005-11-16 15:11:44'
default value: '0000-00-00 00:00:00'
datestruct:
tuple format => see http://docs.python.org/lib/module-time.html
(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, WEEKDAY, YEARDAY, DAYLIGHT)
e.g. (2005, 11, 16, 15, 11, 44, 2, 320, 0)
default value: (0, 0, 0, 0, 0, 0, 0, 0, 0)
dategui:
textual format for output => 'DAY MONTH YEAR, HOUR:MINUTE'
e.g. '16 nov 2005, 15:11'
default value: _("N/A")
"""
__revision__ = "$Id$"
import re
import time
from datetime import date as real_date, \
datetime as real_datetime, \
time as real_time, \
timedelta
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
try:
from mx.DateTime import Parser
CFG_HAS_EGENIX_DATETIME = True
except ImportError:
CFG_HAS_EGENIX_DATETIME = False
datetext_default = '0000-00-00 00:00:00'
datestruct_default = (0, 0, 0, 0, 0, 0, 0, 0, 0)
datetext_format = "%Y-%m-%d %H:%M:%S"
class date(real_date):
def strftime(self, fmt):
return strftime(fmt, self)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(fmt, self)
def __add__(self, other):
d = real_datetime.combine(self, self.timetz())
d += other
return self.combine(d, d.timetz())
def date(self):
return date(self.year, self.month, self.day)
@staticmethod
def strptime(date_string, format):
return datetime(*(time.strptime(date_string, format)[0:6]))
def convert_datetext_to_dategui(datetext, ln=CFG_SITE_LANG, secs=False):
"""
Convert:
'2005-11-16 15:11:57' => '16 nov 2005, 15:11'
Or optionally with seconds:
'2005-11-16 15:11:57' => '16 nov 2005, 15:11:57'
Month is internationalized
"""
try:
datestruct = convert_datetext_to_datestruct(datetext)
if datestruct == datestruct_default:
raise ValueError
month = get_i18n_month_name(datestruct[1], ln=ln)
if secs:
output_format = "%d " + month + " %Y, %H:%M:%S"
else:
output_format = "%d " + month + " %Y, %H:%M"
return strftime(output_format, datestruct)
except:
_ = gettext_set_language(ln)
return _("N/A")
def convert_datetext_to_datestruct(datetext):
"""
Convert:
'2005-11-16 15:11:57' => (2005, 11, 16, 15, 11, 57, 2, 320, 0)
"""
try:
return time.strptime(datetext, datetext_format)
except:
return datestruct_default
def convert_datestruct_to_dategui(datestruct, ln=CFG_SITE_LANG):
"""
Convert:
(2005, 11, 16, 15, 11, 44, 2, 320, 0) => '16 nov 2005, 15:11'
Month is internationalized
"""
try:
if datestruct[0] and datestruct[1] and datestruct[2]:
month = get_i18n_month_name(datestruct[1], ln=ln)
output_format = "%d " + month + " %Y, %H:%M"
return strftime(output_format, datestruct)
else:
raise ValueError
except:
_ = gettext_set_language(ln)
return _("N/A")
def convert_datestruct_to_datetext(datestruct):
"""
Convert:
(2005, 11, 16, 15, 11, 44, 2, 320, 0) => '2005-11-16 15:11:44'
"""
try:
return strftime(datetext_format, datestruct)
except:
return datetext_default
def convert_datecvs_to_datestruct(datecvs):
"""
Convert CVS $Date$ and
$Id$
formats into datestruct. Useful for later conversion of Last
updated timestamps in the page footers.
Example: '$Date$' => (2006, 09, 20, 19, 27, 11, 0, 0)
"""
try:
if datecvs.startswith("$Id"):
date_time = ' '.join(datecvs.split(" ")[3:5])
return time.strptime(date_time, '%Y/%m/%d %H:%M:%S')
else:
# here we have to use '$' + 'Date...' here, otherwise the CVS
# commit would erase this time format to put commit date:
return time.strptime(datecvs, '$' + 'Date: %Y/%m/%d %H:%M:%S $')
except ValueError:
return datestruct_default
def get_datetext(year, month, day):
"""
year=2005, month=11, day=16 => '2005-11-16 00:00:00'
"""
input_format = "%Y-%m-%d"
try:
datestruct = time.strptime("%i-%i-%i"% (year, month, day), input_format)
return strftime(datetext_format, datestruct)
except:
return datetext_default
def get_datestruct(year, month, day):
"""
year=2005, month=11, day=16 => (2005, 11, 16, 0, 0, 0, 2, 320, -1)
"""
input_format = "%Y-%m-%d"
try:
return time.strptime("%i-%i-%i"% (year, month, day), input_format)
except (ValueError, TypeError):
return datestruct_default
def get_i18n_day_name(day_nb, display='short', ln=CFG_SITE_LANG):
"""
get the string representation of a weekday, internationalized
@param day_nb: number of the weekday, UNIX-like.
=> 0=Sunday
@param ln: language for output
@return: the string representation of the day
"""
_ = gettext_set_language(ln)
if display == 'short':
days = {0: _("Sun"),
1: _("Mon"),
2: _("Tue"),
3: _("Wed"),
4: _("Thu"),
5: _("Fri"),
6: _("Sat")}
else:
days = {0: _("Sunday"),
1: _("Monday"),
2: _("Tuesday"),
3: _("Wednesday"),
4: _("Thursday"),
5: _("Friday"),
6: _("Saturday")}
return days[day_nb]
def get_i18n_month_name(month_nb, display='short', ln=CFG_SITE_LANG):
"""
get a non-numeric representation of a month, internationalized.
@param month_nb: number of the month (1-based).
=>1=jan,..,12=dec
@param ln: language for output
@return: the string representation of month
"""
_ = gettext_set_language(ln)
if display == 'short':
months = {0: _("Month"),
1: _("Jan"),
2: _("Feb"),
3: _("Mar"),
4: _("Apr"),
5: _("May"),
6: _("Jun"),
7: _("Jul"),
8: _("Aug"),
9: _("Sep"),
10: _("Oct"),
11: _("Nov"),
12: _("Dec")}
else:
months = {0: _("Month"),
1: _("January"),
2: _("February"),
3: _("March"),
4: _("April"),
5: _("May "), # trailing space distinguishes short/long form
6: _("June"),
7: _("July"),
8: _("August"),
9: _("September"),
10: _("October"),
11: _("November"),
12: _("December")}
return months[month_nb].strip()
def create_day_selectbox(name, selected_day=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu for day selection. (0..31 values).
@param name: name of the control (i.e. name of the var you'll get)
@param selected_day: preselect a day. Use 0 for the label 'Day'
@param ln: language of the menu
@return: html as string
"""
_ = gettext_set_language(ln)
out = "<select name=\"%s\">\n"% name
for i in range(0, 32):
out += " <option value=\"%i\""% i
if (i == selected_day):
out += " selected=\"selected\""
if (i == 0):
out += ">%s</option>\n"% _("Day")
else:
out += ">%i</option>\n"% i
out += "</select>\n"
return out
def create_month_selectbox(name, selected_month=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu for month selection. Value of selected field is numeric
@param name: name of the control (your form will be sent with name=value...)
@param selected_month: preselect a month. use 0 for the Label 'Month'
@param ln: language of the menu
@return: html as string
"""
out = "<select name=\"%s\">\n"% name
for i in range(0, 13):
out += "<option value=\"%i\""% i
if (i == selected_month):
out += " selected=\"selected\""
out += ">%s</option>\n"% get_i18n_month_name(i, ln)
out += "</select>\n"
return out
def create_year_inputbox(name, value=0):
"""
Creates an HTML field (simple input) for year selection.
@param name: name of the control (i.e. name of the variable you'll get)
@param value: prefilled value (int)
@return: html as string
"""
out = "<input type=\"text\" name=\"%s\" value=\"%i\" maxlength=\"4\" size=\"4\"/>\n"% (name, value)
return out
def create_year_selectbox(name, from_year=-1, length=10, selected_year=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu (dropdownbox) for year selection.
@param name: name of control( i.e. name of the variable you'll get)
@param from_year: year on which to begin; if < 0, the current year is assumed
@param length: number of items in menu
@param selected_year: initial selected year (if in range), else: label is selected
@param ln: language
@return: html as string
"""
_ = gettext_set_language(ln)
if from_year < 0:
from_year = time.localtime()[0]
out = "<select name=\"%s\">\n"% name
out += ' <option value="0"'
if selected_year == 0:
out += ' selected="selected"'
out += ">%s</option>\n"% _("Year")
for i in range(from_year, from_year + length):
out += "<option value=\"%i\""% i
if (i == selected_year):
out += " selected=\"selected\""
out += ">%i</option>\n"% i
out += "</select>\n"
return out
_RE_RUNTIMELIMIT_FULL = re.compile(r"(?:(?P<weekday_begin>[a-z]+)(?:-(?P<weekday_end>[a-z]+))?)?\s*((?P<hour_begin>\d\d?(:\d\d?)?)(-(?P<hour_end>\d\d?(:\d\d?)?))?)?", re.I)
_RE_RUNTIMELIMIT_HOUR = re.compile(r'(?P<hours>\d\d?)(:(?P<minutes>\d\d?))?')
def parse_runtime_limit(value, now=None):
"""
Parsing CLI option for runtime limit, supplied as VALUE.
Value could be something like: Sunday 23:00-05:00, the format being
[Wee[kday]] [hh[:mm][-hh[:mm]]].
The function returns two valid time ranges. The first may lie in the past, contain the present, or lie in the future; the second is always in the future.
"""
def extract_time(value):
value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict()
return timedelta(hours=int(value['hours']),
minutes=int(value['minutes']))
def extract_weekday(value):
key = value[:3].lower()
try:
return {
'mon' : 0,
'tue' : 1,
'wed' : 2,
'thu' : 3,
'fri' : 4,
'sat' : 5,
'sun' : 6,
}[key]
except KeyError:
raise ValueError("%s is not a valid weekday name." % value)
if now is None:
now = datetime.now()
today = now.date()
g = _RE_RUNTIMELIMIT_FULL.search(value)
if not g:
raise ValueError('"%s" does not seem to be correct format for parse_runtime_limit() [Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value)
pieces = g.groupdict()
if pieces['weekday_begin'] is None:
# No weekday specified. So either today or tomorrow
first_occasion_day = timedelta(days=0)
next_occasion_delta = timedelta(days=1)
else:
# If given 'Mon' then we transform it to 'Mon-Mon'
if pieces['weekday_end'] is None:
pieces['weekday_end'] = pieces['weekday_begin']
# Day range
weekday_begin = extract_weekday(pieces['weekday_begin'])
weekday_end = extract_weekday(pieces['weekday_end'])
if weekday_begin <= today.weekday() <= weekday_end:
first_occasion_day = timedelta(days=0)
else:
days = (weekday_begin - today.weekday()) % 7
first_occasion_day = timedelta(days=days)
weekday = (now + first_occasion_day).weekday()
if weekday < weekday_end:
# Fits in the same week
next_occasion_delta = timedelta(days=1)
else:
# The week after
days = weekday_begin - weekday + 7
next_occasion_delta = timedelta(days=days)
if pieces['hour_begin'] is None:
pieces['hour_begin'] = '00:00'
if pieces['hour_end'] is None:
pieces['hour_end'] = '00:00'
beginning_time = extract_time(pieces['hour_begin'])
ending_time = extract_time(pieces['hour_end'])
if not ending_time:
ending_time = beginning_time + timedelta(days=1)
elif beginning_time and ending_time and beginning_time > ending_time:
ending_time += timedelta(days=1)
start_time = real_datetime.combine(today, real_time(hour=0, minute=0))
current_range = (
start_time + first_occasion_day + beginning_time,
start_time + first_occasion_day + ending_time
)
if now > current_range[1]:
current_range = tuple(t + next_occasion_delta for t in current_range)
future_range = (
current_range[0] + next_occasion_delta,
current_range[1] + next_occasion_delta
)
return current_range, future_range
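# Illustrative usage sketch (not part of the original module); the dates below assume
# now = Wednesday 2013-01-02 purely for the sake of the example:
#
#     now = datetime(2013, 1, 2, 12, 0)
#     current, future = parse_runtime_limit('Sunday 23:00-05:00', now=now)
#     # current -> (2013-01-06 23:00, 2013-01-07 05:00)
#     # future  -> the same window one week later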
def guess_datetime(datetime_string):
"""
Try to guess the datetime contained in a string of unknown format.
@param datetime_string: the datetime representation.
@type datetime_string: string
@return: the guessed time.
@rtype: L{time.struct_time}
@raises ValueError: in case it's not possible to guess the time.
"""
if CFG_HAS_EGENIX_DATETIME:
try:
return Parser.DateTimeFromString(datetime_string).timetuple()
except ValueError:
pass
else:
for format in (None, '%x %X', '%X %x', '%Y-%m-%dT%H:%M:%SZ'):
try:
return time.strptime(datetime_string, format)
except ValueError:
pass
raise ValueError("It is not possible to guess the datetime format of %s" % datetime_string)
def get_time_estimator(total):
"""
Given a total amount of items to compute, return a function that,
if called every time an item is computed (or every step items are computed)
will give a time estimation for how long it will take to compute the whole
set of items. The function will return two values: the first is the
number of seconds that are still needed to compute the whole set, the second
value is the time in the future when the operation is expected to end.
"""
t1 = time.time()
count = [0]
def estimate_needed_time(step=1):
count[0] += step
t2 = time.time()
t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0])
return t3, t3 + t1
return estimate_needed_time
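# Illustrative usage sketch (not part of the original module); `items` and `process`
# are placeholders, and the estimate assumes a roughly constant per-item cost:
#
#     estimator = get_time_estimator(len(items))
#     for item in items:
#         process(item)
#         seconds_left, expected_end = estimator()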
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
def strftime(fmt, dt):
if not isinstance(dt, real_date):
dt = datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
if dt.year >= 1900:
return time.strftime(fmt, dt.timetuple())
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle " + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = _findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return s
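# Illustrative example (not part of the original module): dates before 1900 are shifted
# into the 28-year Gregorian calendar cycle around 2000, formatted there, and the year
# digits are substituted back, e.g. (in an English/C locale):
#
#     strftime("%d %b %Y", real_date(1899, 12, 31))   # -> '31 Dec 1899'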
def get_dst(date_obj):
"""Determine if dst is locally enabled at this time"""
dst = 0
if date_obj.year >= 1900:
tmp_date = time.mktime(date_obj.timetuple())
# The last field of localtime() is 1 when DST is in effect.
dst = time.localtime(tmp_date)[-1]
return dst
def utc_to_localtime(date_str, fmt="%Y-%m-%d %H:%M:%S", input_fmt="%Y-%m-%dT%H:%M:%SZ"):
"""
Convert UTC to localtime
Reference:
- (1) http://www.openarchives.org/OAI/openarchivesprotocol.html#Dates
- (2) http://www.w3.org/TR/NOTE-datetime
This function works only with dates complying with the
"Complete date plus hours, minutes and seconds" profile of
ISO 8601 defined by (2), and linked from (1).
Eg: 1994-11-05T13:15:30Z
"""
date_struct = datetime.strptime(date_str, input_fmt)
date_struct += timedelta(hours=get_dst(date_struct))
date_struct -= timedelta(seconds=time.timezone)
return strftime(fmt, date_struct)
def localtime_to_utc(date_str, fmt="%Y-%m-%dT%H:%M:%SZ", input_fmt="%Y-%m-%d %H:%M:%S"):
"""Convert localtime to UTC"""
date_struct = datetime.strptime(date_str, input_fmt)
date_struct -= timedelta(hours=get_dst(date_struct))
date_struct += timedelta(seconds=time.timezone)
return strftime(fmt, date_struct)
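# Illustrative usage sketch (not part of the original module); the exact output depends
# on the machine's local timezone, here assumed to be CET (UTC+1, no DST in November):
#
#     utc_to_localtime('1994-11-05T13:15:30Z')   # -> '1994-11-05 14:15:30'
#     localtime_to_utc('1994-11-05 14:15:30')    # -> '1994-11-05T13:15:30Z'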
def strptime(date_string, fmt):
return real_datetime(*(time.strptime(date_string, fmt)[:6]))
| gpl-2.0 |
quantifiedcode-bot/blitzdb | blitzdb/tests/test_querying.py | 2 | 10823 | from __future__ import absolute_import
from .fixtures import *
from blitzdb.tests.helpers.movie_data import Actor, Director, Movie
import blitzdb
def test_basic_delete(backend, small_test_data):
backend.filter(Actor, {}).delete()
backend.commit()
assert len(backend.filter(Actor, {})) == 0
def test_basic_storage(backend, small_test_data):
(movies, actors, directors) = small_test_data
assert len(backend.filter(Movie, {})) == len(movies)
assert len(backend.filter(Actor, {})) == len(actors)
#removed this functionality since it was misleading...
@pytest.mark.skipif(True, reason='Removed functionality')
def test_keys_with_dots(backend):
actor = Actor({'some.key.with.nasty.dots': [{'some.more.nasty.dots': 100}], 'pk': 'test'})
backend.save(actor)
backend.commit()
assert actor == backend.get(Actor, {'pk': 'test'})
def test_delete(backend):
actor = Actor({'foo' : 'bar'})
backend.save(actor)
backend.commit()
assert actor.foo == 'bar'
assert backend.get(Actor,{'pk' : actor.pk}).foo == 'bar'
del actor.foo
with pytest.raises(AttributeError):
actor.foo
with pytest.raises(KeyError):
actor['foo']
backend.save(actor)
backend.commit()
with pytest.raises(AttributeError):
backend.get(Actor,{'pk' : actor.pk}).foo
def test_negative_indexing(backend, small_test_data):
(movies, actors, directors) = small_test_data
actors = backend.filter(Actor, {})
assert actors[-1] == actors[len(actors) - 1]
assert actors[-10:-1] == actors[len(actors) - 10:len(actors) - 1]
assert actors[-len(actors):-1] == actors[0:len(actors) - 1]
# To do: Make step tests for file backend (MongoDB does not support this)
# assert actors[-10:-1:2] == actors[len(actors)-10:len(actors)-1:2]
def test_missing_keys_in_slice(backend, small_test_data):
(movies, actors, directors) = small_test_data
actors = backend.filter(Actor, {})
assert actors[:] == actors
assert actors[1:] == actors[1:len(actors)]
assert actors[:len(actors)] == actors[0:len(actors)]
def test_query_set(backend):
actors = [Actor({'foo': 'bar', 'value': 10}),
Actor({'foo': 'baz', 'value': 10}),
Actor({'foo': 'baz', 'value': 11}),
Actor({'foo': 'bar', 'value': 11})
]
for actor in actors:
backend.save(actor)
backend.commit()
queryset = backend.filter(Actor, {'foo': 'bar','value' : 10})
assert queryset.next() == actors[0]
def test_and_queries(backend):
backend.save(Actor({'foo': 'bar', 'value': 10}))
backend.save(Actor({'foo': 'baz', 'value': 10}))
backend.save(Actor({'foo': 'baz', 'value': 11}))
backend.save(Actor({'foo': 'bar', 'value': 11}))
backend.commit()
assert len(backend.filter(Actor, {'foo': 'bar'})) == 2
assert len(backend.filter(Actor, {'value': 10})) == 2
assert len(backend.filter(Actor, {'foo': 'bar', 'value': 10})) == 1
assert len(backend.filter(Actor, {'foo': 'baz', 'value': 10})) == 1
assert len(backend.filter(Actor, {'foo': 'bar', 'value': 11})) == 1
assert len(backend.filter(Actor, {'foo': 'baz', 'value': 11})) == 1
def test_composite_queries(backend):
backend.filter(Actor, {}).delete()
backend.save(Actor({'values': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}))
backend.save(Actor({'values': [7, 6, 5, 4, 3, 2, 1]}))
backend.save(Actor({'values': [1, 2, 3, 4]}))
backend.save(Actor({'values': [1, 2, 3, 4, {'foo': 'bar'}]}))
backend.save(Actor({'values': 'foobar'}))
backend.commit()
for f in (lambda: True, lambda: backend.create_index(Actor, 'values')):
assert len(backend.filter(Actor, {})) == 5
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4]})) == 1
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, {'foo': 'bar'}]})) == 1
assert len(backend.filter(Actor, {'values': [1, 2, 3, {'foo': 'bar'}, 4]})) == 0
assert len(backend.filter(Actor, {'values': [1, 2, 3, 4, 5]})) == 0
assert len(backend.filter(Actor, {'values': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})) == 0
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1]}})) == 4
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, {'foo': 'bar'}]}})) == 1
assert len(backend.filter(Actor, {'values': {'$all': [{'foo': 'bar'}]}})) == 1
assert len(backend.filter(Actor, {'values': {'$all': [4, 3, 2, 1, 14]}})) == 0
assert len(backend.filter(Actor, {'values': {'$all': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]}})) == 1
assert len(backend.filter(Actor, {'values': {'$in': [[1, 2, 3, 4], [7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 5], 'foobar']}})) == 3
def test_operators(backend):
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
leonardo_di_caprio = Actor({'name': 'Leonardo di Caprio', 'gross_income_m': 12.453, 'appearances': 34, 'is_funny': 'it depends', 'birth_year': 1974})
david_hasselhoff = Actor({'name': 'David Hasselhoff', 'gross_income_m': 12.453, 'appearances': 173, 'is_funny': True, 'birth_year': 1952})
charlie_chaplin = Actor({'name': 'Charlie Chaplin', 'gross_income_m': 0.371, 'appearances': 473, 'is_funny': True, 'birth_year': 1889})
backend.save(marlon_brando)
backend.save(leonardo_di_caprio)
backend.save(david_hasselhoff)
backend.save(charlie_chaplin)
backend.commit()
assert len(backend.filter(Actor, {})) == 4
for op, results in (('$gt', [david_hasselhoff]), ('$gte', [david_hasselhoff]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
query = {
'$and':
[
{'gross_income_m': {op: 1.0}},
{'is_funny': True}
]
}
assert len(backend.filter(Actor, query)) == len(results)
assert results in backend.filter(Actor, query)
for op, results in (('$gt', [david_hasselhoff, charlie_chaplin, marlon_brando]), ('$gte', [marlon_brando, david_hasselhoff, charlie_chaplin]), ('$lt', [charlie_chaplin]), ('$lte', [charlie_chaplin])):
query = {
'$and':
[
{'$or': [
{'gross_income_m': {op: 1.0}},
{'birth_year': {'$lt': 1900}},
]},
{'$or': [
{'is_funny': True},
{'name': 'Marlon Brando'},
]},
]
}
assert len(backend.filter(Actor, query)) == len(results)
assert results in backend.filter(Actor, query)
assert len(backend.filter(Actor, {'name': {'$ne': 'David Hasselhoff'}})) == 3
assert len(backend.filter(Actor, {'name': 'David Hasselhoff'})) == 1
assert len(backend.filter(Actor, {'name': {'$not': {'$in': ['David Hasselhoff', 'Marlon Brando', 'Charlie Chaplin']}}})) == 1
assert len(backend.filter(Actor, {'name': {'$in': ['Marlon Brando', 'Leonardo di Caprio']}})) == 2
def test_regex_operator(backend, small_test_data):
backend.filter(Actor, {}).delete()
marlon_brando = Actor({'name': 'Marlon Brando', 'gross_income_m': 1.453, 'appearances': 78, 'is_funny': False, 'birth_year': 1924})
marlon_wayans = Actor({'name': 'Marlon Wayans'})
backend.save(marlon_brando)
backend.save(marlon_wayans)
backend.commit()
assert backend.get(Actor, {'name': {'$regex': r'^Marlon\s+(?!Wayans)[\w]+$'}}) == marlon_brando
assert len(backend.filter(Actor, {'name': {'$regex': r'^Marlon\s+.*$'}})) == 2
assert len(backend.filter(Actor, {'name': {'$regex': r'^.*\s+Brando$'}})) == 1
def test_list_query(backend, small_test_data):
(movies, actors, directors) = small_test_data
movie = None
i = 0
while not movie or len(movie.cast) < 4:
movie = movies[i]
i += 1
actor = movie.cast[0]['actor']
other_movie = movies[i % len(movies)]
while other_movie in actor.movies:
other_movie = movies[i % len(movies)]
i += 1
assert actor in backend.filter(Actor, {'movies': movie})
assert actor not in backend.filter(Actor, {'movies': other_movie})
def test_list_query_multiple_items(backend, small_test_data):
(movies, actors, directors) = small_test_data
actor = None
i = 0
while not actor or len(actor.movies) < 2:
actor = actors[i]
i += 1
assert actor in backend.filter(Actor, {'movies': actor.movies})
def test_indexed_delete(backend, small_test_data):
all_movies = backend.filter(Movie, {})
for movie in all_movies:
backend.filter(Actor, {'movies': movie}).delete()
backend.commit()
for actor in backend.filter(Actor, {}):
assert actor.movies == []
def test_non_indexed_delete(backend, small_test_data):
(movies, actors, directors) = small_test_data
for movie in movies:
backend.filter(Director, {'movies': {'$all': [movie]}}).delete()
backend.commit()
for director in backend.filter(Director, {}):
assert director.movies == []
def test_positional_query(backend, small_test_data):
"""
We test a search query which explicitly references a given list item in an object
"""
(movies, actors, directors) = small_test_data
movie = None
i = 0
while not movie or len(movie.cast) < 3:
if len(movies[i].cast):
movie = movies[i]
actor = movie.cast[0]['actor']
index = actor.movies.index(movie)
i += 1
assert actor in backend.filter(Actor, {'movies.%d' % index: movie})
def test_default_backend(backend, small_test_data):
movies = backend.filter(Movie, {})
old_len = len(movies)
movie = movies[0]
movie.delete()
backend.commit()
with pytest.raises(Movie.DoesNotExist):
backend.get(Movie, {'pk': movie.pk})
assert old_len == len(backend.filter(Movie, {})) + 1
def test_index_reloading(backend, small_test_data):
(movies, actors, directors) = small_test_data
backend.filter(Actor, {'movies': movies[0]}).delete()
backend.commit()
assert list(backend.filter(Actor, {'movies': movies[0]})) == []
def test_query_function(backend):
if isinstance(backend, blitzdb.backends.mongo.Backend):
pytest.skip('Query by function is not supported for MongoDB')
Movie({'name': 'The Godfather', 'year': 1972}).save(backend)
Movie({'name': 'Goodfellas', 'year': 1990}).save(backend)
Movie({'name': 'Star Wars', 'year': 1977}).save(backend)
backend.commit()
movies = backend.filter(Movie, {
'year': lambda year: year >= 1970 and year <= 1979,
})
assert sorted([m.name for m in movies]) == ['Star Wars', 'The Godfather']
| mit |
google/objax | objax/jaxboard.py | 1 | 4637 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import os
from time import time
from typing import Union, Callable, Tuple, ByteString
import numpy as np
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.summary.writer.event_file_writer import EventFileWriter
from tensorboard.util.tensor_util import make_tensor_proto
from objax import util
class Reducer(enum.Enum):
"""Reduces tensor batch into a single tensor."""
FIRST = lambda x: x[0]
LAST = lambda x: x[-1]
MEAN = lambda x: np.mean(x)
class DelayedScalar:
def __init__(self, reduce: Union[Callable, Reducer]):
self.values = []
self.reduce = reduce
def __call__(self):
return self.reduce(self.values)
class Image:
def __init__(self, shape: Tuple[int, int, int], png: ByteString):
self.shape = shape
self.png = png
class Text:
def __init__(self, text: str):
self.text = text
class Summary(dict):
"""Writes entries to `Summary` protocol buffer."""
def image(self, tag: str, image: np.ndarray):
"""Adds image to the summary. Float image in [-1, 1] in CHW format expected."""
self[tag] = Image(image.shape, util.image.to_png(image))
def scalar(self, tag: str, value: float, reduce: Union[Callable, Reducer] = Reducer.MEAN):
"""Adds scalar to the summary."""
if tag not in self:
self[tag] = DelayedScalar(reduce)
self[tag].values.append(value)
def text(self, tag: str, text: str):
"""Adds text to the summary."""
self[tag] = Text(text)
def __call__(self):
entries = []
for tag, value in self.items():
if isinstance(value, DelayedScalar):
entries.append(summary_pb2.Summary.Value(tag=tag, simple_value=value()))
elif isinstance(value, Image):
image_summary = summary_pb2.Summary.Image(encoded_image_string=value.png,
colorspace=value.shape[0],
height=value.shape[1],
width=value.shape[2])
entries.append(summary_pb2.Summary.Value(tag=tag, image=image_summary))
elif isinstance(value, Text):
metadata = summary_pb2.SummaryMetadata(
plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name='text'))
entries.append(summary_pb2.Summary.Value(tag=tag, metadata=metadata,
tensor=make_tensor_proto(values=value.text.encode('utf-8'),
shape=(1,))))
else:
raise NotImplementedError(tag, value)
return summary_pb2.Summary(value=entries)
class SummaryWriter:
"""Writes entries to event files in the logdir to be consumed by Tensorboard."""
def __init__(self, logdir: str, queue_size: int = 5, write_interval: int = 5):
"""Creates SummaryWriter instance.
Args:
logdir: directory where event file will be written.
queue_size: size of the queue for pending events and summaries
before one of the 'add' calls forces a flush to disk.
write_interval: how often, in seconds, to write the pending events and summaries to disk.
"""
if not os.path.isdir(logdir):
os.makedirs(logdir, exist_ok=True)
self.writer = EventFileWriter(logdir, queue_size, write_interval)
def write(self, summary: Summary, step: int):
"""Adds on event to the event file."""
self.writer.add_event(event_pb2.Event(step=step, summary=summary(), wall_time=time()))
def close(self):
"""Flushes the event file to disk and close the file."""
self.writer.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
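# Illustrative usage sketch (not part of the original module); the log directory and
# tag names are arbitrary placeholders:
#
#     with SummaryWriter('logs/experiment') as tensorboard:
#         summary = Summary()
#         summary.scalar('losses/xe', 0.37)
#         summary.text('config', 'lr=0.01 batch=256')
#         tensorboard.write(summary, step=100)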
| apache-2.0 |
jviada/QuantEcon.py | quantecon/tests/tests_models/test_optgrowth.py | 7 | 2959 | """
tests for quantecon.models.optgrowth
@author : Spencer Lyon
@date : 2014-08-05 10:20:53
TODO: I'd really like to see why the solutions only match the analytical
counterpart up to 1e-2. Seems like we should be able to do better
than that.
"""
from __future__ import division
from math import log
import numpy as np
from nose.tools import (assert_equal, assert_true, assert_less_equal)
from quantecon import compute_fixed_point
from quantecon.models import GrowthModel
from quantecon.tests import (get_h5_data_file, get_h5_data_group, write_array,
max_abs_diff)
# helper parameters
_tol = 1e-6
# helper functions
def _new_solution(gm, f, grp):
"gets a new set of solution objects and updates the data file"
# compute value function and policy rule using vfi
v_init = 5 * gm.u(gm.grid) - 25
v = compute_fixed_point(gm.bellman_operator, v_init, error_tol=_tol,
max_iter=5000)
# sigma = gm.get_greedy(v)
# write all arrays to file
write_array(f, grp, v, "v")
# return data
return v
def _get_data(gm, force_new=False):
"get solution data from file, or create if necessary"
with get_h5_data_file() as f:
existed, grp = get_h5_data_group("optgrowth")
if force_new or not existed:
if existed:
grp.w._f_remove()
v = _new_solution(gm, f, grp)
return v
# if we made it here, the group exists and we should try to read
# existing solutions
try:
# Try reading data
v = grp.v[:]
except:
# doesn't exist. Let's create it
v = _new_solution(gm, f, grp)
return v
# model parameters
alpha = 0.65
f = lambda k: k ** alpha
beta = 0.95
u = np.log
grid_max = 2
grid_size = 150
gm = GrowthModel(f, beta, u, grid_max, grid_size)
v = _get_data(gm)
# compute analytical policy function
true_sigma = (1 - alpha * beta) * gm.grid**alpha
# compute analytical value function
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def v_star(k):
return c1 + c2 * np.log(k)
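# For reference: with f(k) = k**alpha, u = log and discount factor beta, the model has
# the known closed-form solution
#     v*(k) = c1 + c2*log(k),   c2 = alpha / (1 - alpha*beta),
#     c1 = (log(1 - alpha*beta) + (alpha*beta / (1 - alpha*beta)) * log(alpha*beta)) / (1 - beta),
#     sigma*(k) = (1 - alpha*beta) * k**alpha,
# which is what `c1`, `c2`, `v_star` and `true_sigma` above encode.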
def test_h5_access():
"optgrowth: test access to data file"
assert_true(v is not None)
def test_bellman_return_both():
"optgrowth: bellman_operator compute_policy option works"
assert_equal(len(gm.bellman_operator(v, compute_policy=True)), 2)
def test_analytical_policy():
"optgrowth: approx sigma matches analytical"
sigma = gm.compute_greedy(v)
assert_less_equal(max_abs_diff(sigma, true_sigma), 1e-2)
def test_analytical_vf():
"optgrowth: approx v matches analytical"
true_v = v_star(gm.grid)
assert_less_equal(max_abs_diff(v[1:-1], true_v[1:-1]), 5e-2)
def test_vf_fixed_point():
"optgrowth: solution is fixed point of bellman"
new_v = gm.bellman_operator(v)
assert_less_equal(max_abs_diff(v[1:-1], new_v[1:-1]), 5e-2)
| bsd-3-clause |
haiyangd/Gelatin | src/Gelatin/util.py | 2 | 3376 | import generator
from parser import Parser
from compiler import SyntaxCompiler
def compile_string(syntax):
"""
Builds a converter from the given syntax and returns it.
@type syntax: str
@param syntax: A Gelatin syntax.
@rtype: compiler.Context
@return: The compiled converter.
"""
return Parser().parse_string(syntax, SyntaxCompiler())
def compile(syntax_file):
"""
Like compile_string(), but reads the syntax from the file with the
given name.
@type syntax_file: str
@param syntax_file: Name of a file containing Gelatin syntax.
@rtype: compiler.Context
@return: The compiled converter.
"""
return Parser().parse(syntax_file, SyntaxCompiler())
def generate(converter, input_file, format = 'xml'):
"""
Given a converter (as returned by compile()), this function reads
the given input file and converts it to the requested output format.
Supported output formats are 'xml', 'yaml', 'json', or 'none'.
@type converter: compiler.Context
@param converter: The compiled converter.
@type input_file: str
@param input_file: Name of a file to convert.
@type format: str
@param format: The output format.
@rtype: str
@return: The resulting output.
"""
with open(input_file) as thefile:
return generate_string(converter, thefile.read(), format = format)
def generate_to_file(converter, input_file, output_file, format = 'xml'):
"""
Like generate(), but writes the output to the given output file
instead.
@type converter: compiler.Context
@param converter: The compiled converter.
@type input_file: str
@param input_file: Name of a file to convert.
@type output_file: str
@param output_file: The output filename.
@type format: str
@param format: The output format.
@rtype: str
@return: The resulting output.
"""
with open(output_file, 'w') as thefile:
result = generate(converter, input_file, format = format)
thefile.write(result)
def generate_string(converter, input, format = 'xml'):
"""
Like generate(), but reads the input from a string instead of
from a file.
@type converter: compiler.Context
@param converter: The compiled converter.
@type input: str
@param input: The string to convert.
@type format: str
@param format: The output format.
@rtype: str
@return: The resulting output.
"""
builder = generator.new(format)
if builder is None:
raise TypeError('invalid output format ' + repr(format))
converter.parse_string(input, builder)
return builder.serialize()
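# Illustrative usage sketch (not part of the original module); the file name, syntax
# string and input text are made-up placeholders, not a complete Gelatin grammar:
#
#     converter = compile('my_syntax.gel')          # or compile_string(syntax_text)
#     output = generate_string(converter, 'some input text', format='xml')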
def generate_string_to_file(converter, input, output_file, format = 'xml'):
"""
Like generate(), but reads the input from a string instead of
from a file, and writes the output to the given output file.
@type converter: compiler.Context
@param converter: The compiled converter.
@type input: str
@param input: The string to convert.
@type output_file: str
@param output_file: The output filename.
@type format: str
@param format: The output format.
@rtype: str
@return: The resulting output.
"""
with open(output_file, 'w') as thefile:
result = generate_string(converter, input, format = format)
thefile.write(result)
| gpl-2.0 |
yglazko/socorro | socorro/unittest/lib/test_search_common.py | 4 | 16114 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
from nose.tools import eq_, ok_, assert_raises
from configman import ConfigurationManager, Namespace
from socorro.external import BadArgumentError
from socorro.lib import datetimeutil
from socorro.lib.search_common import (
SearchBase, SearchParam, convert_to_type, get_parameters, restrict_fields
)
from socorro.unittest.testbase import TestCase
SUPERSEARCH_FIELDS_MOCKED_RESULTS = {
'signature': {
'name': 'signature',
'data_validation_type': 'str',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'product': {
'name': 'product',
'data_validation_type': 'enum',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'version': {
'name': 'version',
'data_validation_type': 'str',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'date': {
'name': 'date',
'data_validation_type': 'datetime',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'build_id': {
'name': 'build_id',
'data_validation_type': 'int',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'process_type': {
'name': 'process_type',
'data_validation_type': 'enum',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'hang_type': {
'name': 'hang_type',
'data_validation_type': 'enum',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
'user_comments': {
'name': 'user_comments',
'data_validation_type': 'str',
'namespace': 'processed_crash',
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
},
}
def _get_config_manager():
required_config = Namespace()
webapi = Namespace()
webapi.search_default_date_range = 7
webapi.search_maximum_date_range = 365
required_config.webapi = webapi
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
argv_source=[]
)
return config_manager
class TestSearchBase(TestCase):
def test_get_parameters(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
args = {
'signature': 'mysig',
'product': 'WaterWolf',
'version': '1.0',
}
params = search.get_parameters(**args)
for i in ('signature', 'product', 'version'):
ok_(i in params)
ok_(isinstance(params[i], list))
ok_(isinstance(params[i][0], SearchParam))
eq_(params[i][0].operator, '')
args = {
'signature': '~js',
'product': ['WaterWolf', 'NightTrain'],
'process_type': '=crash',
}
params = search.get_parameters(**args)
eq_(params['signature'][0].operator, '~')
eq_(params['signature'][0].value, 'js')
eq_(params['product'][0].operator, '')
# Test that params with no operator are stacked
eq_(
params['product'][0].value,
['WaterWolf', 'NightTrain']
)
eq_(params['process_type'][0].operator, '')
args = {
'signature': ['~Mark', '$js'],
}
params = search.get_parameters(**args)
eq_(params['signature'][0].operator, '~')
eq_(params['signature'][0].value, 'Mark')
eq_(params['signature'][1].operator, '$')
eq_(params['signature'][1].value, 'js')
args = {
'build_id': ['>20000101000000', '<20150101000000'],
}
params = search.get_parameters(**args)
eq_(params['build_id'][0].operator, '>')
eq_(params['build_id'][0].value, 20000101000000)
eq_(params['build_id'][1].operator, '<')
eq_(params['build_id'][1].value, 20150101000000)
def test_get_parameters_with_not(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
args = {
'signature': '!~mysig',
'product': '!WaterWolf',
'version': '1.0',
'user_comments': '!__null__',
}
params = search.get_parameters(**args)
eq_(params['signature'][0].operator, '~')
ok_(params['signature'][0].operator_not)
eq_(params['signature'][0].value, 'mysig')
eq_(params['product'][0].operator, '')
ok_(params['product'][0].operator_not)
eq_(params['version'][0].operator, '')
ok_(not params['version'][0].operator_not)
eq_(params['user_comments'][0].operator, '__null__')
ok_(params['user_comments'][0].operator_not)
def test_get_parameters_date_defaults(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
now = datetimeutil.utc_now()
# Test default values when nothing is passed
params = search.get_parameters()
ok_('date' in params)
eq_(len(params['date']), 2)
# Pass only the high value
args = {
'date': '<%s' % datetimeutil.date_to_string(now)
}
params = search.get_parameters(**args)
ok_('date' in params)
eq_(len(params['date']), 2)
eq_(params['date'][0].operator, '<')
eq_(params['date'][1].operator, '>=')
eq_(params['date'][0].value.date(), now.date())
eq_(
params['date'][1].value.date(),
now.date() - datetime.timedelta(days=7)
)
# Pass only the low value
pasttime = now - datetime.timedelta(days=10)
args = {
'date': '>=%s' % datetimeutil.date_to_string(pasttime)
}
params = search.get_parameters(**args)
ok_('date' in params)
eq_(len(params['date']), 2)
eq_(params['date'][0].operator, '<=')
eq_(params['date'][1].operator, '>=')
eq_(params['date'][0].value.date(), now.date())
eq_(params['date'][1].value.date(), pasttime.date())
# Pass the two values
pasttime = now - datetime.timedelta(days=10)
args = {
'date': [
'<%s' % datetimeutil.date_to_string(now),
'>%s' % datetimeutil.date_to_string(pasttime),
]
}
params = search.get_parameters(**args)
ok_('date' in params)
eq_(len(params['date']), 2)
eq_(params['date'][0].operator, '<')
eq_(params['date'][1].operator, '>')
eq_(params['date'][0].value.date(), now.date())
eq_(params['date'][1].value.date(), pasttime.date())
def test_get_parameters_date_max_range(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
assert_raises(
BadArgumentError,
search.get_parameters,
date='>1999-01-01'
)
def test_process_type_parameter_correction(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
args = {
'process_type': 'browser'
}
params = search.get_parameters(**args)
ok_('process_type' in params)
eq_(len(params['process_type']), 1)
eq_(params['process_type'][0].value, [''])
eq_(params['process_type'][0].operator, '__null__')
eq_(params['process_type'][0].operator_not, False)
def test_hang_type_parameter_correction(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
args = {
'hang_type': 'hang'
}
params = search.get_parameters(**args)
ok_('hang_type' in params)
eq_(len(params['hang_type']), 1)
eq_(params['hang_type'][0].value, [-1, 1])
args = {
'hang_type': 'crash'
}
params = search.get_parameters(**args)
ok_('hang_type' in params)
eq_(len(params['hang_type']), 1)
eq_(params['hang_type'][0].value, [0])
def test_version_parameter_correction(self):
with _get_config_manager().context() as config:
search = SearchBase(
config=config,
fields=SUPERSEARCH_FIELDS_MOCKED_RESULTS,
)
args = {
'version': ['38.0b']
}
params = search.get_parameters(**args)
ok_('version' in params)
eq_(len(params['version']), 1)
eq_(params['version'][0].value, '38.0b')
eq_(params['version'][0].operator, '$')
ok_(not params['version'][0].operator_not)
args = {
'version': ['1.9b2', '1.9b', '!2.9b', '^.0b']
}
params = search.get_parameters(**args)
ok_('version' in params)
eq_(len(params['version']), 4)
for param in params['version']:
assert param.operator in ('$', '^', '')
if param.operator == '$' and not param.operator_not:
# starts with, this one was made up.
eq_(param.value, '1.9b')
elif param.operator == '$' and param.operator_not:
# starts with, this one was made up.
eq_(param.value, '2.9b')
elif param.operator == '^':
eq_(param.value, '.0b')
elif param.operator == '':
eq_(param.value, ['1.9b2'])
# =============================================================================
class TestSearchCommon(TestCase):
"""Test functions of the search_common module. """
def test_convert_to_type(self):
# Test null
res = convert_to_type(None, 'datetime')
ok_(res is None)
# Test integer
res = convert_to_type(12, 'int')
ok_(isinstance(res, int))
eq_(res, 12)
# Test integer
res = convert_to_type('12', 'int')
ok_(isinstance(res, int))
eq_(res, 12)
# Test string
res = convert_to_type(datetime.datetime(2012, 1, 1), 'str')
ok_(isinstance(res, str))
eq_(res, '2012-01-01 00:00:00')
# Test boolean
res = convert_to_type(1, 'bool')
ok_(isinstance(res, bool))
ok_(res)
# Test boolean
res = convert_to_type('T', 'bool')
ok_(isinstance(res, bool))
ok_(res)
# Test boolean
res = convert_to_type(14, 'bool')
ok_(isinstance(res, bool))
ok_(not res)
# Test datetime
res = convert_to_type('2012-01-01T12:23:34', 'datetime')
ok_(isinstance(res, datetime.datetime))
eq_(res.year, 2012)
eq_(res.month, 1)
eq_(res.hour, 12)
# Test date
res = convert_to_type('2012-01-01T00:00:00', 'date')
ok_(isinstance(res, datetime.date))
eq_(res.year, 2012)
eq_(res.month, 1)
# Test error
assert_raises(ValueError, convert_to_type, 'abds', 'int')
assert_raises(ValueError, convert_to_type, '2013-02-32', 'date')
# -------------------------------------------------------------------------
def test_get_parameters(self):
"""
Test search_common.get_parameters()
"""
# Empty params, only default values are returned
params = get_parameters({})
ok_(params)
for i in params:
typei = type(params[i])
if i in ("from_date", "to_date", "build_from", "build_to"):
ok_(typei is datetime.datetime)
else:
ok_(
not params[i] or
typei is int or
typei is str or
typei is list
)
# Empty params
params = get_parameters({
"terms": "",
"fields": "",
"products": "",
"from_date": "",
"to_date": "",
"versions": "",
"reasons": "",
"release_channels": "",
"os": "",
"search_mode": "",
"build_ids": "",
"report_process": "",
"report_type": "",
"plugin_in": "",
"plugin_search_mode": "",
"plugin_terms": ""
})
assert params, "SearchCommon.get_parameters() returned something " \
"empty or null."
for i in params:
typei = type(params[i])
if i in ("from_date", "to_date", "build_from", "build_to"):
ok_(typei is datetime.datetime)
else:
ok_(
not params[i] or
typei is int or
typei is str or
typei is list
)
# Test with encoded slashes in terms and signature
params = get_parameters({
"terms": ["some", "terms/sig"],
"signature": "my/little/signature"
})
ok_("signature" in params)
ok_("terms" in params)
eq_(params["terms"], ["some", "terms/sig"])
eq_(params["signature"], "my/little/signature")
# -------------------------------------------------------------------------
def test_restrict_fields(self):
"""
Test search_common.restrict_fields()
"""
authorized_fields = ['signature', 'dump']
fields = ["signatute", "signature", "123456sfdgerw&$%#&", "dump",
None, "dump"]
theoric_fields = ["signature", "dump"]
restricted_fields = restrict_fields(fields, authorized_fields)
eq_(restricted_fields, theoric_fields)
fields = []
theoric_fields = ["signature"]
restricted_fields = restrict_fields(fields, authorized_fields)
eq_(restricted_fields, theoric_fields)
fields = None
theoric_fields = ["signature"]
restricted_fields = restrict_fields(fields, authorized_fields)
eq_(restricted_fields, theoric_fields)
fields = ["nothing"]
theoric_fields = ["signature"]
restricted_fields = restrict_fields(fields, authorized_fields)
eq_(restricted_fields, theoric_fields)
assert_raises(ValueError, restrict_fields, fields, [])
assert_raises(TypeError, restrict_fields, fields, None)
| mpl-2.0 |
tod31/pyload | module/plugins/accounts/RPNetBiz.py | 5 | 2680 | # -*- coding: utf-8 -*-
from module.plugins.internal.MultiAccount import MultiAccount
from module.plugins.internal.misc import json
class RPNetBiz(MultiAccount):
__name__ = "RPNetBiz"
__type__ = "account"
__version__ = "0.19"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """RPNet.biz account plugin"""
__license__ = "GPLv3"
__authors__ = [("Dman", "[email protected]")]
def grab_hosters(self, user, password, data):
res = self.load("https://premium.rpnet.biz/client_api.php",
get={'username': user,
'password': password,
'action' : "showHosterList"})
hoster_list = json.loads(res)
#: If account is not valid thera are no hosters available
if 'error' in hoster_list:
return []
#: Extract hosters from json file
return hoster_list['hosters']
def grab_info(self, user, password, data):
#: Get account information from rpnet.biz
res = self.get_account_status(user, password)
try:
if res['accountInfo']['isPremium']:
#: Parse account info. Change the trafficleft later to support per host info.
account_info = {'validuntil': float(res['accountInfo']['premiumExpiry']),
'trafficleft': -1, 'premium': True}
else:
account_info = {'validuntil': None, 'trafficleft': None, 'premium': False}
except KeyError:
#: Handle wrong password exception
account_info = {'validuntil': None, 'trafficleft': None, 'premium': False}
return account_info
def signin(self, user, password, data):
#: Get account information from rpnet.biz
res = self.get_account_status(user, password)
#: If we have an error in the res, we have wrong login information
if 'error' in res:
self.fail_login()
def get_account_status(self, user, password):
#: Using the rpnet API, check if valid premium account
res = self.load("https://premium.rpnet.biz/client_api.php",
get={'username': user, 'password': password,
'action': "showAccountInformation"})
self.log_debug("JSON data: %s" % res)
return json.loads(res)
| gpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_2/django/contrib/gis/db/backends/oracle/creation.py | 620 | 2283 | from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.util import truncate_name
class OracleCreation(DatabaseCreation):
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(OracleCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
output.append(style.SQL_KEYWORD('INSERT INTO ') +
style.SQL_TABLE('USER_SDO_GEOM_METADATA') +
' (%s, %s, %s, %s)\n ' % tuple(map(qn, ['TABLE_NAME', 'COLUMN_NAME', 'DIMINFO', 'SRID'])) +
style.SQL_KEYWORD(' VALUES ') + '(\n ' +
style.SQL_TABLE(gqn(db_table)) + ',\n ' +
style.SQL_FIELD(gqn(f.column)) + ',\n ' +
style.SQL_KEYWORD("MDSYS.SDO_DIM_ARRAY") + '(\n ' +
style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") +
("('LONG', %s, %s, %s),\n " % (f._extent[0], f._extent[2], f._tolerance)) +
style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") +
("('LAT', %s, %s, %s)\n ),\n" % (f._extent[1], f._extent[3], f._tolerance)) +
' %s\n );' % f.srid)
if f.spatial_index:
# Getting the index name, Oracle doesn't allow object
# names > 30 characters.
idx_name = truncate_name('%s_%s_id' % (db_table, f.column), 30)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn(idx_name)) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) + '(' +
style.SQL_FIELD(qn(f.column)) + ') ' +
style.SQL_KEYWORD('INDEXTYPE IS ') +
style.SQL_TABLE('MDSYS.SPATIAL_INDEX') + ';')
return output
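# Illustrative sketch (not part of the original module): for a hypothetical
# model with db_table "geo_city", geometry column "point" and srid 4326, the
# statements built above come out roughly as
#   INSERT INTO USER_SDO_GEOM_METADATA ("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
#    VALUES ('geo_city', 'point', MDSYS.SDO_DIM_ARRAY(
#      MDSYS.SDO_DIM_ELEMENT('LONG', -180.0, 180.0, 0.05),
#      MDSYS.SDO_DIM_ELEMENT('LAT', -90.0, 90.0, 0.05)), 4326);
#   CREATE INDEX "geo_city_point_id" ON "geo_city"("point")
#    INDEXTYPE IS MDSYS.SPATIAL_INDEX;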
| mit |
mwaskom/seaborn | doc/tools/generate_logos.py | 2 | 6982 | import numpy as np
import seaborn as sns
from matplotlib import patches
import matplotlib.pyplot as plt
from scipy.signal import gaussian
from scipy.spatial import distance
XY_CACHE = {}
STATIC_DIR = "_static"
plt.rcParams["savefig.dpi"] = 300
def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):
"""Find positions using poisson-disc sampling."""
# See http://bost.ocks.org/mike/algorithms/
rng = np.random.default_rng(seed)
uniform = rng.uniform
randint = rng.integers
# Cache the results
key = array_radius, pad_radius, seed
if key in XY_CACHE:
return XY_CACHE[key]
# Start at a fixed point we know will work
start = np.zeros(d)
samples = [start]
queue = [start]
while queue:
# Pick a sample to expand from
s_idx = randint(len(queue))
s = queue[s_idx]
for i in range(candidates):
# Generate a candidate from this sample
coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)
# Check the three conditions to accept the candidate
in_array = np.sqrt(np.sum(coords ** 2)) < array_radius
in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)
if in_array and in_ring:
# Accept the candidate
samples.append(coords)
queue.append(coords)
break
if (i + 1) == candidates:
# We've exhausted the particular sample
queue.pop(s_idx)
samples = np.array(samples)
XY_CACHE[key] = samples
return samples
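# Illustrative usage sketch (not part of the original script); the variable
# name below is hypothetical:
#
#   pts = poisson_disc_sample(array_radius=20, pad_radius=2, seed=0)
#   # pts is an (N, 2) array of offsets around the origin, no two of which
#   # are closer together than pad_radius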
def logo(
ax,
color_kws, ring, ring_idx, edge,
pdf_means, pdf_sigma, dy, y0, w, h,
hist_mean, hist_sigma, hist_y0, lw, skip,
scatter, pad, scale,
):
# Square, invisible axes with specified limits to center the logo
ax.set(xlim=(35 + w, 95 - w), ylim=(-3, 53))
ax.set_axis_off()
ax.set_aspect('equal')
# Magic numbers for the logo circle
radius = 27
center = 65, 25
# Full x and y grids for a gaussian curve
x = np.arange(101)
y = gaussian(x.size, pdf_sigma)
x0 = 30 # Magic number
xx = x[x0:]
# Vertical distances between the PDF curves
n = len(pdf_means)
dys = np.linspace(0, (n - 1) * dy, n) - (n * dy / 2)
dys -= dys.mean()
# Compute the PDF curves with vertical offsets
pdfs = [h * (y[x0 - m:-m] + y0 + dy) for m, dy in zip(pdf_means, dys)]
# Add in constants to fill from bottom and to top
pdfs.insert(0, np.full(xx.shape, -h))
pdfs.append(np.full(xx.shape, 50 + h))
# Color gradient
colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)
# White fill between curves and around edges
bg = patches.Circle(
center, radius=radius - 1 + ring, color="white",
transform=ax.transData, zorder=0,
)
ax.add_artist(bg)
# Clipping artist (not shown) for the interior elements
fg = patches.Circle(center, radius=radius - edge, transform=ax.transData)
# Ring artist to surround the circle (optional)
if ring:
wedge = patches.Wedge(
center, r=radius + edge / 2, theta1=0, theta2=360, width=edge / 2,
transform=ax.transData, color=colors[ring_idx], alpha=1
)
ax.add_artist(wedge)
# Add histogram bars
if hist_mean:
hist_color = colors.pop(0)
hist_y = gaussian(x.size, hist_sigma)
hist = 1.1 * h * (hist_y[x0 - hist_mean:-hist_mean] + hist_y0)
dx = x[skip] - x[0]
hist_x = xx[::skip]
hist_h = h + hist[::skip]
# Magic number to avoid tiny sliver of bar on edge
use = hist_x < center[0] + radius * .5
bars = ax.bar(
hist_x[use], hist_h[use], bottom=-h, width=dx,
align="edge", color=hist_color, ec="w", lw=lw,
zorder=3,
)
for bar in bars:
bar.set_clip_path(fg)
# Add each smooth PDF "wave"
for i, pdf in enumerate(pdfs[1:], 1):
u = ax.fill_between(xx, pdfs[i - 1] + w, pdf, color=colors[i - 1], lw=0)
u.set_clip_path(fg)
# Add scatterplot in top wave area
if scatter:
seed = sum(map(ord, "seaborn logo"))
xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)
clearance = distance.cdist(xy + center, np.c_[xx, pdfs[-2]])
use = clearance.min(axis=1) > pad / 1.8
x, y = xy[use].T
sizes = (x - y) % 9
points = ax.scatter(
x + center[0], y + center[1], s=scale * (10 + sizes * 5),
zorder=5, color=colors[-1], ec="w", lw=scale / 2,
)
path = u.get_paths()[0]
points.set_clip_path(path, transform=u.get_transform())
u.set_visible(False)
def savefig(fig, shape, variant):
fig.subplots_adjust(0, 0, 1, 1, 0, 0)
facecolor = (1, 1, 1, 1) if bg == "white" else (1, 1, 1, 0)
for ext in ["png", "svg"]:
fig.savefig(f"{STATIC_DIR}/logo-{shape}-{variant}bg.{ext}", facecolor=facecolor)
if __name__ == "__main__":
for bg in ["white", "light", "dark"]:
color_idx = -1 if bg == "dark" else 0
kwargs = dict(
color_kws=dict(start=.3, rot=-.4, light=.8, dark=.3, reverse=True),
ring=True, ring_idx=color_idx, edge=1,
pdf_means=[8, 24], pdf_sigma=16,
dy=1, y0=1.8, w=.5, h=12,
hist_mean=2, hist_sigma=10, hist_y0=.6, lw=1, skip=6,
scatter=True, pad=1.8, scale=.5,
)
color = sns.cubehelix_palette(**kwargs["color_kws"])[color_idx]
# ------------------------------------------------------------------------ #
fig, ax = plt.subplots(figsize=(2, 2), facecolor="w", dpi=100)
logo(ax, **kwargs)
savefig(fig, "mark", bg)
# ------------------------------------------------------------------------ #
fig, axs = plt.subplots(1, 2, figsize=(8, 2), dpi=100,
gridspec_kw=dict(width_ratios=[1, 3]))
logo(axs[0], **kwargs)
font = {
"family": "avenir",
"color": color,
"weight": "regular",
"size": 120,
}
axs[1].text(.01, .35, "seaborn", ha="left", va="center",
fontdict=font, transform=axs[1].transAxes)
axs[1].set_axis_off()
savefig(fig, "wide", bg)
# ------------------------------------------------------------------------ #
fig, axs = plt.subplots(2, 1, figsize=(2, 2.5), dpi=100,
gridspec_kw=dict(height_ratios=[4, 1]))
logo(axs[0], **kwargs)
font = {
"family": "avenir",
"color": color,
"weight": "regular",
"size": 34,
}
axs[1].text(.5, 1, "seaborn", ha="center", va="top",
fontdict=font, transform=axs[1].transAxes)
axs[1].set_axis_off()
savefig(fig, "tall", bg)
| bsd-3-clause |
omnirom/android_kernel_htc_flounder | scripts/tracing/dma-api/plotting.py | 96 | 4043 | """Ugly graph drawing tools"""
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
#import numpy as np
from matplotlib import cbook
# http://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
def __init__(self, artists, tolerance=5, offsets=(-20, 20),
template='x: %0.2f\ny: %0.2f', display_all=False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"template" is the format string to be used. Note: For compatibility
with older versions of python, this uses the old-style (%)
formatting specification.
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self.template = template
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
xytext=self.offsets, textcoords='offset points', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
try:
annotation = self.annotations[event.artist.axes]
except KeyError:
return
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
annotation.xy = x, y
annotation.set_text(self.template % (x, y))
annotation.set_visible(True)
event.canvas.draw()
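# Illustrative usage sketch (not part of the original file); "line" is a
# hypothetical artist:
#
#   fig, ax = plt.subplots()
#   line, = ax.plot(range(10), "ro-")
#   DataCursor(line)
#   plt.show()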
def plotseries(*serieslabels):
"""Plot lists of series in separate axes, tie time axis together"""
global fig
fig, axes = plt.subplots(nrows=len(serieslabels), sharex=True)
for subplot, ax in zip(serieslabels, axes):
for ser, lab in zip(*subplot): # subplot = ([x], [y])
ax.step(ser[0], ser[1], label=lab, where="post")
ax.grid(True)
ax.legend()
(DataCursor(ax.lines))
plt.grid(True)
plt.show()
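# Illustrative usage sketch (not part of the original file): each positional
# argument is a ([series, ...], [label, ...]) pair and each series is an
# (x, y) pair of sequences. The names below are hypothetical:
#
#   plotseries(([(times, counts)], ["allocations"]),
#              ([(times, bytes_used)], ["bytes mapped"]))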
def disp_pic(bitmap):
"""Display the allocation bitmap. TODO."""
fig=plt.figure()
a=fig.add_subplot(1,1,1)
fig.clf()
implt=plt.imshow(bitmap, extent=(0, len(bitmap[0]), 0, len(bitmap)),
interpolation="nearest", cmap=cmap.gist_heat)
fig.canvas.draw()
plt.show()
| gpl-2.0 |
bjackman/workload-automation | wlauto/workloads/rt_app/__init__.py | 2 | 11959 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import json
import tarfile
from collections import OrderedDict
from subprocess import CalledProcessError
from wlauto import Workload, Parameter, Executable, File
from wlauto.exceptions import WorkloadError, ResourceError
from wlauto.instrumentation import instrument_is_enabled
from wlauto.utils.misc import check_output
RAW_OUTPUT_FILENAME = 'raw-output.txt'
TARBALL_FILENAME = 'rtapp-logs.tar.gz'
BINARY_NAME = 'rt-app'
PACKAGED_USE_CASE_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), 'use_cases'))
PLOAD_REGEX = re.compile(r'pLoad = (\d+)(\w+) : calib_cpu (\d+)')
ERROR_REGEX = re.compile(r'error')
CRIT_REGEX = re.compile(r'crit')
class RtApp(Workload):
# pylint: disable=no-member,attribute-defined-outside-init
name = 'rt-app'
description = """
A test application that simulates configurable real-time periodic load.
rt-app is a test application that starts multiple periodic threads in order to
simulate a real-time periodic load. It supports SCHED_OTHER, SCHED_FIFO,
SCHED_RR as well as the AQuoSA framework and SCHED_DEADLINE.
The load is described using JSON-like config files. Below are a couple of simple
examples.
    Simple use case which creates a thread that runs for 20ms then sleeps for
    80ms until the use case is stopped with Ctrl+C:
.. code-block:: json
{
"tasks" : {
"thread0" : {
"loop" : -1,
"run" : 20000,
"sleep" : 80000
}
},
"global" : {
"duration" : 2,
"calibration" : "CPU0",
"default_policy" : "SCHED_OTHER",
"pi_enabled" : false,
"lock_pages" : false,
"logdir" : "./",
"log_basename" : "rt-app1",
"ftrace" : false,
"gnuplot" : true,
}
}
    Simple use case with 2 threads that each run for 10ms and wake each
    other up until the use case is stopped with Ctrl+C:
.. code-block:: json
{
"tasks" : {
"thread0" : {
"loop" : -1,
"run" : 10000,
"resume" : "thread1",
"suspend" : "thread0"
},
"thread1" : {
"loop" : -1,
"run" : 10000,
"resume" : "thread0",
"suspend" : "thread1"
}
}
}
    Please refer to the existing configs in ``$WA_ROOT/wlauto/workloads/rt_app/use_cases``
for more examples.
The version of rt-app currently used with this workload contains enhancements and
modifications done by Linaro. The source code for this version may be obtained here:
http://git.linaro.org/power/rt-app.git
The upstream version of rt-app is hosted here:
https://github.com/scheduler-tools/rt-app
"""
parameters = [
Parameter('config', kind=str, default='taskset',
description='''
Use case configuration file to run with rt-app. This may be
either the name of one of the "standard" configurations included
                        with the workload, or a path to a custom JSON file provided by
the user. Either way, the ".json" extension is implied and will
be added automatically if not specified in the argument.
The following is the list of standard configurations currently
included with the workload: {}
'''.format(', '.join(os.listdir(PACKAGED_USE_CASE_DIRECTORY)))),
Parameter('duration', kind=int,
description='''
Duration of the workload execution in Seconds. If specified, this
will override the corresponding parameter in the JSON config.
'''),
Parameter('taskset_mask', kind=int,
description='Constrain execution to specific CPUs.'),
Parameter('uninstall_on_exit', kind=bool, default=False,
description="""
If set to ``True``, rt-app binary will be uninstalled from the device
at the end of the run.
"""),
Parameter('force_install', kind=bool, default=False,
description="""
If set to ``True``, rt-app binary will always be deployed to the
target device at the beginning of the run, regardless of whether it
was already installed there.
"""),
]
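    # Illustrative values (not part of the original workload): 'config' may
    # name a packaged use case such as 'taskset' (looked up under use_cases/,
    # with '.json' implied) or point at a custom file such as
    # '/home/user/my_case.json'.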
def initialize(self, context):
# initialize() runs once per run. setting a class variable to make it
# available to other instances of the workload
RtApp.device_working_directory = self.device.path.join(self.device.working_directory,
'rt-app-working')
RtApp.host_binary = context.resolver.get(Executable(self,
self.device.abi,
BINARY_NAME), strict=False)
RtApp.workgen_script = context.resolver.get(File(self, 'workgen'))
if not self.device.is_rooted: # some use cases require root privileges
raise WorkloadError('rt-app requires the device to be rooted.')
self.device.execute('mkdir -p {}'.format(self.device_working_directory))
self._deploy_rt_app_binary_if_necessary()
def setup(self, context):
self.log_basename = context.spec.label
self.host_json_config = self._load_json_config(context)
self.config_file_on_device = self.device.path.join(self.device_working_directory,
os.path.basename(self.host_json_config))
self.device.push_file(self.host_json_config, self.config_file_on_device, timeout=60)
self.command = '{} {}'.format(self.device_binary, self.config_file_on_device)
time_buffer = 30
self.timeout = self.duration + time_buffer
def run(self, context):
self.output = self.device.invoke(self.command,
on_cpus=self.taskset_mask,
timeout=self.timeout,
as_root=True)
def update_result(self, context):
self._pull_rt_app_logs(context)
context.result.classifiers.update(dict(
duration=self.duration,
task_count=self.task_count,
))
outfile = os.path.join(context.output_directory, RAW_OUTPUT_FILENAME)
with open(outfile, 'w') as wfh:
wfh.write(self.output)
error_count = 0
crit_count = 0
for line in self.output.split('\n'):
match = PLOAD_REGEX.search(line)
if match:
pload_value = match.group(1)
pload_unit = match.group(2)
calib_cpu_value = match.group(3)
context.result.add_metric('pLoad', float(pload_value), pload_unit)
context.result.add_metric('calib_cpu', float(calib_cpu_value))
error_match = ERROR_REGEX.search(line)
if error_match:
error_count += 1
crit_match = CRIT_REGEX.search(line)
if crit_match:
crit_count += 1
context.result.add_metric('error_count', error_count, 'count')
context.result.add_metric('crit_count', crit_count, 'count')
def finalize(self, context):
if self.uninstall_on_exit:
self.device.uninstall(self.device_binary)
self.device.execute('rm -rf {}'.format(self.device_working_directory))
def _deploy_rt_app_binary_if_necessary(self):
# called from initialize() so gets invoked once per run
RtApp.device_binary = self.device.get_binary_path("rt-app")
if self.force_install or not RtApp.device_binary:
if not self.host_binary:
message = '''rt-app is not installed on the device and could not be
found in workload resources'''
raise ResourceError(message)
RtApp.device_binary = self.device.install(self.host_binary)
def _load_json_config(self, context):
user_config_file = self._get_raw_json_config(context.resolver)
config_file = self._generate_workgen_config(user_config_file,
context.output_directory)
with open(config_file) as fh:
config_data = json.load(fh, object_pairs_hook=OrderedDict)
self._update_rt_app_config(config_data)
self.duration = config_data['global'].get('duration', 0)
self.task_count = len(config_data.get('tasks', []))
with open(config_file, 'w') as wfh:
json.dump(config_data, wfh, indent=4)
return config_file
def _get_raw_json_config(self, resolver):
if os.path.splitext(self.config)[1] != '.json':
self.config += '.json'
if os.path.isfile(self.config):
return os.path.abspath(self.config)
partial_path = os.path.join('use_cases', self.config)
return resolver.get(File(self, partial_path))
def _generate_workgen_config(self, user_file, output_directory):
output_file = os.path.join(output_directory, 'unkind.json')
# use workgen dry run option to generate a use case
# file with proper JSON grammar on host first
try:
check_output('python {} -d -o {} {}'.format(self.workgen_script,
output_file,
user_file),
shell=True)
except CalledProcessError as e:
message = 'Could not generate config using workgen, got "{}"'
raise WorkloadError(message.format(e))
return output_file
def _update_rt_app_config(self, config_data):
config_data['global'] = config_data.get('global', {})
config_data['global']['logdir'] = self.device_working_directory
config_data['global']['log_basename'] = self.log_basename
if self.duration is not None:
config_data['global']['duration'] = self.duration
def _pull_rt_app_logs(self, context):
tar_command = '{} tar czf {}/{} -C {} .'.format(self.device.busybox,
self.device_working_directory,
TARBALL_FILENAME,
self.device_working_directory)
self.device.execute(tar_command, timeout=300)
device_path = self.device.path.join(self.device_working_directory, TARBALL_FILENAME)
host_path = os.path.join(context.output_directory, TARBALL_FILENAME)
self.device.pull_file(device_path, host_path, timeout=120)
with tarfile.open(host_path, 'r:gz') as tf:
tf.extractall(context.output_directory)
os.remove(host_path)
self.device.execute('rm -rf {}/*'.format(self.device_working_directory))
| apache-2.0 |
manderson23/NewsBlur | apps/rss_feeds/icon_importer.py | 2 | 14448 | import urllib2
import lxml.html
import numpy
import scipy
import scipy.misc
import scipy.cluster
import urlparse
import struct
import operator
import gzip
import datetime
import requests
import httplib
from PIL import BmpImagePlugin, PngImagePlugin, Image
from socket import error as SocketError
from boto.s3.key import Key
from StringIO import StringIO
from django.conf import settings
from apps.rss_feeds.models import MFeedPage, MFeedIcon
from utils.facebook_fetcher import FacebookFetcher
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from requests.packages.urllib3.exceptions import LocationParseError
class IconImporter(object):
def __init__(self, feed, page_data=None, force=False):
self.feed = feed
self.force = force
self.page_data = page_data
self.feed_icon = MFeedIcon.get_feed(feed_id=self.feed.pk)
def save(self):
if not self.force and self.feed.favicon_not_found:
# print 'Not found, skipping...'
return
if (
not self.force
and not self.feed.favicon_not_found
and self.feed_icon.icon_url
and self.feed.s3_icon
):
# print 'Found, but skipping...'
return
if 'facebook.com' in self.feed.feed_address:
image, image_file, icon_url = self.fetch_facebook_image()
else:
image, image_file, icon_url = self.fetch_image_from_page_data()
if not image:
image, image_file, icon_url = self.fetch_image_from_path(force=self.force)
if image:
image = self.normalize_image(image)
try:
color = self.determine_dominant_color_in_image(image)
except IndexError:
return
try:
image_str = self.string_from_image(image)
except TypeError:
return
if len(image_str) > 500000:
image = None
if (image and
(self.force or
self.feed_icon.data != image_str or
self.feed_icon.icon_url != icon_url or
self.feed_icon.not_found or
(settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))):
logging.debug(" ---> [%-30s] ~SN~FBIcon difference:~FY color:%s (%s/%s) data:%s url:%s notfound:%s no-s3:%s" % (
self.feed.log_title[:30],
self.feed_icon.color != color, self.feed_icon.color, color,
self.feed_icon.data != image_str,
self.feed_icon.icon_url != icon_url,
self.feed_icon.not_found,
settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))
self.feed_icon.data = image_str
self.feed_icon.icon_url = icon_url
self.feed_icon.color = color
self.feed_icon.not_found = False
self.feed_icon.save()
if settings.BACKED_BY_AWS.get('icons_on_s3'):
self.save_to_s3(image_str)
if self.feed.favicon_color != color:
self.feed.favicon_color = color
self.feed.favicon_not_found = False
self.feed.save(update_fields=['favicon_color', 'favicon_not_found'])
if not image:
self.feed_icon.not_found = True
self.feed_icon.save()
self.feed.favicon_not_found = True
self.feed.save()
return not self.feed.favicon_not_found
def save_to_s3(self, image_str):
expires = datetime.datetime.now() + datetime.timedelta(days=60)
expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
k = Key(settings.S3_CONN.get_bucket(settings.S3_ICONS_BUCKET_NAME))
k.key = self.feed.s3_icons_key
k.set_metadata('Content-Type', 'image/png')
k.set_metadata('Expires', expires)
k.set_contents_from_string(image_str.decode('base64'))
k.set_acl('public-read')
self.feed.s3_icon = True
self.feed.save()
def load_icon(self, image_file, index=None):
'''
DEPRECATED
Load Windows ICO image.
See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
description.
Cribbed and modified from http://djangosnippets.org/snippets/1287/
'''
try:
image_file.seek(0)
header = struct.unpack('<3H', image_file.read(6))
except Exception, e:
return
# Check magic
if header[:2] != (0, 1):
return
# Collect icon directories
directories = []
for i in xrange(header[2]):
directory = list(struct.unpack('<4B2H2I', image_file.read(16)))
for j in xrange(3):
if not directory[j]:
directory[j] = 256
directories.append(directory)
if index is None:
# Select best icon
directory = max(directories, key=operator.itemgetter(slice(0, 3)))
else:
directory = directories[index]
# Seek to the bitmap data
image_file.seek(directory[7])
prefix = image_file.read(16)
image_file.seek(-16, 1)
if PngImagePlugin._accept(prefix):
# Windows Vista icon with PNG inside
try:
image = PngImagePlugin.PngImageFile(image_file)
except IOError:
return
else:
# Load XOR bitmap
try:
image = BmpImagePlugin.DibImageFile(image_file)
except IOError:
return
if image.mode == 'RGBA':
# Windows XP 32-bit color depth icon without AND bitmap
pass
else:
# Patch up the bitmap height
image.size = image.size[0], image.size[1] >> 1
d, e, o, a = image.tile[0]
image.tile[0] = d, (0, 0) + image.size, o, a
# Calculate AND bitmap dimensions. See
# http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
# for description
offset = o + a[1] * image.size[1]
stride = ((image.size[0] + 31) >> 5) << 2
size = stride * image.size[1]
# Load AND bitmap
image_file.seek(offset)
string = image_file.read(size)
mask = Image.frombytes('1', image.size, string, 'raw',
('1;I', stride, -1))
image = image.convert('RGBA')
image.putalpha(mask)
return image
def fetch_image_from_page_data(self):
image = None
image_file = None
if self.page_data:
content = self.page_data
elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
key = settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME).get_key(self.feed.s3_pages_key)
compressed_content = key.get_contents_as_string()
stream = StringIO(compressed_content)
gz = gzip.GzipFile(fileobj=stream)
try:
content = gz.read()
except IOError:
content = None
else:
content = MFeedPage.get_data(feed_id=self.feed.pk)
url = self._url_from_html(content)
if not url:
try:
content = requests.get(self.cleaned_feed_link).content
url = self._url_from_html(content)
except (AttributeError, SocketError, requests.ConnectionError,
requests.models.MissingSchema, requests.sessions.InvalidSchema,
requests.sessions.TooManyRedirects,
requests.models.InvalidURL,
requests.models.ChunkedEncodingError,
requests.models.ContentDecodingError,
httplib.IncompleteRead,
LocationParseError, OpenSSLError, PyAsn1Error,
ValueError), e:
logging.debug(" ---> ~SN~FRFailed~FY to fetch ~FGfeed icon~FY: %s" % e)
if url:
image, image_file = self.get_image_from_url(url)
return image, image_file, url
@property
def cleaned_feed_link(self):
if self.feed.feed_link.startswith('http'):
return self.feed.feed_link
return 'http://' + self.feed.feed_link
def fetch_image_from_path(self, path='favicon.ico', force=False):
image = None
url = None
if not force:
url = self.feed_icon.icon_url
if not url and self.feed.feed_link and len(self.feed.feed_link) > 6:
try:
url = urlparse.urljoin(self.feed.feed_link, 'favicon.ico')
except ValueError:
url = None
if not url:
return None, None, None
image, image_file = self.get_image_from_url(url)
if not image:
url = urlparse.urljoin(self.feed.feed_link, '/favicon.ico')
image, image_file = self.get_image_from_url(url)
# print 'Found: %s - %s' % (url, image)
return image, image_file, url
def fetch_facebook_image(self):
facebook_fetcher = FacebookFetcher(self.feed)
url = facebook_fetcher.favicon_url()
image, image_file = self.get_image_from_url(url)
if not image:
url = urlparse.urljoin(self.feed.feed_link, '/favicon.ico')
image, image_file = self.get_image_from_url(url)
# print 'Found: %s - %s' % (url, image)
return image, image_file, url
def get_image_from_url(self, url):
# print 'Requesting: %s' % url
if not url:
return None, None
@timelimit(30)
def _1(url):
headers = {
'User-Agent': 'NewsBlur Favicon Fetcher - %s subscriber%s - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
'Safari/534.48.3)' %
(
self.feed.num_subscribers,
's' if self.feed.num_subscribers != 1 else '',
self.feed.permalink
),
'Connection': 'close',
'Accept': 'image/png,image/x-icon,image/*;q=0.9,*/*;q=0.8'
}
try:
request = urllib2.Request(url, headers=headers)
icon = urllib2.urlopen(request).read()
except Exception:
return None
return icon
try:
icon = _1(url)
except TimeoutError:
return None, None
try:
icon_file = StringIO(icon)
image = Image.open(icon_file)
except (IOError, ValueError):
return None, None
return image, icon_file
def _url_from_html(self, content):
url = None
if not content:
return url
try:
if isinstance(content, unicode):
content = content.encode('utf-8')
icon_path = lxml.html.fromstring(content).xpath(
'//link[@rel="icon" or @rel="shortcut icon"]/@href'
)
except (lxml.etree.ParserError, TypeError):
return url
if icon_path:
if str(icon_path[0]).startswith('http'):
url = icon_path[0]
else:
url = urlparse.urljoin(self.feed.feed_link, icon_path[0])
return url
def normalize_image(self, image):
# if image.size != (16, 16):
# image = image.resize((16, 16), Image.BICUBIC)
if image.mode != 'RGBA':
try:
image = image.convert('RGBA')
except IOError:
pass
return image
def determine_dominant_color_in_image(self, image):
NUM_CLUSTERS = 5
# Convert image into array of values for each point.
if image.mode == '1':
image.convert('L')
ar = numpy.array(image)
# ar = scipy.misc.fromimage(image)
shape = ar.shape
# Reshape array of values to merge color bands. [[R], [G], [B], [A]] => [R, G, B, A]
if len(shape) > 2:
ar = ar.reshape(scipy.product(shape[:2]), shape[2])
# Get NUM_CLUSTERS worth of centroids.
ar = ar.astype(numpy.float)
codes, _ = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
# Pare centroids, removing blacks and whites and shades of really dark and really light.
original_codes = codes
for low, hi in [(60, 200), (35, 230), (10, 250)]:
codes = scipy.array([code for code in codes
if not ((code[0] < low and code[1] < low and code[2] < low) or
(code[0] > hi and code[1] > hi and code[2] > hi))])
if not len(codes):
codes = original_codes
else:
break
# Assign codes (vector quantization). Each vector is compared to the centroids
# and assigned the nearest one.
vecs, _ = scipy.cluster.vq.vq(ar, codes)
        # Count occurrences of each clustered vector.
counts, bins = scipy.histogram(vecs, len(codes))
# Show colors for each code in its hex value.
# colors = [''.join(chr(c) for c in code).encode('hex') for code in codes]
# total = scipy.sum(counts)
# print dict(zip(colors, [count/float(total) for count in counts]))
# Find the most frequent color, based on the counts.
index_max = scipy.argmax(counts)
peak = codes.astype(int)[index_max]
color = ''.join(chr(c) for c in peak).encode('hex')
return color[:6]
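    # Note added for clarity (not in the original source): the method above
    # returns the dominant colour as a six-character hex string such as
    # 'a0b1c2' (the value shown is only an example).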
def string_from_image(self, image):
output = StringIO()
image.save(output, 'png', quality=95)
contents = output.getvalue()
output.close()
return contents.encode('base64')
| mit |
tcheehow/MissionPlanner | Lib/dircache.py | 67 | 1167 | """Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
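# Illustrative usage sketch (not part of the original module); "entries" is a
# hypothetical variable:
#
#   entries = listdir('/tmp')
#   annotate('/tmp', entries)  # directory names now carry a trailing '/'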
from warnings import warnpy3k
warnpy3k("the dircache module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import os
__all__ = ["listdir", "opendir", "annotate", "reset"]
cache = {}
def reset():
"""Reset the cache completely."""
global cache
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
mtime = os.stat(path).st_mtime
if mtime != cached_mtime:
list = os.listdir(path)
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
| gpl-3.0 |
alizamus/pox_controller | pox/lib/recoco/recoco.py | 38 | 24045 | # Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import deque
from Queue import PriorityQueue
from Queue import Queue
import time
import threading
from threading import Thread
import select
import traceback
import os
import socket
import pox.lib.util
import random
from pox.lib.epoll_select import EpollSelect
CYCLE_MAXIMUM = 2
# A ReturnFunction can return this to skip a scheduled slice at the last
# moment.
ABORT = object()
defaultScheduler = None
nextTaskID = 0
def generateTaskID ():
global nextTaskID
nextTaskID += 1
return nextTaskID
class BaseTask (object):
id = None
#running = False
priority = 1
@classmethod
def new (cls, *args, **kw):
"""
Creates a task and starts it on the default scheduler with the
default priority.
"""
o = cls(*args, **kw)
o.start(fast=True)
return o
def __init__ (self, *args, **kw):
#NOTE: keep in sync with Task.__init__ !
# (better yet, refactor)
self.id = generateTaskID()
self.gen = self.run(*args, **kw)
self.rv = None
self.rf = None # ReturnFunc
def start (self, scheduler = None, priority = None, fast = False):
"""
Schedules this task.
See Scheduler.schedule() and Scheduler.fast_schedule() for the meaning
of the 'fast' argument.
"""
if scheduler is None: scheduler = defaultScheduler
if priority != None: self.priority = priority
if fast:
scheduler.fast_schedule(self)
else:
scheduler.schedule(self)
def execute (self):
if self.rf is not None:
v = self.rf(self)
self.rf = None
self.rv = None
if v == ABORT:
return False
else:
v = self.rv
self.rv = None
return self.gen.send(v)
def run (self):
print("Dummy task")
yield 0
class Task (BaseTask):
"""
Provides an interface close to threading.Thread
"""
def __init__ (self, group=None, target=None, name=None, args=(), kwargs={}):
#NOTE: keep in sync with BaseTask.__init__ !
# (better yet, refactor)
assert(group == None) # Not supported
self.id = generateTaskID()
self.rv = None
self.name = name
if name == None: self.name = str(self.id)
self.target = target
self.args = args
self.kwargs = kwargs
self.gen = self.run(*args, **kwargs)
BaseTask.__init__(self)
def run (self):
g = self.target(*self.args, **self.kwargs)
g.next()
while True:
g.send((yield))
def __str__ (self):
return "<" + self.__class__.__name__ + "/tid" + str(self.name) + ">"
class Scheduler (object):
""" Scheduler for Tasks """
def __init__ (self, isDefaultScheduler = None, startInThread = True,
daemon = False, useEpoll=False):
self._ready = deque()
self._hasQuit = False
self._selectHub = SelectHub(self, useEpoll=useEpoll)
self._thread = None
self._event = threading.Event()
self._lock = threading.Lock()
self._callLaterTask = None
self._allDone = False
global defaultScheduler
if isDefaultScheduler or (isDefaultScheduler is None and
defaultScheduler is None):
defaultScheduler = self
if startInThread:
self.runThreaded(daemon)
def __del__ (self):
self._hasQuit = True
super(Scheduler, self).__del__()
def callLater (self, func, *args, **kw):
"""
Calls func with the given arguments at some later point, within this
scheduler. This is a good way for another thread to call something in
a co-op-thread-safe manner.
"""
with self._lock:
if self._callLaterTask is None:
self._callLaterTask = CallLaterTask()
self._callLaterTask.start()
self._callLaterTask.callLater(func, *args, **kw)
def runThreaded (self, daemon = False):
self._thread = Thread(target = self.run)
self._thread.daemon = daemon
self._thread.start()
def synchronized (self):
return Synchronizer(self)
def schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
Unlike fast_schedule(), this method will not schedule a task to run
multiple times. The one exception is if a Task actually schedules
itself. The easiest way to avoid this is simply not to do it.
See fast_schedule() and ScheduleTask for more info.
"""
if threading.current_thread() is self._thread:
# We're know we're good.
#TODO: Refactor the following with ScheduleTask
if task in self._ready:
# It might make sense to keep a flag on the task, since checking
# if it's in the ready list is not very efficient.
# Not sure if it makes sense to print out a message here or not.
import logging
logging.getLogger("recoco").info("Task %s scheduled multiple " +
"times", task)
return False
self.fast_schedule(task, first)
return True
st = ScheduleTask(self, task)
st.start(fast=True)
def fast_schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
This method does not protect you from scheduling the same Task more
than once, which you probably really don't want to do.
If you are scheduling an existing Task (waking it) from another Task,
you should either implement your own logic to ensure that you don't
schedule it multiple times, or you should just use schedule().
If you are scheduling an existing Task (waking it) from any thread
besides the one the scheduler is running on, there's a race condition
which makes it nontrivial to ensure that multiple schedulings never
happen, and you should just use schedule() for such Tasks.
If you are scheduling a new Task that you just created, this method
is always safe.
"""
# Sanity check. Won't catch all cases.
assert task not in self._ready
if first:
self._ready.appendleft(task)
else:
self._ready.append(task)
self._event.set()
def quit (self):
self._hasQuit = True
def run (self):
try:
while self._hasQuit == False:
if len(self._ready) == 0:
self._event.wait(CYCLE_MAXIMUM) # Wait for a while
self._event.clear()
if self._hasQuit: break
r = self.cycle()
finally:
#print("Scheduler done")
self._hasQuit = True
self._selectHub._cycle()
self._allDone = True
def cycle (self):
#if len(self._ready) == 0: return False
# Patented hilarious priority system
#TODO: Replace it with something better
t = None
try:
while True:
t = self._ready.popleft()
if t.priority >= 1: break
if len(self._ready) == 0: break
if t.priority >= random.random(): break
self._ready.append(t)
except IndexError:
return False
#print(len(self._ready), "tasks")
try:
rv = t.execute()
except StopIteration:
return True
except:
try:
print("Task", t, "caused exception and was de-scheduled")
traceback.print_exc()
except:
pass
return True
if isinstance(rv, BlockingOperation):
try:
rv.execute(t, self)
except:
print("Task", t, "caused exception during a blocking operation and " +
"was de-scheduled")
traceback.print_exc()
elif rv is False:
# Just unschedule/sleep
#print "Unschedule", t, rv
pass
elif type(rv) == int or type(rv) == long or type(rv) == float:
# Sleep time
if rv == 0:
#print "sleep 0"
self._ready.append(t)
else:
self._selectHub.registerTimer(t, rv)
elif rv == None:
raise RuntimeError("Must yield a value!")
return True
#TODO: Read() and Write() BlockingOperations that use nonblocking sockets with
# SelectHub and do post-processing of the return value.
class BlockingOperation (object):
"""
A base class for what can be thought of as syscalls for Tasks.
The separation between __init__ and execute may seem sort of artificial, but
it serves an actual purpose, which is that it makes it impossible for a task
to accidentally start to make a syscall (by instantiating a BlockingOperation)
without actually yielding.
"""
def __init__ (self):
""" When the syscall is made by a task, this is executed """
pass
def execute (self, task, scheduler):
""" Scheduler calls this to actually execute the syscall """
pass
class CallBlocking (BlockingOperation):
"""
Syscall that calls an actual blocking operation (like a real .recv()).
In order to keep from blocking, it calls it on another thread.
The return value is (ret_val, exc_info), one of which is always None.
"""
@classmethod
def new (_cls, _func, *_args, **_kw):
return _cls(_func, *_args, **_kw)
def __init__ (self, func, args=(), kw={}):
self.t = None
self.scheduler = None
self.task = None
self.func = func
self.args = args
self.kw = kw
def _proc (self):
try:
self.task.rv = (self.func(*self.args, **self.kw), None)
except:
import sys
self.task.rv = (None, sys.exc_info())
self.scheduler.fast_schedule(self.task)
def execute (self, task, scheduler):
self.task = task
self.scheduler = scheduler
#NOTE: It might be nice to use a pool here
self.t = threading.Thread(target=self._proc)
#pool.add(self._proc)
self.t.daemon = True
self.t.start()
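# Illustrative usage sketch (not part of the original file): from inside a
# Task's run() generator a blocking call can be pushed onto a worker thread
# with something like (the function and argument are hypothetical)
#
#   rv, exc = yield CallBlocking(socket.gethostbyname, args=("example.org",))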
class Exit (BlockingOperation):
"""
Syscall that kills the scheduler
"""
def __init__ (self):
pass
def execute (self, task, scheduler):
scheduler.quit()
class Sleep (BlockingOperation):
"""
  Sleep for a specified amount of time (seconds)
  None means unschedule (i.e., sleep until an outside force wakes it)
0 means reschedule for later (no additional time)
"""
def __init__ (self, timeToWake = None, absoluteTime = False):
if absoluteTime == False and timeToWake != None: timeToWake += time.time()
self._t = timeToWake
def execute (self, task, scheduler):
if self._t is None:
# Just unschedule
return
    if self._t == 0 or self._t < time.time():
# Just reschedule
scheduler.fast_schedule(task)
return
scheduler._selectHub.registerTimer(task, self._t, True) # A bit ugly
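# Illustrative usage sketch (not part of the original file): inside a Task's
# run() generator, "yield Sleep(5)" suspends the task for roughly five
# seconds, while "yield Sleep()" unschedules it until something wakes it.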
class Select (BlockingOperation):
"""
Should be very similar to Python select.select()
"""
def __init__ (self, *args, **kw):
self._args = args
self._kw = kw
def execute (self, task, scheduler):
scheduler._selectHub.registerSelect(task, *self._args, **self._kw)
defaultRecvFlags = 0
try:
defaultRecvFlags = socket.MSG_DONTWAIT
except:
pass
class Recv (BlockingOperation):
def __init__ (self, fd, bufsize = 1024*8, flags = defaultRecvFlags,
timeout = None):
"""
Recv call on fd.
"""
self._fd = fd
self._length = bufsize
self._timeout = timeout
self._flags = flags
def _recvReturnFunc (self, task):
# Select() will have placed file descriptors in rv
if len(task.rv[2]) != 0 or len(task.rv[0]) == 0:
# Socket error
task.rv = None
return None
sock = task.rv[0][0]
task.rv = None
try:
return sock.recv(self._length, self._flags)
except:
traceback.print_exc()
      return None
def execute (self, task, scheduler):
task.rf = self._recvReturnFunc
scheduler._selectHub.registerSelect(task, [self._fd], None, [self._fd],
timeout=self._timeout)
class Send (BlockingOperation):
def __init__ (self, fd, data):
self._fd = fd
self._data = data
self._sent = 0
self._scheduler = None
def _sendReturnFunc (self, task):
# Select() will have placed file descriptors in rv
sock = task.rv[1]
if len(task.rv[2]) != 0:
# Socket error
task.rv = None
return self._sent
task.rv = None
try:
if len(self._data) > 1024:
data = self._data[:1024]
self._data = self._data[1024:]
l = sock.send(data, flags = socket.MSG_DONTWAIT)
self._sent += l
if l == len(data) and len(self._data) == 0:
return self._sent
self._data = data[l:] + self._data
except:
pass
# Still have data to send...
self.execute(task, self._scheduler)
return ABORT
def execute (self, task, scheduler):
self._scheduler = scheduler
task.rf = self._sendReturnFunc
scheduler._selectHub.registerSelect(task, None, [self._fd], [self._fd])
#TODO: just merge this in with Scheduler?
class SelectHub (object):
"""
This class is a single select() loop that handles all Select() requests for
a scheduler as well as timed wakes (i.e., Sleep()).
"""
def __init__ (self, scheduler, useEpoll=False):
# We store tuples of (elapse-time, task)
self._sleepers = [] # Sleeping items stored as a heap
self._incoming = Queue() # Threadsafe queue for new items
self._scheduler = scheduler
self._pinger = pox.lib.util.makePinger()
self.epoll = EpollSelect() if useEpoll else None
self._ready = False
self._thread = Thread(target = self._threadProc)
self._thread.daemon = True
self._thread.start()
# Ugly busy wait for initialization
#while self._ready == False:
def _threadProc (self):
tasks = {}
timeouts = []
rets = {}
while self._scheduler._hasQuit == False:
#print("SelectHub cycle")
if len(timeouts) == 0:
timeout = None
else:
timeout = self._sleepers[0][0] - time.time()
if timeout < 0: timeout = 0
#NOTE: Everything you select on eventually boils down to file descriptors,
# which are unique, obviously. It might be possible to leverage this
# to reduce hashing cost (i.e. by picking a really good hashing
# function), though this is complicated by wrappers, etc...
rl = {}
wl = {}
xl = {}
timeout = None
timeoutTask = None
now = time.time()
expired = None
for t,trl,twl,txl,tto in tasks.itervalues():
if tto != None:
if tto <= now:
# Already expired
if expired is None: expired = []
expired.append(t)
if tto-now > 0.1: print("preexpired",tto,now,tto-now)
continue
tt = tto - now
if tt < timeout or timeout is None:
timeout = tt
timeoutTask = t
if trl:
for i in trl: rl[i] = t
if twl:
for i in twl: wl[i] = t
if txl:
for i in txl: xl[i] = t
if expired:
for t in expired:
del tasks[t]
self._return(t, ([],[],[]))
if timeout is None: timeout = CYCLE_MAXIMUM
if self.epoll:
ro, wo, xo = self.epoll.select( rl.keys() + [self._pinger],
wl.keys(),
xl.keys(), timeout )
else:
ro, wo, xo = select.select( rl.keys() + [self._pinger],
wl.keys(),
xl.keys(), timeout )
if len(ro) == 0 and len(wo) == 0 and len(xo) == 0 and timeoutTask != None:
# IO is idle - dispatch timers / release timeouts
del tasks[timeoutTask]
self._return(timeoutTask, ([],[],[]))
else:
# We have IO events
if self._pinger in ro:
self._pinger.pongAll()
while not self._incoming.empty():
stuff = self._incoming.get(True)
task = stuff[0]
assert task not in tasks
tasks[task] = stuff
self._incoming.task_done()
if len(ro) == 1 and len(wo) == 0 and len(xo) == 0:
# Just recycle
continue
ro.remove(self._pinger)
# At least one thread is going to be resumed
for i in ro:
task = rl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][0].append(i)
for i in wo:
task = wl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][1].append(i)
for i in xo:
task = xl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][2].append(i)
for t,v in rets.iteritems():
del tasks[t]
self._return(t, v)
rets.clear()
def registerSelect (self, task, rlist = None, wlist = None, xlist = None,
timeout = None, timeIsAbsolute = False):
if not timeIsAbsolute:
if timeout != None:
timeout += time.time()
self._incoming.put((task, rlist, wlist, xlist, timeout))
self._cycle()
def _cycle (self):
"""
Cycle the wait thread so that new timers or FDs can be picked up
"""
self._pinger.ping()
def registerTimer (self, task, timeToWake, timeIsAbsolute = False):
"""
    Register a task to be woken up timeToWake units in the future.
    This means timeToWake seconds from now if timeIsAbsolute is False.
"""
return self.registerSelect(task, None, None, None, timeToWake,
timeIsAbsolute)
def _return (self, sleepingTask, returnVal):
#print("reschedule", sleepingTask)
sleepingTask.rv = returnVal
self._scheduler.fast_schedule(sleepingTask)
class ScheduleTask (BaseTask):
"""
If multiple real threads (such as a recoco scheduler thread and any
other thread, or any two other threads) try to schedule ("wake") the
same Task with Scheduler.fast_schedule(), there is a race condition where
the Task may get scheduled multiple times, which is probably quite bad.
Scheduler.schedule() fixes this by creating one of these ScheduleTasks,
and it's this ScheduleTask that actually calls fast_schedule(). This
way, the Task is only ever *really* scheduled from the scheduler thread
and the race condition doesn't exist.
"""
def __init__ (self, scheduler, task):
BaseTask.__init__(self)
self._scheduler = scheduler
self._task = task
def run (self):
#TODO: Refactor the following, since it is copy/pasted from schedule().
if self._task in self._scheduler._ready:
# It might make sense to keep a flag on the task, since checking
# if it's in the ready list is not very efficient.
# Not sure if it makes sense to print out a message here or not.
import logging
logging.getLogger("recoco").info("Task %s scheduled multiple " +
"times", self._task)
else:
self._scheduler.fast_schedule(self._task, True)
yield False
class SyncTask (BaseTask):
def __init__ (self, *args, **kw):
BaseTask.__init__(self)
self.inlock = threading.Lock()
self.outlock = threading.Lock()
self.inlock.acquire()
self.outlock.acquire()
def run (self):
self.inlock.release()
self.outlock.acquire()
class Synchronizer (object):
def __init__ (self, scheduler = None):
if scheduler is None:
scheduler = defaultScheduler
self.scheduler = scheduler
self.syncer = None
self.enter = 0
def __enter__ (self):
self.enter += 1
if self.enter == 1:
self.syncer = SyncTask()
self.syncer.start(self.scheduler) #NOTE: maybe add it to head of list?
self.syncer.inlock.acquire()
return self.syncer
def __exit__ (self, type_, value, traceback):
self.enter -= 1
if self.enter == 0:
self.syncer.outlock.release()
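# Illustrative usage sketch (not part of the original file): code on another
# thread can hold the scheduler still while it inspects shared state, e.g.
# (the body of the with-block is hypothetical)
#
#   with scheduler.synchronized():
#     inspect_shared_state()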
class Timer (Task):
"""
A simple timer.
timeToWake Amount of time to wait before calling callback (seconds)
callback Some callable to be called when the timer expires
absoluteTime A specific time to fire (as from time.time())
recurring Whether to call repeatedly or just once
args, kw Args and keyword args for the callback
scheduler The recoco scheduler to use (None means default scheduler)
started If False, requires you to call .start() to begin timer
selfStoppable If True, the callback can return False to cancel the timer
"""
def __init__ (self, timeToWake, callback, absoluteTime = False,
recurring = False, args = (), kw = {}, scheduler = None,
started = True, selfStoppable = True):
if absoluteTime and recurring:
raise RuntimeError("Can't have a recurring timer for an absolute time!")
Task.__init__(self)
self._self_stoppable = selfStoppable
self._next = timeToWake
self._interval = timeToWake if recurring else 0
if not absoluteTime:
self._next += time.time()
self._cancelled = False
self._recurring = recurring
self._callback = callback
self._args = args
self._kw = kw
if started: self.start(scheduler)
def cancel (self):
self._cancelled = True
def run (self):
while not self._cancelled:
yield Sleep(timeToWake=self._next, absoluteTime=True)
if self._cancelled: break
self._next = time.time() + self._interval
rv = self._callback(*self._args,**self._kw)
if self._self_stoppable and (rv is False): break
if not self._recurring: break
yield False # Quit
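# Illustrative usage sketch (not part of the original file): call a
# hypothetical callback every 2.5 seconds until it returns False or the
# timer is cancelled.
#
#   t = Timer(2.5, my_callback, recurring = True)
#   ...
#   t.cancel()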
class CallLaterTask (BaseTask):
def __init__ (self):
BaseTask.__init__(self)
self._pinger = pox.lib.util.makePinger()
from collections import deque
self._calls = deque()
def callLater (self, func, *args, **kw):
assert callable(func)
self._calls.append((func,args,kw))
self._pinger.ping()
def run (self):
while True:
yield Select([self._pinger], None, None)
self._pinger.pongAll()
try:
while True:
e = self._calls.popleft()
try:
e[0](*e[1], **e[2])
except:
import logging
logging.getLogger("recoco").exception("Exception calling %s", e[0])
except:
pass
class BlockingTask (BaseTask):
@classmethod
def new (_cls, _func, _cb=None, *_args, **_kw):
return _cls(_func, _cb, *_args, **_kw)
def __init__ (self, func, callback=None, args=(), kw={}):
"""
callback takes two parameters: rv and exc. One is always None.
if callback is actually a tuple, the first one is called with
the return value on normal exit, the second is called with
exc_info on an exception.
"""
BaseTask.__init__(self)
self.func = func
self.callback = callback
self.args = args
self.kw = kw
def run (self):
rv,exc = (yield CallBlocking(self.func, args=self.args, kw=self.kw))
if self.callback is None:
pass
elif isinstance(self.callback, tuple):
if exc is not None:
if self.callback[1] is not None:
self.callback[1](exc)
else:
if self.callback[0] is not None:
self.callback[0](rv)
else:
self.callback(rv,exc)
# Sanity tests
if __name__ == "__main__":
class TestTask (BaseTask):
def __init__ (self, *args, **kw):
BaseTask.__init__(self, *args, **kw)
def run (self, a, b, inc = 1, sleep = 0):
n = a
while n <= b:
print(n)
n+=inc
yield Select([],[],[],sleep)
s = Scheduler(daemon=True)
t = TestTask(5,10,sleep=10)
t.start()
t = TestTask(100,110,sleep=20)
t.start()
#TestTask(1000,1010,sleep=1).start()
import code
code.interact(local=locals())
s.quit()
| apache-2.0 |
cchurch/ansible | test/units/modules/network/f5/test_bigip_profile_fastl4.py | 16 | 5811 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_fastl4 import ApiParameters
from library.modules.bigip_profile_fastl4 import ModuleParameters
from library.modules.bigip_profile_fastl4 import ModuleManager
from library.modules.bigip_profile_fastl4 import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_fastl4 import ApiParameters
from ansible.modules.network.f5.bigip_profile_fastl4 import ModuleParameters
from ansible.modules.network.f5.bigip_profile_fastl4 import ModuleManager
from ansible.modules.network.f5.bigip_profile_fastl4 import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
idle_timeout=100,
client_timeout=101,
description='description one',
explicit_flow_migration=False,
ip_df_mode='pmtu',
ip_tos_to_client=102,
ip_tos_to_server=103,
ip_ttl_v4=104,
ip_ttl_v6=105,
ip_ttl_mode='proxy',
keep_alive_interval=106,
late_binding=True,
link_qos_to_client=7,
link_qos_to_server=6,
loose_close=False,
loose_initialization=True,
mss_override=4,
reassemble_fragments=True,
receive_window_size=109,
reset_on_timeout=False,
rtt_from_client=True,
rtt_from_server=False,
server_sack=True,
server_timestamp=False,
syn_cookie_mss=110,
tcp_close_timeout=111,
tcp_generate_isn=True,
tcp_handshake_timeout=112,
tcp_strip_sack=False,
tcp_time_wait_timeout=113,
tcp_timestamp_mode='rewrite',
tcp_wscale_mode='strip',
timeout_recovery='fallback',
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.description == 'description one'
assert p.idle_timeout == 100
assert p.client_timeout == 101
assert p.explicit_flow_migration == 'no'
assert p.ip_df_mode == 'pmtu'
assert p.ip_tos_to_client == 102
assert p.ip_tos_to_server == 103
assert p.ip_ttl_v4 == 104
assert p.ip_ttl_v6 == 105
assert p.ip_ttl_mode == 'proxy'
assert p.keep_alive_interval == 106
assert p.late_binding == 'yes'
assert p.link_qos_to_client == 7
assert p.link_qos_to_server == 6
assert p.loose_close == 'no'
assert p.loose_initialization == 'yes'
assert p.mss_override == 4
assert p.reassemble_fragments == 'yes'
assert p.receive_window_size == 109
assert p.reset_on_timeout == 'no'
assert p.rtt_from_client == 'yes'
assert p.rtt_from_server == 'no'
assert p.server_sack == 'yes'
assert p.server_timestamp == 'no'
assert p.syn_cookie_mss == 110
assert p.tcp_close_timeout == 111
assert p.tcp_generate_isn == 'yes'
assert p.tcp_handshake_timeout == 112
assert p.tcp_strip_sack == 'no'
assert p.tcp_time_wait_timeout == 113
assert p.tcp_timestamp_mode == 'rewrite'
assert p.tcp_wscale_mode == 'strip'
assert p.timeout_recovery == 'fallback'
def test_api_parameters(self):
args = load_fixture('load_ltm_fastl4_profile_1.json')
p = ApiParameters(params=args)
assert p.name == 'fastL4'
assert p.description is None
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
Groestlcoin/electrumx-grs | tests/server/test_compaction.py | 4 | 4366 | # Test of compaction code in server/db.py
import array
from collections import defaultdict
from os import environ, urandom
from struct import pack
import random
from lib.hash import hash_to_str
from server.env import Env
from server.db import DB
def create_histories(db, hashX_count=100):
    '''Creates a bunch of random transaction histories, and writes them
to disk in a series of small flushes.'''
hashXs = [urandom(db.coin.HASHX_LEN) for n in range(hashX_count)]
mk_array = lambda : array.array('I')
histories = {hashX : mk_array() for hashX in hashXs}
this_history = defaultdict(mk_array)
tx_num = 0
while hashXs:
hash_indexes = set(random.randrange(len(hashXs))
for n in range(1 + random.randrange(4)))
for index in hash_indexes:
histories[hashXs[index]].append(tx_num)
this_history[hashXs[index]].append(tx_num)
tx_num += 1
# Occasionally flush and drop a random hashX if non-empty
if random.random() < 0.1:
db.flush_history(this_history)
this_history.clear()
index = random.randrange(0, len(hashXs))
if histories[hashXs[index]]:
del hashXs[index]
return histories
def check_hashX_compaction(db):
db.max_hist_row_entries = 40
row_size = db.max_hist_row_entries * 4
full_hist = array.array('I', range(100)).tobytes()
hashX = urandom(db.coin.HASHX_LEN)
pairs = ((1, 20), (26, 50), (56, 30))
cum = 0
hist_list = []
hist_map = {}
for flush_count, count in pairs:
key = hashX + pack('>H', flush_count)
hist = full_hist[cum * 4: (cum+count) * 4]
hist_map[key] = hist
hist_list.append(hist)
cum += count
write_items = []
keys_to_delete = set()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
# Check results for sanity
assert write_size == len(full_hist)
assert len(write_items) == 3
assert len(keys_to_delete) == 3
assert len(hist_map) == len(pairs)
for n, item in enumerate(write_items):
assert item == (hashX + pack('>H', n),
full_hist[n * row_size: (n + 1) * row_size])
for flush_count, count in pairs:
assert hashX + pack('>H', flush_count) in keys_to_delete
# Check re-compaction is null
hist_map = {key: value for key, value in write_items}
hist_list = [value for key, value in write_items]
write_items.clear()
keys_to_delete.clear()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
assert write_size == 0
assert len(write_items) == 0
assert len(keys_to_delete) == 0
assert len(hist_map) == len(pairs)
# Check re-compaction adding a single tx writes the one row
hist_list[-1] += array.array('I', [100]).tobytes()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
assert write_size == len(hist_list[-1])
assert write_items == [(hashX + pack('>H', 2), hist_list[-1])]
assert len(keys_to_delete) == 1
assert write_items[0][0] in keys_to_delete
assert len(hist_map) == len(pairs)
def check_written(db, histories):
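    '''Checks that the history stored in the DB matches the one created in memory.'''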
for hashX, hist in histories.items():
db_hist = array.array('I', db.get_history_txnums(hashX, limit=None))
assert hist == db_hist
def compact_history(db):
'''Synchronously compact the DB history.'''
db.first_sync = False
db.comp_cursor = 0
db.comp_flush_count = max(db.comp_flush_count, 1)
limit = 5 * 1000
write_size = 0
while db.comp_cursor != -1:
write_size += db._compact_history(limit)
assert write_size != 0
def run_test(db_dir):
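    '''Opens a DB in db_dir and runs the compaction checks.'''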
environ.clear()
environ['DB_DIRECTORY'] = db_dir
environ['DAEMON_URL'] = ''
environ['COIN'] = 'BitcoinCash'
env = Env()
db = DB(env)
# Test abstract compaction
check_hashX_compaction(db)
    # Now test with random data
histories = create_histories(db)
check_written(db, histories)
compact_history(db)
check_written(db, histories)
def test_compaction(tmpdir):
db_dir = str(tmpdir)
print('Temp dir: {}'.format(db_dir))
run_test(db_dir)
| mit |
ujjwalwahi/odoo | addons/l10n_in_hr_payroll/report/__init__.py | 424 | 1262 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_payslip_details
import report_payroll_advice
import report_hr_salary_employee_bymonth
import payment_advice_report
import report_hr_yearly_salary_detail
import payslip_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Didou09/tofu | tofu/mag/regression_test.py | 2 | 11158 | # -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
Regression test
'''
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import subprocess
import sys
import time
#print('path 1 =', sys.path)
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#print('path 2 =', sys.path)
# Local modules
import equimap
import imas
# REFERENCE FILE !!!
# ==================
REF_FILE = 'reference.npz'
REF_SHOT = 54178
REF_RUN = 9
REF_OCC = 0
REF_USER = 'imas_public'
REF_MACHINE = 'west'
# ==================
# Parameters
interp_points = 30
eps_time = 1.23456789E-2
lquantities = ('rho_pol_norm', 'rho_tor_norm', 'rho_tor', 'psi', 'phi', \
'theta', 'j_tor', 'b_field_r', 'b_field_z', 'b_field_tor', \
'b_field_norm')
def eval_diff(data, data_ref, name, rel_tolerance=1E-10):
'''
Function
--------
    eval_diff(data, data_ref, name, rel_tolerance=1E-10)
    Output
    ------
    Prints the maximum relative difference and the index where it occurs;
    raises a ValueError if the maximum exceeds the given relative tolerance
'''
data = np.asarray(data)
data_ref = np.asarray(data_ref)
if (data.shape != data_ref.shape):
raise ValueError('Shape of input data is not equal')
rel_diff = np.abs( (data - data_ref) / data_ref )
max_rel_diff = np.nanmax(rel_diff)
if (rel_diff.ndim != 0):
ind_max_rel_diff = np.unravel_index(np.nanargmax(rel_diff), rel_diff.shape)
else:
ind_max_rel_diff = 0
if (max_rel_diff > rel_tolerance):
raise ValueError('ERROR test in: ' + name + ', max relative difference = '
+ '{0} at index = {1}'.format(max_rel_diff, ind_max_rel_diff))
print('')
print('In field name: ' + name + ', max relative difference = '
+ '{0} at index = {1}'.format(max_rel_diff, ind_max_rel_diff))
print('')
if __name__ == '__main__':
print(' ')
# Parse input arguments
parser = argparse.ArgumentParser(description= \
'''Run regression EQUIMAP test using REF_FILE = {0}; REF_SHOT = {1};
REF_RUN = {2}; REF_OCC = {3}; REF_USER = {4}; REF_MACHINE = {5}
'''.format(REF_FILE, REF_SHOT, REF_RUN, REF_OCC, REF_USER, REF_MACHINE))
# To exclude 2 conflict options use:
#group = parser.add_mutually_exclusive_group()
#parser.add_argument('shot', type=int, nargs='?', default=53259, help='shot, default=53259')
parser.add_argument('--saveFile', action='store_true', \
help='saves a Python .npz file')
parser.add_argument('--figures', action='store_true', \
help='plot figures')
parser.add_argument('--no-git-check', action='store_true', \
help='no check for changes that are not commited')
args = parser.parse_args()
print('REF FILE =', REF_FILE)
print(' ')
if (not args.no_git_check):
try:
subprocess.run(['git', 'diff', '--exit-code', '--quiet'], check=True)
subprocess.run(['git', 'diff', '--cached', '--exit-code', '--quiet'], check=True)
except subprocess.CalledProcessError as err:
print(' ')
            print('ERROR: uncommitted changes, please commit the changes.', err)
print(' ')
raise
# Initialize dictionary to store results
results = {}
idd = imas.ids(REF_SHOT, REF_RUN)
idd.open_env(REF_USER, REF_MACHINE, '3')
if (REF_OCC == 0):
idd.equilibrium.get()
else:
idd.equilibrium.get(REF_OCC)
equi = idd.equilibrium
# Test one time and spatial 3D
# ----------------------------
time_in = eps_time + 0.5*(np.nanmax(equi.time) + np.nanmin(equi.time))
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
equiDict = {}
equiDict['r'] = np.full(NbrPoints, np.nan)
equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
R_in = np.linspace(np.min(equiDict['r']), \
np.max(equiDict['r']), interp_points)
Z_in = np.linspace(np.min(equiDict['z']), \
np.max(equiDict['z']), interp_points)
Phi_in = np.linspace(0, 2*np.pi/18, interp_points)
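    # Build the flattened 3D sample grid: Phi varies slowest, then Z, then R fastest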
R_in_tot = np.tile(R_in, int(interp_points**2))
Z_in_tot = np.tile(np.repeat(Z_in, interp_points), interp_points)
Phi_in_tot = np.repeat(Phi_in, int(interp_points**2))
Rr = R_in_tot.reshape((interp_points, interp_points, interp_points))
Zr = Z_in_tot.reshape((interp_points, interp_points, interp_points))
for iquant in lquantities:
start = time.time()
#sys.stdout = open(os.devnull, 'w')
out = equimap.get(REF_SHOT, time=time_in, R=R_in_tot, Phi=Phi_in_tot, \
Z=Z_in_tot, quantity=iquant, no_ripple=False, \
run=REF_RUN, occ=REF_OCC, user=REF_USER, \
machine=REF_MACHINE)
#sys.stdout = sys.__stdout__
end = time.time()
print()
print('====================================')
print('time for', iquant, ' =', end - start)
print('====================================')
print()
if (args.figures):
outr = out.reshape((interp_points, interp_points, interp_points))
plt.figure()
plt.contourf(Rr[int(0.2*interp_points), :, :], \
Zr[int(0.2*interp_points), :, :], \
outr[int(0.2*interp_points), :, :])
arg_time = np.argmin(np.abs(equi.time - time_in))
plt.plot(np.squeeze(equi.time_slice[arg_time].boundary.outline.r), \
np.squeeze(equi.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(equi.time_slice[arg_time].global_quantities.magnetic_axis.r, \
equi.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('{0} t={1:.2f}'.format(iquant, time_in))
plt.colorbar()
# Save results in dict
results[iquant] = out
# End loop on lquantities
# Test large time and spatial 2D (R, Phi)
# --------------------------------------
# Check code.output_flag for data validity
if (np.any(np.isnan(equi.code.output_flag))):
mask = np.full(len(equi.time), True, dtype=bool)
else:
mask = np.asarray(equi.code.output_flag) >= 0
time1 = 0.495*(np.nanmax(equi.time[mask]) + np.nanmin(equi.time[mask]))
time2 = 0.505*(np.nanmax(equi.time[mask]) + np.nanmin(equi.time[mask]))
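    # Keep the equilibrium time samples inside [time1, time2], widened by one sample on each side when possible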
mask_time_tmp = (equi.time[mask] >= time1) \
& (equi.time[mask] <= time2)
indMin = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][0]).argmin()
indMax = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][-1]).argmin()
if (indMin == 0):
indMinApply = indMin
else:
indMinApply = indMin - 1
if (indMax == (equi.time[mask].size-1)):
indMaxApply = indMax
else:
indMaxApply = indMax + 1
mask_time = (equi.time[mask] >= equi.time[mask][indMinApply]) \
& (equi.time[mask] <= equi.time[mask][indMaxApply])
time_points = equi.time[mask][mask_time]
time_in = np.linspace(time1, time2, time_points.size + 1)
time_in += eps_time
R_in = np.linspace(np.min(equiDict['r']), \
np.max(equiDict['r']), interp_points)
Phi_in = np.linspace(0, 2*np.pi/18, interp_points)
R_in_tot = np.tile(R_in, interp_points)
Z_in_tot = np.zeros(R_in_tot.shape)
Phi_in_tot = np.repeat(Phi_in, interp_points)
Rr = R_in_tot.reshape((interp_points, interp_points))
Phir = Phi_in_tot.reshape((interp_points, interp_points))
arg_time = np.argmin(np.abs(equi.time - time_in[int(0.5*time_in.size)]))
if (args.figures):
mask_LFS = (equi.time_slice[arg_time].boundary.outline.r > equi.time_slice[arg_time].global_quantities.magnetic_axis.r)
indZ0_LFS = np.argmin(np.abs(equi.time_slice[arg_time].boundary.outline.z[mask_LFS]))
mask_HFS = (equi.time_slice[arg_time].boundary.outline.r < equi.time_slice[arg_time].global_quantities.magnetic_axis.r)
indZ0_HFS = np.argmin(np.abs(equi.time_slice[arg_time].boundary.outline.z[mask_HFS]))
for iquant in lquantities:
start = time.time()
#sys.stdout = open(os.devnull, 'w')
out = equimap.get(REF_SHOT, time=time_in, R=R_in_tot, Phi=Phi_in_tot, \
Z=Z_in_tot, quantity=iquant, no_ripple=False, \
run=REF_RUN, occ=REF_OCC, user=REF_USER, \
machine=REF_MACHINE)
#sys.stdout = sys.__stdout__
end = time.time()
print()
print('====================================')
print('time (large time input) for', iquant, ' =', end - start)
print('Z_axis =', equi.time_slice[arg_time].global_quantities.magnetic_axis.z)
print('====================================')
print()
if (args.figures):
outr = out[int(0.5*out.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr[:, :], Phir[:, :], outr[:, :])
plt.axvline(np.squeeze(equi.time_slice[arg_time].boundary.outline.r[mask_LFS][indZ0_LFS]), \
linewidth=2, color='red')
plt.axvline(np.squeeze(equi.time_slice[arg_time].boundary.outline.r[mask_HFS][indZ0_HFS]), \
linewidth=2, color='red')
plt.axvline(equi.time_slice[arg_time].global_quantities.magnetic_axis.r, \
linewidth=2, color='red', linestyle='--')
plt.xlabel('R [m]')
plt.ylabel('Phi [rad]')
plt.title('{0} t={1:.2f}'.format(iquant, time_in[int(0.5*out.shape[0])]))
plt.colorbar()
# Save results in dict
results[iquant + '_LT'] = out
# End loop on lquantities
if (args.saveFile):
filename = 'reg_test_{0}_Run{1}_Occ{2}_User_{3}_Machine_{4}.npz'.format( \
REF_SHOT, REF_RUN, REF_OCC, REF_USER, REF_MACHINE)
np.savez(filename, **results)
if (args.figures):
plt.show()
ref = np.load(REF_FILE)
for iquant in lquantities:
eval_diff(results[iquant], ref[iquant], iquant)
eval_diff(results[iquant + '_LT'], ref[iquant + '_LT'], iquant + '_LT')
print()
print('End regression test')
print()
| mit |
40223104/test_lego | static/Brython3.1.1-20150328-091302/Lib/stat.py | 765 | 4304 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
"""Return True if mode is from a character special device file."""
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
"""Return True if mode is from a block special device file."""
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
"""Return True if mode is from a regular file."""
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
"""Return True if mode is from a FIFO (named pipe)."""
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
"""Return True if mode is from a socket."""
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((S_IRUSR, "r"),),
((S_IWUSR, "w"),),
((S_IXUSR|S_ISUID, "s"),
(S_ISUID, "S"),
(S_IXUSR, "x")),
((S_IRGRP, "r"),),
((S_IWGRP, "w"),),
((S_IXGRP|S_ISGID, "s"),
(S_ISGID, "S"),
(S_IXGRP, "x")),
((S_IROTH, "r"),),
((S_IWOTH, "w"),),
((S_IXOTH|S_ISVTX, "t"),
(S_ISVTX, "T"),
(S_IXOTH, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| gpl-3.0 |
lanfker/vPRKS | examples/realtime/realtime-udp-echo.py | 195 | 3526 | #
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Network topology
#
# n0 n1 n2 n3
# | | | |
# =================
# LAN
#
# - UDP flows from n0 to n1 and back
# - DropTail queues
# - Tracing of queues and packet receptions to file "udp-echo.tr"
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
#
# Allow the user to override any of the defaults and the above Bind() at
# run-time, via command-line arguments
#
cmd = ns.core.CommandLine()
cmd.Parse(argv)
#
# But since this is a realtime script, don't allow the user to mess with
# that.
#
ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
#
# Explicitly create the nodes required by the topology (shown above).
#
print "Create nodes."
n = ns.network.NodeContainer()
n.Create(4)
internet = ns.internet.InternetStackHelper()
internet.Install(n)
#
# Explicitly create the channels required by the topology (shown above).
#
print ("Create channels.")
csma = ns.csma.CsmaHelper()
csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)));
csma.SetDeviceAttribute("Mtu", ns.core.UintegerValue(1400))
d = csma.Install(n)
#
# We've got the "hardware" in place. Now we need to add IP addresses.
#
print ("Assign IP Addresses.")
ipv4 = ns.internet.Ipv4AddressHelper()
ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
i = ipv4.Assign(d)
print ("Create Applications.")
#
# Create a UdpEchoServer application on node one.
#
port = 9 # well-known echo port number
server = ns.applications.UdpEchoServerHelper(port)
apps = server.Install(n.Get(1))
apps.Start(ns.core.Seconds(1.0))
apps.Stop(ns.core.Seconds(10.0))
#
# Create a UdpEchoClient application to send UDP datagrams from node zero to
# node one.
#
packetSize = 1024
maxPacketCount = 500
interPacketInterval = ns.core.Seconds(0.01)
client = ns.applications.UdpEchoClientHelper(i.GetAddress (1), port)
client.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount))
client.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval))
client.SetAttribute("PacketSize", ns.core.UintegerValue(packetSize))
apps = client.Install(n.Get(0))
apps.Start(ns.core.Seconds(2.0))
apps.Stop(ns.core.Seconds(10.0))
ascii = ns.network.AsciiTraceHelper()
csma.EnableAsciiAll(ascii.CreateFileStream("realtime-udp-echo.tr"))
csma.EnablePcapAll("realtime-udp-echo", False)
#
# Now, do the actual simulation.
#
print ("Run Simulation.")
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
print ("Done.")
if __name__ == '__main__':
import sys
main(sys.argv)
| gpl-2.0 |
camptocamp/ngo-addons-backport | addons/document_page/wizard/wiki_wiki_page_open.py | 46 | 2658 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class wiki_wiki_page_open(osv.osv_memory):
""" wizard Open Page """
_name = "wiki.wiki.page.open"
_description = "wiz open page"
def open_wiki_page(self, cr, uid, ids, context=None):
""" Opens Wiki Page of Group
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of open wiki page’s IDs
        @return: dictionary of the window action opening the wiki for the given group id
"""
if context is None:
context = {}
group_ids = context.get('active_ids', [])
for group in self.pool.get('wiki.groups').browse(cr, uid, group_ids, context=context):
value = {
'domain': "[('group_id','=',%d)]" % (group.id),
'name': 'Wiki Page',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'wiki.wiki',
'view_id': False,
'type': 'ir.actions.act_window',
}
if group.method == 'page':
value['res_id'] = group.home.id
elif group.method == 'list':
value['view_type'] = 'form'
value['view_mode'] = 'tree,form'
elif group.method == 'tree':
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'wiki.wiki.tree.children')])
value['view_id'] = view_id
value['domain'] = [('group_id', '=', group.id), ('parent_id', '=', False)]
value['view_type'] = 'tree'
return value
wiki_wiki_page_open()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mcfiredrill/libcaca-old | python/caca/display.py | 1 | 9283 | # -*- coding: utf-8 -*-
#
# libcaca Colour ASCII-Art library
# Python language bindings
# Copyright (c) 2010 Alex Foulon <[email protected]>
# All Rights Reserved
#
# This library is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What the Fuck You Want
# to Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
#
""" Libcaca Python bindings """
import ctypes
from caca import _lib, _PYTHON3, _str_to_bytes
from caca.canvas import _Canvas, Canvas
class _Display(object):
""" Model for Display objects.
"""
def from_param(self):
""" Required by ctypes module to call object as parameter of
a C function.
"""
return self._dp
def __str__(self):
return "<CacaDisplay>"
def __del__(self):
if self._dp > 0:
self._free()
def _free(self):
""" Free a libcaca display.
"""
_lib.caca_free_display.argtypes = [_Display]
_lib.caca_free_display.restype = ctypes.c_int
return _lib.caca_free_display(self)
class Display(_Display):
""" Display objects, methods are libcaca functions with display_t as first
parameter.
"""
def __init__(self, cv, driver=None):
""" Display constructor.
cv -- canvas to attach.
driver -- caca driver to set with display
"""
if driver is None:
_lib.caca_create_display.argtypes = [_Canvas]
self._dp = _lib.caca_create_display(cv)
else:
_lib.caca_create_display_with_driver.argtypes = [
_Canvas, ctypes.c_char_p
]
if _PYTHON3 and isinstance(driver, str):
driver = _str_to_bytes(driver)
self._dp = _lib.caca_create_display_with_driver(cv, driver)
if self._dp == 0:
raise DisplayError("Failed to create display")
def get_driver(self):
""" Return the caca graphical context's current output driver.
"""
_lib.caca_get_display_driver.argtypes = [_Display]
_lib.caca_get_display_driver.restype = ctypes.c_char_p
return _lib.caca_get_display_driver(self)
def set_driver(self, driver=None):
""" Set the output driver.
driver -- A string describing the desired output driver or NULL
to choose the best driver automatically.
"""
_lib.caca_set_display_driver.argtypes = [_Display, ctypes.c_char_p]
_lib.caca_set_display_driver.restype = ctypes.c_int
if not driver:
driver = ctypes.c_char_p(0)
else:
if _PYTHON3 and isinstance(driver, str):
driver = _str_to_bytes(driver)
return _lib.caca_set_display_driver(self, driver)
def get_canvas(self):
""" Get the canvas attached to a caca graphical context.
"""
_lib.caca_get_canvas.argtypes = [_Display]
_lib.caca_get_canvas.restype = ctypes.POINTER(ctypes.c_char_p)
return Canvas(pointer=_lib.caca_get_canvas(self))
def refresh(self):
""" Flush pending changes and redraw the screen.
"""
_lib.caca_refresh_display.argtypes = [_Display]
_lib.caca_refresh_display.restype = ctypes.c_int
return _lib.caca_refresh_display(self)
def set_time(self, usec):
""" Set the refresh delay.
usec -- the refresh delay in microseconds
"""
_lib.caca_set_display_time.argtypes = [_Display, ctypes.c_int]
_lib.caca_set_display_time.restype = ctypes.c_int
return _lib.caca_set_display_time(self, usec)
def get_time(self):
""" Get the display's average rendering time.
"""
_lib.caca_get_display_time.argtypes = [_Display]
_lib.caca_get_display_time.restype = ctypes.c_int
return _lib.caca_get_display_time(self)
def set_title(self, title):
""" Set the display title.
title -- the desired display title
"""
_lib.caca_set_display_title.argtypes = [_Display, ctypes.c_char_p]
_lib.caca_set_display_title.restype = ctypes.c_int
if _PYTHON3 and isinstance(title, str):
title = _str_to_bytes(title)
return _lib.caca_set_display_title(self, title)
def set_mouse(self, flag):
""" Show or hide the mouse pointer. This function works with the
ncurses, S-Lang and X11 drivers.
flag -- 0 hides the pointer, 1 shows the system's default pointer
(usually an arrow)
"""
_lib.caca_set_mouse.argtypes = [_Display, ctypes.c_int]
_lib.caca_set_mouse.restype = ctypes.c_int
return _lib.caca_set_mouse(self, flag)
def set_cursor(self, flag):
""" Show or hide the cursor, for devices that support such a feature.
flag -- 0 hides the cursor, 1 shows the system's default cursor
(usually a white rectangle).
"""
_lib.caca_set_cursor.argtypes = [Display, ctypes.c_int]
_lib.caca_set_cursor.restype = ctypes.c_int
return _lib.caca_set_cursor(self, flag)
def get_event(self, event_mask, event, timeout):
""" Get the next mouse or keyboard input event.
event_mask -- bitmask of requested events
event -- a pointer to caca_event structure or NULL
        timeout -- a timeout value in microseconds
"""
_lib.caca_get_event.argtypes = [
Display, ctypes.c_int, ctypes.POINTER(Event), ctypes.c_int
]
return _lib.caca_get_event(self, event_mask, ctypes.byref(event),
timeout)
def get_mouse_x(self):
""" Return the X mouse coordinate.
"""
_lib.caca_get_mouse_x.argtypes = [Display]
_lib.caca_get_mouse_x.restype = ctypes.c_int
return _lib.caca_get_mouse_x(self)
def get_mouse_y(self):
""" Return the Y mouse coordinate.
"""
_lib.caca_get_mouse_y.argtypes = [Display]
_lib.caca_get_mouse_y.restype = ctypes.c_int
return _lib.caca_get_mouse_y(self)
class DisplayError(Exception):
pass
class Event(ctypes.Structure):
""" Object to store libcaca event.
"""
_fields_ = (
('opaque_structure', ctypes.c_char_p * 32),
)
def from_param(self):
""" Required method to pass object as parameter of a C function.
"""
return ctypes.byref(self)
def get_type(self):
""" Return an event's type.
"""
_lib.caca_get_event_type.argtypes = [Event]
_lib.caca_get_event_type.restype = ctypes.c_int
return _lib.caca_get_event_type(self)
def get_key_ch(self):
""" Return a key press or key release event's value.
"""
_lib.caca_get_event_key_ch.argtypes = [Event]
_lib.caca_get_event_key_ch.restype = ctypes.c_int
return _lib.caca_get_event_key_ch(self)
def get_key_utf32(self):
""" Not implemented.
"""
raise DisplayError("Not implemented")
def get_key_utf8(self):
""" Return a key press or key release event's UTF-8 value
as python string.
"""
# set buffer for writing utf8 value
buf = ctypes.c_buffer(7)
_lib.caca_get_event_key_utf8.argtypes = [Event, ctypes.c_char_p]
_lib.caca_get_event_key_utf8.restype = ctypes.c_int
_lib.caca_get_event_key_utf8(self, buf)
raw = []
for item in list(buf.raw):
if item == '\x00':
break
else:
raw.append(item)
return "".join(raw)
def get_mouse_button(self):
""" Return a mouse press or mouse release event's button.
"""
_lib.caca_get_event_mouse_button.argtypes = [Event]
_lib.caca_get_event_mouse_button.restype = ctypes.c_int
return _lib.caca_get_event_mouse_button(self)
def get_mouse_x(self):
""" Return a mouse motion event's X coordinate.
"""
_lib.caca_get_event_mouse_x.argtypes = [Event]
_lib.caca_get_event_mouse_x.restype = ctypes.c_int
return _lib.caca_get_event_mouse_x(self)
def get_mouse_y(self):
""" Return a mouse motion event's Y coordinate.
"""
_lib.caca_get_event_mouse_y.argtypes = [Event]
_lib.caca_get_event_mouse_y.restype = ctypes.c_int
return _lib.caca_get_event_mouse_y(self)
def get_resize_width(self):
""" Return a resize event's display width value.
"""
_lib.caca_get_event_resize_width.argtypes = [Event]
_lib.caca_get_event_resize_width.restype = ctypes.c_int
return _lib.caca_get_event_resize_width(self)
def get_resize_height(self):
""" Return a resize event's display height value.
"""
_lib.caca_get_event_resize_height.argtypes = [Event]
_lib.caca_get_event_resize_height.restype = ctypes.c_int
return _lib.caca_get_event_resize_height(self)
| lgpl-2.1 |
sopac/pacgeo | geonode/base/tests.py | 3 | 4298 | from django.test import TestCase
from geonode.base.models import ResourceBase
from geonode.utils import OGC_Servers_Handler
class ThumbnailTests(TestCase):
def setUp(self):
self.rb = ResourceBase.objects.create()
def tearDown(self):
t = self.rb.thumbnail
if t:
t.delete()
def test_initial_behavior(self):
self.assertFalse(self.rb.has_thumbnail())
missing = self.rb.get_thumbnail_url()
self.assertEquals('/static/geonode/img/missing_thumb.png', missing)
def test_saving(self):
# monkey patch our render function to just put the 'spec' into the file
self.rb._render_thumbnail = lambda *a, **kw: '%s' % a[0]
self._do_save_test('abc', 1)
self._do_save_test('xyz', 2)
def _do_save_test(self, content, version):
self.rb.save_thumbnail(content)
thumb = self.rb.thumbnail
self.assertEquals(version, thumb.version)
self.assertEqual(content, thumb.thumb_file.read())
self.assertEqual(content, thumb.thumb_spec)
class UtilsTests(TestCase):
def test_ogc_server_settings(self):
"""
Tests the OGC Servers Handler class.
"""
OGC_SERVER = {
'default': {
'BACKEND': 'geonode.geoserver',
'LOCATION': 'http://localhost:8080/geoserver/',
'PUBLIC_LOCATION' : 'http://localhost:8080/geoserver/',
'USER': 'admin',
'PASSWORD': 'geoserver',
'MAPFISH_PRINT_ENABLED': True,
'PRINTING_ENABLED': True,
'GEONODE_SECURITY_ENABLED': True,
'GEOGIT_ENABLED': True,
'WMST_ENABLED': False,
'BACKEND_WRITE_ENABLED': True,
'WPS_ENABLED': False,
'DATASTORE': str(),
}
}
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
default = OGC_SERVER.get('default')
self.assertEqual(ogc_settings.server, default)
self.assertEqual(ogc_settings.BACKEND, default.get('BACKEND'))
self.assertEqual(ogc_settings.LOCATION, default.get('LOCATION'))
self.assertEqual(ogc_settings.PUBLIC_LOCATION, default.get('PUBLIC_LOCATION'))
self.assertEqual(ogc_settings.USER, default.get('USER'))
self.assertEqual(ogc_settings.PASSWORD, default.get('PASSWORD'))
self.assertEqual(ogc_settings.DATASTORE, str())
self.assertEqual(ogc_settings.credentials, ('admin', 'geoserver'))
self.assertTrue(ogc_settings.MAPFISH_PRINT_ENABLED)
self.assertTrue(ogc_settings.PRINTING_ENABLED)
self.assertTrue(ogc_settings.GEONODE_SECURITY_ENABLED)
self.assertTrue(ogc_settings.GEOGIT_ENABLED)
self.assertFalse(ogc_settings.WMST_ENABLED)
self.assertTrue(ogc_settings.BACKEND_WRITE_ENABLED)
self.assertFalse(ogc_settings.WPS_ENABLED)
def test_ogc_server_defaults(self):
"""
Tests that OGC_SERVER_SETTINGS are built if they do not exist in the settings.
"""
OGC_SERVER = {
'default': dict(),
}
EXPECTATION ={
'default' : {
'BACKEND' : 'geonode.geoserver',
'LOCATION' : 'http://localhost:8080/geoserver/',
'USER' : 'admin',
'PASSWORD' : 'geoserver',
'MAPFISH_PRINT_ENABLED' : True,
'PRINTING_ENABLED' : True,
'GEONODE_SECURITY_ENABLED' : True,
'GEOGIT_ENABLED' : False,
'WMST_ENABLED' : False,
'BACKEND_WRITE_ENABLED': True,
'WPS_ENABLED' : False,
'DATASTORE': str(),
'GEOGIT_DATASTORE_DIR': str(),
}
}
defaults = EXPECTATION.get('default')
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
self.assertEqual(ogc_settings.server, defaults)
self.assertEqual(ogc_settings.rest, defaults['LOCATION']+'rest')
self.assertEqual(ogc_settings.ows, defaults['LOCATION']+'ows')
# Make sure we get None vs a KeyError when the key does not exist
self.assertIsNone(ogc_settings.SFDSDFDSF)
| gpl-3.0 |
truetone/AutobahnPython | examples/wamp/pubsub/simple/example2/server.py | 27 | 1919 | ###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, \
WampServerProtocol
class MyServerProtocol(WampServerProtocol):
def onSessionOpen(self):
## register a single, fixed URI as PubSub topic
self.registerForPubSub("http://example.com/simple")
## register a URI and all URIs having the string as prefix as PubSub topic
self.registerForPubSub("http://example.com/event#", True)
## register any URI (string) as topic
#self.registerForPubSub("", True)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = True)
factory.protocol = MyServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| mit |
rcbops/nova-buildpackage | contrib/boto_v6/ec2/connection.py | 17 | 5286 | '''
Created on 2010/12/20
@author: Nachi Ueno <[email protected]>
'''
import boto
import base64
import boto.ec2
from boto_v6.ec2.instance import ReservationV6
from boto.ec2.securitygroup import SecurityGroup
class EC2ConnectionV6(boto.ec2.EC2Connection):
'''
EC2Connection for OpenStack IPV6 mode
'''
def get_all_instances(self, instance_ids=None, filters=None):
"""
Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
        :return: A list of :class:`boto_v6.ec2.instance.ReservationV6`
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeInstancesV6', params,
[('item', ReservationV6)])
def run_instances(self, image_id, min_count=1, max_count=1,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None):
"""
Runs an image on EC2.
:type image_id: string
:param image_id: The ID of the image to run
:type min_count: int
:param min_count: The minimum number of instances to launch
:type max_count: int
:param max_count: The maximum number of instances to launch
:type key_name: string
:param key_name: The name of the key pair with which to
launch instances
:type security_groups: list of strings
:param security_groups: The names of the security groups with
which to associate instances
:type user_data: string
:param user_data: The user data passed to the launched instances
:type instance_type: string
:param instance_type: The type of instance to run
(m1.small, m1.large, m1.xlarge)
:type placement: string
:param placement: The availability zone in which to launch
the instances
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to
launch the instances
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to
launch the instances
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring
on the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch
the instances for VPC.
:type block_device_map:
:class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated
with the Image.
:rtype: Reservation
:return: The :class:`boto.ec2.instance.ReservationV6`
associated with the request for machines
"""
params = {'ImageId': image_id,
'MinCount': min_count,
'MaxCount': max_count}
if key_name:
params['KeyName'] = key_name
if security_groups:
l = []
for group in security_groups:
if isinstance(group, SecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'SecurityGroup')
if user_data:
params['UserData'] = base64.b64encode(user_data)
if addressing_type:
params['AddressingType'] = addressing_type
if instance_type:
params['InstanceType'] = instance_type
if placement:
params['Placement.AvailabilityZone'] = placement
if kernel_id:
params['KernelId'] = kernel_id
if ramdisk_id:
params['RamdiskId'] = ramdisk_id
if monitoring_enabled:
params['Monitoring.Enabled'] = 'true'
if subnet_id:
params['SubnetId'] = subnet_id
if block_device_map:
block_device_map.build_list_params(params)
return self.get_object('RunInstances', params,
ReservationV6, verb='POST')
| apache-2.0 |
hsum/sqlalchemy | lib/sqlalchemy/orm/scoping.py | 80 | 6101 | # orm/scoping.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session
__all__ = ['scoped_session']
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
using the session factory if not present.
:param \**kw: Keyword arguments will be passed to the
session factory callable, if an existing :class:`.Session`
is not present. If the :class:`.Session` is present and
keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
scope = kw.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kw)
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`.Query` object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush', 'info'):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(scoped_session, prop, clslevel(prop))
| mit |
BeZazz/lamebench | nb_third_party/dns/rdtypes/ANY/GPOS.py | 248 | 5304 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
def _validate_float_string(what):
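    # Accepts an optional leading sign followed by digits with at most one decimal point.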
if what[0] == '-' or what[0] == '+':
what = what[1:]
if what.isdigit():
return
(left, right) = what.split('.')
if left == '' and right == '':
raise dns.exception.FormError
if not left == '' and not left.isdigit():
raise dns.exception.FormError
if not right == '' and not right.isdigit():
raise dns.exception.FormError
class GPOS(dns.rdata.Rdata):
"""GPOS record
@ivar latitude: latitude
@type latitude: string
@ivar longitude: longitude
@type longitude: string
@ivar altitude: altitude
@type altitude: string
@see: RFC 1712"""
__slots__ = ['latitude', 'longitude', 'altitude']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
super(GPOS, self).__init__(rdclass, rdtype)
if isinstance(latitude, float) or \
isinstance(latitude, int) or \
isinstance(latitude, long):
latitude = str(latitude)
if isinstance(longitude, float) or \
isinstance(longitude, int) or \
isinstance(longitude, long):
longitude = str(longitude)
if isinstance(altitude, float) or \
isinstance(altitude, int) or \
isinstance(altitude, long):
altitude = str(altitude)
_validate_float_string(latitude)
_validate_float_string(longitude)
_validate_float_string(altitude)
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
def to_text(self, origin=None, relativize=True, **kw):
return '%s %s %s' % (self.latitude, self.longitude, self.altitude)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
latitude = tok.get_string()
longitude = tok.get_string()
altitude = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, latitude, longitude, altitude)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.latitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.latitude)
l = len(self.longitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.longitude)
l = len(self.altitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.altitude)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
latitude = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
longitude = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
altitude = wire[current : current + l]
return cls(rdclass, rdtype, latitude, longitude, altitude)
from_wire = classmethod(from_wire)
def _cmp(self, other):
v = cmp(self.latitude, other.latitude)
if v == 0:
v = cmp(self.longitude, other.longitude)
if v == 0:
v = cmp(self.altitude, other.altitude)
return v
def _get_float_latitude(self):
return float(self.latitude)
def _set_float_latitude(self, value):
self.latitude = str(value)
float_latitude = property(_get_float_latitude, _set_float_latitude,
doc="latitude as a floating point value")
def _get_float_longitude(self):
return float(self.longitude)
def _set_float_longitude(self, value):
self.longitude = str(value)
float_longitude = property(_get_float_longitude, _set_float_longitude,
doc="longitude as a floating point value")
def _get_float_altitude(self):
return float(self.altitude)
def _set_float_altitude(self, value):
self.altitude = str(value)
float_altitude = property(_get_float_altitude, _set_float_altitude,
doc="altitude as a floating point value")
| apache-2.0 |
mesheven/pyOCD | pyocd/rtos/common.py | 1 | 2964 | """
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .provider import (TargetThread, ThreadProvider)
from ..debug.context import DebugContext
from ..coresight.cortex_m import (CORE_REGISTER, register_name_to_index)
from ..core import exceptions
import logging
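# Byte offsets of the 'next' pointer and the owning-object pointer within an RTOS list node.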
LIST_NODE_NEXT_OFFSET = 0
LIST_NODE_OBJ_OFFSET= 8
## @brief Reads a null-terminated C string from the target.
def read_c_string(context, ptr):
if ptr == 0:
return ""
s = ""
done = False
count = 0
badCount = 0
try:
while not done and count < 256:
data = context.read_memory_block8(ptr, 16)
ptr += 16
count += 16
for c in data:
if c == 0:
done = True
break
elif c > 127:
# Replace non-ASCII characters. If there is a run of invalid characters longer
# than 4, then terminate the string early.
badCount += 1
if badCount > 4:
done = True
break
s += '?'
else:
s += chr(c)
badCount = 0
except exceptions.TransferError:
logging.debug("TransferError while trying to read 16 bytes at 0x%08x", ptr)
return s
## @brief Class representing the handler mode.
class HandlerModeThread(TargetThread):
UNIQUE_ID = 2
def __init__(self, targetContext, provider):
super(HandlerModeThread, self).__init__()
self._target_context = targetContext
self._provider = provider
def get_stack_pointer(self):
return self._target_context.read_core_register('msp')
@property
def priority(self):
return 0
@property
def unique_id(self):
return self.UNIQUE_ID
@property
def name(self):
return "Handler mode"
@property
def description(self):
        ipsr = self._target_context.read_core_register('ipsr')
return self._target_context.core.exception_number_to_name(ipsr)
@property
def is_current(self):
return self._target_context.read_core_register('ipsr') > 0
@property
def context(self):
return self._target_context
def __str__(self):
return "<HandlerModeThread@0x%08x>" % (id(self))
def __repr__(self):
return str(self)
| apache-2.0 |
rwl/muntjac | muntjac/demo/sampler/features/dragndrop/DragDropRearrangeComponentsExample.py | 1 | 5450 |
from muntjac.api import VerticalLayout, Label, Embedded, Button, Alignment
from muntjac.terminal.theme_resource import ThemeResource
from muntjac.ui.css_layout import CssLayout
from muntjac.ui.button import IClickListener
from muntjac.ui.custom_component import CustomComponent
from muntjac.ui.horizontal_layout import HorizontalLayout
from muntjac.ui.drag_and_drop_wrapper import DragAndDropWrapper, DragStartMode
from muntjac.event.dd.drop_handler import IDropHandler
from muntjac.event.dd.acceptcriteria.source_is_target import SourceIsTarget
from muntjac.event.dd.acceptcriteria.not_ import Not
from muntjac.terminal.gwt.client.ui.dd.horizontal_drop_location import \
HorizontalDropLocation
class DragDropRearrangeComponentsExample(VerticalLayout):
def __init__(self):
super(DragDropRearrangeComponentsExample, self).__init__()
layout = SortableLayout(True)
layout.setSizeUndefined()
layout.setHeight('100px')
# Use these styles to hide irrelevant drag hints
layout.addStyleName('no-vertical-drag-hints')
# layout.addStyleName("no-horizontal-drag-hints");
# layout.addStyleName("no-box-drag-hints");
for component in self.createComponents():
layout.addComponent(component)
self.addComponent(layout)
def createComponents(self):
components = list()
label = Label('This is a long text block that will wrap.')
label.setWidth('120px')
components.append(label)
image = Embedded('', ThemeResource('../runo/icons/64/document.png'))
components.append(image)
documentLayout = CssLayout()
documentLayout.setWidth('19px')
for _ in range(5):
e = Embedded(None, ThemeResource('../runo/icons/16/document.png'))
e.setHeight('16px')
e.setWidth('16px')
documentLayout.addComponent(e)
components.append(documentLayout)
buttonLayout = VerticalLayout()
button = Button('Button')
button.addListener(ButtonClickListener(self), IClickListener)
buttonLayout.addComponent(button)
buttonLayout.setComponentAlignment(button, Alignment.MIDDLE_CENTER)
components.append(buttonLayout)
return components
class ButtonClickListener(IClickListener):
def __init__(self, c):
self._c = c
def buttonClick(self, event):
self._c.getWindow().showNotification('Button clicked')
class SortableLayout(CustomComponent):
def __init__(self, horizontal):
super(SortableLayout, self).__init__()
self._horizontal = horizontal
if horizontal:
self._layout = HorizontalLayout()
else:
self._layout = VerticalLayout()
self._dropHandler = ReorderLayoutDropHandler(self._layout)
pane = DragAndDropWrapper(self._layout)
self.setCompositionRoot(pane)
def addComponent(self, component):
wrapper = WrappedComponent(component, self._dropHandler)
wrapper.setSizeUndefined()
if self._horizontal:
component.setHeight('100%')
wrapper.setHeight('100%')
else:
component.setWidth('100%')
wrapper.setWidth('100%')
self._layout.addComponent(wrapper)
class WrappedComponent(DragAndDropWrapper):
def __init__(self, content, dropHandler):
super(WrappedComponent, self).__init__(content)
self._dropHandler = dropHandler
self.setDragStartMode(DragStartMode.WRAPPER)
def getDropHandler(self):
return self._dropHandler
class ReorderLayoutDropHandler(IDropHandler):
def __init__(self, layout):
self._layout = layout
def getAcceptCriterion(self):
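        # Accept drops coming from any component except the drop target itself.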
return Not(SourceIsTarget.get())
def drop(self, dropEvent):
transferable = dropEvent.getTransferable()
sourceComponent = transferable.getSourceComponent()
if isinstance(sourceComponent, WrappedComponent):
dropTargetData = dropEvent.getTargetDetails()
target = dropTargetData.getTarget()
# find the location where to move the dragged component
sourceWasAfterTarget = True
index = 0
componentIterator = self._layout.getComponentIterator()
nxt = None
while nxt != target:
try:
nxt = componentIterator.next()
if nxt != sourceComponent:
index += 1
else:
sourceWasAfterTarget = False
except StopIteration:
break
if (nxt is None) or (nxt != target):
# component not found - if dragging from another layout
return
# drop on top of target?
if (dropTargetData.getData('horizontalLocation')
== str(HorizontalDropLocation.CENTER)):
# drop before the target?
if sourceWasAfterTarget:
index -= 1
elif (dropTargetData.getData('horizontalLocation')
== str(HorizontalDropLocation.LEFT)):
index -= 1
if index < 0:
index = 0
# move component within the layout
self._layout.removeComponent(sourceComponent)
self._layout.addComponent(sourceComponent, index)
| apache-2.0 |
emccode/HeliosBurn | heliosburn/django/hbproject/api/views/testplan_rule.py | 1 | 5048 | from bson import ObjectId
import json
import logging
from bson.errors import InvalidId
from django.http import JsonResponse, HttpResponseNotFound, HttpResponseBadRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from api.models import db_model
from api.models.auth import RequireLogin
from api.models import rule_model
logger = logging.getLogger(__name__)
@csrf_exempt
def rest(request, *pargs, **kwargs):
"""
    Calls the Python function corresponding to the HTTP method name.
Calls with incomplete arguments will return HTTP 400
"""
if request.method == 'GET':
rest_function = get
elif request.method == 'POST':
rest_function = post
elif request.method == 'PUT':
rest_function = put
elif request.method == 'DELETE':
rest_function = delete
else:
return JsonResponse({"error": "HTTP METHOD UNKNOWN"})
try:
return rest_function(request, *pargs, **kwargs)
except TypeError:
return HttpResponseBadRequest("argument mismatch")
@RequireLogin()
def get(request, testplan_id, rule_id):
"""
Retrieve rule within testplan based on the testplan_id and rule_id.
"""
dbc = db_model.connect()
try:
testplan = dbc.testplan.find_one({"_id": ObjectId(testplan_id)}, {'_id': 0})
except InvalidId:
return HttpResponseNotFound()
if testplan is None:
return HttpResponseNotFound()
rule = filter(lambda r: r['id'] == rule_id, testplan['rules'])
if len(rule) == 0:
return HttpResponseBadRequest("rule not found within test plan")
else:
return JsonResponse(rule[0])
@RequireLogin()
def post(request, testplan_id):
"""
Create a new rule within a testplan based on testplan_id.
"""
if request.method != 'POST':
return HttpResponseBadRequest("only POST supported")
try:
new = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("invalid JSON")
except AssertionError:
return HttpResponseBadRequest("argument mismatch")
if 'id' in new: # Don't allow the user to shoot themselves in the foot providing dubious id
del new['id']
rule = rule_model.validate(new)
if rule is None:
return HttpResponseBadRequest("invalid rule")
dbc = db_model.connect()
try:
testplan = dbc.testplan.find_one({"_id": ObjectId(testplan_id)})
except InvalidId:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
if testplan is None:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
if 'rules' in testplan:
testplan['rules'].append(rule)
else:
testplan['rules'] = [rule]
dbc.testplan.save(testplan)
r = JsonResponse({"id": rule['id']}, status=200)
r['location'] = "/api/testplan/%s/rule/%s" % (testplan_id, rule['id'])
logger.info("rule '%s' within testplan '%s' created by '%s'" % (rule['id'], testplan_id, request.user['username']))
return r
@RequireLogin()
def put(request, testplan_id, rule_id):
"""
Update existing test plan based on testplan_id.
"""
dbc = db_model.connect()
try:
testplan = dbc.testplan.find_one({"_id": ObjectId(testplan_id)})
except InvalidId:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
if testplan is None:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
    rule = None
    for ri in range(0, len(testplan['rules'])):
        if testplan['rules'][ri]['id'] == rule_id:
            rule = testplan['rules'][ri]
            break
    if rule is None:
        return HttpResponseBadRequest("rule not found within test plan")
try:
new = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("invalid JSON")
except AssertionError:
return HttpResponseBadRequest("argument mismatch")
rule.update(new)
rule = rule_model.validate(rule)
if rule is None:
return HttpResponseBadRequest("invalid rule")
testplan['rules'][ri] = rule
dbc.testplan.save(testplan)
r = HttpResponse(status=200)
r['location'] = "/api/testplan/%s/rule/%s" % (testplan_id, rule['id'])
logger.info("rule '%s' within testplan '%s' updated by '%s'" % (rule['id'], testplan_id, request.user['username']))
return r
@RequireLogin()
def delete(request, testplan_id, rule_id):
"""
Delete test plan based on testplan_id.
"""
dbc = db_model.connect()
try:
testplan = dbc.testplan.find_one({"_id": ObjectId(testplan_id)})
except InvalidId:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
if testplan is None:
return HttpResponseNotFound("testplan '%s' not found" % testplan_id)
for i in range(0, len(testplan['rules'])):
if testplan['rules'][i]['id'] == rule_id:
del testplan['rules'][i]
break
dbc.testplan.save(testplan)
logger.info("rule '%s' within testplan '%s' deleted by '%s'" % (rule_id, testplan_id, request.user['username']))
return HttpResponse()
| mit |
mark-ignacio/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py | 124 | 3350 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for amountchangedpattern.py.'''
import re
import unittest2 as unittest
from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern
class AmountChangedPatternTest(unittest.TestCase):
# A quick note about the diff file structure.
    # The first column indicates the old line number.
# The second column indicates the new line number.
# 0 in either column indicates it had no old or new line number.
_DIFF_FILE = ((0, 1, 'hi hi'),
(1, 0, 'bye hi'),
(2, 2, 'other hi'),
(3, 0, 'both'),
(0, 3, 'both'),
)
def run_amount_changed_pattern_match(self, pattern, index_for_zero_value):
return AmountChangedPattern(re.compile(pattern), index_for_zero_value).match(None, self._DIFF_FILE)
def test_added_lines(self):
self.assertTrue(self.run_amount_changed_pattern_match('hi', 0))
self.assertTrue(self.run_amount_changed_pattern_match('hi hi', 0))
self.assertFalse(self.run_amount_changed_pattern_match('other', 0))
self.assertFalse(self.run_amount_changed_pattern_match('both', 0))
self.assertFalse(self.run_amount_changed_pattern_match('bye', 0))
self.assertFalse(self.run_amount_changed_pattern_match('MatchesNothing', 0))
def test_removed_lines(self):
self.assertFalse(self.run_amount_changed_pattern_match('hi', 1))
self.assertFalse(self.run_amount_changed_pattern_match('hi hi', 1))
self.assertFalse(self.run_amount_changed_pattern_match('other', 1))
self.assertFalse(self.run_amount_changed_pattern_match('both', 1))
self.assertTrue(self.run_amount_changed_pattern_match('bye', 1))
self.assertFalse(self.run_amount_changed_pattern_match('MatchesNothing', 1))
| bsd-3-clause |
Alexsaphir/TP_EDP_Python | TP2_error.py | 1 | 1912 | # -*- coding: utf-8 -*-
from numpy import * # importation du module numpy
from numpy.linalg import * # importation du module numpy.linalg
from numpy.random import *
from matplotlib.pyplot import *
from mpl_toolkits.mplot3d import Axes3D
# Compute the error while varying Ns
def Ud(x):
y = sin(2*pi*x)*sinh(2*pi)
return y
# Function defining the exact solution of the equation
def solex(x, y):
z = sin(2*pi*x)*sinh(2*pi*y)
return z
def solver(Ns):
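	# Solve the 2D Laplace equation on the unit square with Ns interior points per direction
	# (Dirichlet data Ud on the y = 1 edge, zero elsewhere) and return the max-norm error against the exact solution.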
	# Mesh
h = 1./(Ns + 1)
X = linspace(0,1,Ns+2)
Xh = X[1:Ns+1]
	# Matrix of the linear system: 2D 5-point Laplacian in lexicographic ordering
	A = -1*(diag(ones(Ns*Ns-Ns),Ns) + diag(ones(Ns*Ns-Ns),-Ns))
B = 4*eye(Ns) -1*(diag(ones(Ns-1),1) + diag(ones(Ns-1),-1))
for i in arange(0,Ns):
A[Ns*i:Ns*i+Ns,Ns*i:Ns*i+Ns] = B
	# Right-hand side (Dirichlet data Ud on the y = 1 boundary)
b = zeros(Ns*Ns)
b[Ns*(Ns-1):Ns*Ns] = Ud(Xh)
	# Resolution of the linear system A.Uh = b
	Uh = solve(A, b)
	# Reshape the solution vector into the grid matrix Zh
Zh = array( 0*Ud(X))
for i in arange (0, Ns, 1):
newrow = Uh[ i*(Ns):i*Ns+Ns]
newrow =concatenate([[0], newrow, [0]])
Zh = vstack([newrow, Zh])
Zh = vstack([Ud(X), Zh])
	# Build the mesh grid
	coordX, coordY = meshgrid(X, flip(X, 0))
	# Evaluate the exact solution on the grid
U = solex(coordX,coordY)
	# Compute the max-norm error
Err = amax(absolute(U-Zh))
#fig = figure()
#ax = Axes3D(fig, azim = 30, elev = 30)
#ax.plot_surface(coordX, coordY, Zh, cmap = cm.jet)
#ax.plot_surface(coordX, coordY, U, cmap = cm.jet)
#fig.show()
return Err
def Err_Conv(N):
E=zeros(N-3)
for i in arange(3, N,1):
E[i-3]=solver(i)
plot(linspace(3,N-1,N-3),E,label='Erreur')
xlabel('Nb de points utilsés (log)')
ylabel('Erreur max mesurée')
title('Equation de Laplace 2D: Etude de la convergence')
xscale('log')
savefig('Picture/TP2/Erreur.png')
| lgpl-3.0 |
rancherio/validation-tests | tests/v2_validation/cattlevalidationtest/core/test_network_policy.py | 3 | 33957 | from common_fixtures import * # NOQA
test_network_policy = os.environ.get(
'TEST_NETWORK_POLICY', "False")
np_reason = \
'Intended to not execute this network policy test'
if_network_policy = pytest.mark.skipif(test_network_policy != "ALL",
reason=np_reason)
if_network_policy_none = pytest.mark.skipif(
test_network_policy != "NONE",
reason=np_reason)
if_network_policy_within_stack = pytest.mark.skipif(
test_network_policy != "WITHIN_STACK",
reason=np_reason)
if_network_policy_within_service = pytest.mark.skipif(
test_network_policy != "WITHIN_SERVICE",
reason=np_reason)
if_network_policy_within_linked = pytest.mark.skipif(
test_network_policy != "WITHIN_LINKED",
reason=np_reason)
if_network_policy_groupby = pytest.mark.skipif(
test_network_policy != "WITHIN_GROUPBY",
reason=np_reason)
NETWORKPOLICY_SUBDIR = \
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/networkpolicy')
policy_within_stack = {"within": "stack", "action": "allow"}
policy_groupby = {"between": {"groupBy": "com.rancher.stack.location"},
"action": "allow"}
policy_within_service = {"within": "service", "action": "allow"}
policy_within_linked = {"within": "linked", "action": "allow"}
shared_environment = {"env": []}
@pytest.fixture(scope='session', autouse=True)
def create_env_for_network_policy(request, client, socat_containers):
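    # Deploy the two test stacks and the standalone containers shared by every test in this module, and register their cleanup.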
assert check_for_network_policy_manager(client)
env2 = create_stack_with_service(client, "test2", NETWORKPOLICY_SUBDIR,
"stack2.yml", "stack2-rc.yml")
assert len(env2.services()) == 6
env1 = create_stack_with_service(client, "test1", NETWORKPOLICY_SUBDIR,
"stack1.yml", "stack1-rc.yml")
assert len(env1.services()) == 11
create_standalone_containers(client)
time.sleep(sleep_interval)
populate_env_details(client)
def fin():
to_delete = [env1, env2]
delete_all(client, to_delete)
delete_all(client, shared_environment["containers"])
delete_all(client, shared_environment["containers_with_label"])
request.addfinalizer(fin)
def populate_env_details(client):
env = client.list_stack(name="test1")
assert len(env) == 1
env1 = env[0]
env = client.list_stack(name="test2")
assert len(env) == 1
env2 = env[0]
shared_environment["env"].append(env1)
shared_environment["env"].append(env2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client, env1, "test1allow")
shared_environment["stack1_test2allow"] = \
get_service_by_name(client, env1, "test2allow")
shared_environment["stack1_test3deny"] = \
get_service_by_name(client, env1, "test3deny")
shared_environment["stack1_test4deny"] = \
get_service_by_name(client, env1, "test4deny")
shared_environment["stack1_lbwithinstack"] = \
get_service_by_name(client, env1, "lbwithininstack")
shared_environment["stack1_lbcrossstack"] = \
get_service_by_name(client, env1, "lbcrossstack")
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client, env1, "servicewithlinks")
shared_environment["stack1_servicecrosslinks"] = \
get_service_by_name(client, env1, "servicecrosslinks")
shared_environment["stack1_servicelinktosidekick"] = \
get_service_by_name(client, env1, "servicelinktosidekick")
shared_environment["stack1_linktowebservice"] = \
get_service_by_name(client, env1, "linktowebservice")
shared_environment["stack2_test1allow"] = \
get_service_by_name(client, env2, "test1allow")
shared_environment["stack2_test2allow"] = \
get_service_by_name(client, env2, "test2allow")
shared_environment["stack2_test3deny"] = \
get_service_by_name(client, env2, "test3deny")
shared_environment["stack2_test4deny"] = \
get_service_by_name(client, env2, "test4deny")
service_with_sidekick = {}
service_with_sidekick["p_con1"] = \
get_container_by_name(client, "test2-testp1-1")
service_with_sidekick["p_con2"] = \
get_container_by_name(client, "test2-testp1-2")
service_with_sidekick["s1_con1"] = \
get_container_by_name(client, "test2-testp1-tests1-1")
service_with_sidekick["s1_con2"] = \
get_container_by_name(client, "test2-testp1-tests1-2")
service_with_sidekick["s2_con1"] = \
get_container_by_name(client, "test2-testp1-tests2-1")
service_with_sidekick["s2_con2"] = \
get_container_by_name(client, "test2-testp1-tests2-2")
shared_environment["stack2_sidekick"] = service_with_sidekick
time.sleep(sleep_interval)
def validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that connectivity between containers of different
# services within the same stack is allowed
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"]],
connection="allow")
# Validate that there is no connectivity between containers of different
# services across stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
    # Validate that LB is able to reach all targets which are in the same stack as
# Lb
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
    # Validate that the service with links is able to reach the service it
    # links to within the same stack
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99")
# Cross stacks access for links should be denied
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
# Cross stacks access for LBs should be denied
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
def validate_default_network_action_deny_networkpolicy_none(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that there is no connectivity between containers of different
# services across stacks and within stacks
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that Lb service is not able to reach targets within the
# same stack and cross stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
# Validate that connectivity between linked service is denied within the
# same stack and cross stacks
validate_linked_service(client,
shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]],
"99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_groupby(
client):
# Validate that containers that do not have the labels defined
# in group by policy are not allowed to communicate with other
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that stand alone containers that have the labels defined
# in group by policy are allowed to communicate with service containers
# having the same labels
for container in shared_environment["containers_with_label"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
# Validate that service containers that have matching labels defined
# in group by policy are allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="allow")
# Validate that all service containers within the same service that has
# group by labels are able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack2_test3deny"]],
connection="allow")
# Validate that service containers that do not have matching labels defined
# in group by policy are not allowed to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test3deny"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"],
shared_environment["stack2_test4deny"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test3deny"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack1_test4deny"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
def validate_default_network_action_deny_networkpolicy_within_service(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test1allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that containers belonging to the same service are able to
# communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"]],
connection="allow")
    # Validate that containers belonging to different services within
# the same stack or cross stack are not able to communicate with each other
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
# Validate that Lb services has no access to targets with in
# same stacks or cross stacks
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbcrossstack"], "9090")
validate_lb_service_for_no_access(
client, shared_environment["stack1_lbwithinstack"], "9091")
# Validate that connectivity between linked service is denied within the
# same stack and cross stacks
validate_linked_service(
client, shared_environment["stack1_servicewithlinks"],
[shared_environment["stack1_test1allow"]], "99", not_reachable=True)
validate_linked_service(client,
shared_environment["stack1_servicecrosslinks"],
[shared_environment["stack2_test2allow"]],
"98", linkName="test2allow.test2",
not_reachable=True)
def validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client):
# Validate that containers of primary services are able to connect with
# other containers in the same service and containers in other sidekick
# services
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["p_con1"],
[shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
# Validate that containers of sidekick services are able to connect with
# other containers in the same service and containers in other sidekick
# services and primary service
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s1_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con2"],
shared_environment["stack2_sidekick"]["s2_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
validate_connectivity_between_container_list(
client,
shared_environment["stack2_sidekick"]["s2_con1"],
[shared_environment["stack2_sidekick"]["p_con1"],
shared_environment["stack2_sidekick"]["p_con2"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s1_con1"],
shared_environment["stack2_sidekick"]["s2_con2"]],
"allow")
def validate_default_network_action_deny_networkpolicy_within_linked(
client):
    # Validate that standalone containers are not able to reach any
# service containers
for container in shared_environment["containers"]:
validate_connectivity_between_con_to_services(
client, container,
[shared_environment["stack1_test2allow"],
shared_environment["stack2_test4deny"]],
connection="deny")
# Validate that containers belonging to a service are not able to
# communicate with other containers in the same service or different
# service
validate_connectivity_between_services(
client, shared_environment["stack1_test1allow"],
[shared_environment["stack1_test1allow"],
shared_environment["stack1_test2allow"],
shared_environment["stack2_test1allow"],
shared_environment["stack2_test2allow"]],
connection="deny")
# Validate that Lb services has access to targets with in
# same stacks
validate_lb_service(client,
shared_environment["stack1_lbwithinstack"],
"9091",
[shared_environment["stack1_test1allow"]])
# Validate that Lb services has access to targets cross stacks
validate_lb_service(client,
shared_environment["stack1_lbcrossstack"],
"9090",
[shared_environment["stack2_test1allow"]])
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
service_with_links = shared_environment["stack1_servicecrosslinks"]
linked_service = [shared_environment["stack2_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "98", "mylink")
def validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, port, linkName=None):
# Validate that all containers of a service with link has access to
# the containers of the service that it is linked to
validate_connectivity_between_services(
client,
service_with_links,
linked_service,
connection="allow")
    # Validate that all containers of a service that is linked to by another
    # service have no access to the containers of the service that links to it
    # (s1 -> s2): containers of s2 have no access to s1
for l_service in linked_service:
validate_connectivity_between_services(
client,
l_service,
[service_with_links],
connection="deny")
# Validate that containers are reachable using their link name
validate_linked_service(client,
service_with_links,
linked_service,
port,
linkName=linkName)
def validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client):
containers = get_service_container_list(
client, shared_environment["stack1_servicelinktosidekick"])
# Validate connectivity between containers of linked services to linked
# service with sidekick
for con in containers:
validate_connectivity_between_container_list(
client,
con,
shared_environment["stack2_sidekick"].values(),
"allow")
for linked_con in shared_environment["stack2_sidekick"].values():
for con in containers:
validate_connectivity_between_containers(
client, linked_con, con, "deny")
def validate_dna_deny_np_within_linked_for_servicealias(
client):
# Validate connectivity between containers of linked services to services
# linked to webservice
validate_connectivity_between_services(
client, shared_environment["stack1_linktowebservice"],
[shared_environment["stack1_test4deny"],
shared_environment["stack2_test3deny"]],
connection="allow")
validate_connectivity_between_services(
client, shared_environment["stack1_test4deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
validate_connectivity_between_services(
        client, shared_environment["stack2_test3deny"],
[shared_environment["stack1_linktowebservice"]],
connection="deny")
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_stacks(
client):
set_network_policy(client, "deny", policy_within_stack)
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy_within_stack
def test_dna_deny_np_allow_within_stacks_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_within_stack)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_allow_within_stacks(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_none(client):
set_network_policy(client, "deny")
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_stop_service(
client, socat_containers):
set_network_policy(client, "deny")
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_delete_service(
client, socat_containers):
set_network_policy(client, "deny")
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy_none
def test_dna_deny_np_none_restart_service(
client, socat_containers):
set_network_policy(client, "deny")
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_none(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_groupby(
client):
set_network_policy(client, "deny", policy_groupby)
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_stop_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_delete_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy_groupby
def test_dna_deny_np_groupby_restart_service(
client, socat_containers):
set_network_policy(client, "deny", policy_groupby)
restart_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
validate_default_network_action_deny_networkpolicy_groupby(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_delete_service(
client):
set_network_policy(client, "deny", policy_within_service)
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
delete_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
delete_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_scale_service(
client):
set_network_policy(client, "deny", policy_within_service)
scale_service(shared_environment["stack1_test1allow"], client, 3)
scale_service(shared_environment["stack1_lbcrossstack"], client, 3)
scale_service(shared_environment["stack1_lbwithinstack"], client, 3)
scale_service(shared_environment["stack1_servicewithlinks"], client, 3)
populate_env_details(client)
validate_default_network_action_deny_networkpolicy_within_service(
client)
scale_service(shared_environment["stack1_test1allow"], client, 2)
scale_service(shared_environment["stack1_lbcrossstack"], client, 2)
scale_service(shared_environment["stack1_lbwithinstack"], client, 2)
scale_service(shared_environment["stack1_servicewithlinks"], client, 2)
@if_network_policy_within_service
def test_dna_deny_np_allow_within_service_stop_service(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service(
client)
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_test1allow"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbcrossstack"], [1])
stop_service_instances(client, shared_environment["env"][0],
shared_environment["stack1_lbwithinstack"], [1])
stop_service_instances(
client, shared_environment["env"][0],
shared_environment["stack1_servicewithlinks"], [1])
validate_default_network_action_deny_networkpolicy_within_service(
client)
@if_network_policy
def test_dna_deny_np_allow_within_service_check_sidekicks(
client):
set_network_policy(client, "deny", policy_within_service)
validate_default_network_action_deny_networkpolicy_within_service_for_sk(
client)
@if_network_policy
def test_default_network_action_deny_networkpolicy_allow_within_linked(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sk(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_default_network_action_deny_networkpolicy_within_linked_for_sk(
client)
@if_network_policy
def test_dna_deny_np_allow_within_linked_for_sa(
client):
set_network_policy(client, "deny", policy_within_linked)
validate_dna_deny_np_within_linked_for_servicealias(
client)
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_scaleup(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 3)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(linked_service, client, 2)
shared_environment["stack1_test1allow"] = \
get_service_by_name(client,
shared_environment["env"][0],
"test1allow")
linked_service = shared_environment["stack1_test1allow"]
scale_service(service_with_links, client, 3)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
service_with_links = shared_environment["stack1_servicewithlinks"]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, [linked_service], "99")
scale_service(service_with_links, client, 2)
shared_environment["stack1_servicewithlinks"] = \
get_service_by_name(client,
shared_environment["env"][0],
"servicewithlinks")
@if_network_policy_within_linked
def test_dna_deny_np_allow_within_linked_after_adding_removing_links(
client):
set_network_policy(client, "deny", policy_within_linked)
service_with_links = shared_environment["stack1_servicewithlinks"]
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
# Add another service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id},
{"serviceId": shared_environment["stack1_test2allow"].id}])
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test1allow"]], "99")
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links,
[shared_environment["stack1_test2allow"]], "99")
# Remove existing service link
service_with_links.setservicelinks(
serviceLinks=[
{"serviceId": shared_environment["stack1_test1allow"].id}])
linked_service = [shared_environment["stack1_test1allow"]]
validate_dna_deny_np_within_linked_for_linked_service(
client, service_with_links, linked_service, "99")
validate_connectivity_between_services(
client, service_with_links,
[shared_environment["stack1_test2allow"]],
connection="deny")
validate_connectivity_between_services(
client, shared_environment["stack1_test2allow"],
[service_with_links],
connection="deny")
def scale_service(service, client, final_scale):
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
check_container_in_service(client, service)
def set_network_policy(client, defaultPolicyAction="allow", policy=None):
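    # Apply the default policy action and the optional policy to the 'ipsec' network, then refresh the cached stack/service references.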
networks = client.list_network(name='ipsec')
assert len(networks) == 1
network = networks[0]
network = client.update(
network, defaultPolicyAction=defaultPolicyAction, policy=policy)
network = wait_success(client, network)
assert network.defaultPolicyAction == defaultPolicyAction
populate_env_details(client)
def check_for_network_policy_manager(client):
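    # The 'network-policy-manager' stack must exist and its service must be active for these tests to run.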
np_manager = False
env = client.list_stack(name="network-policy-manager")
if len(env) == 1:
service = get_service_by_name(client, env[0],
"network-policy-manager")
if service.state == "active":
np_manager = True
return np_manager
def create_standalone_containers(client):
hosts = client.list_host(kind='docker', removed_null=True)
cons = []
cons_with_label = []
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3001:22'],
imageUuid=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id)
con = client.wait_success(con)
assert con.state == "running"
cons.append(con)
shared_environment["containers"] = cons
for host in hosts:
con_name = random_str()
con = client.create_container(
name=con_name,
ports=['3002:22'],
imageUuid=HEALTH_CHECK_IMAGE_UUID,
networkMode=MANAGED_NETWORK,
requestedHostId=host.id,
labels={"com.rancher.stack.location": "east"})
con = client.wait_success(con)
assert con.state == "running"
cons_with_label.append(con)
shared_environment["containers_with_label"] = cons_with_label
| apache-2.0 |
equitania/myodoo-addons-v10 | eq_report_pattern/models/account_invoice.py | 1 | 4656 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
# Extends account.invoice
class eq_account_invoice(models.Model):
_inherit = 'account.invoice'
    document_template_id = fields.Many2one(comodel_name='eq.document.template', string='Document Template')  # TODO: readonly if the invoice is no longer editable?
comment = fields.Html('Additional Information')
# @api.onchange('document_template_id')
# def onchange_document_template_id(self):
# selected_template = self.document_template_id
    # # If partner_id, partner_id.lang and document_template_id are present, the matching template is selected with the language as a parameter
# if (self.partner_id and self.partner_id.lang and self.document_template_id):
# selected_template = self.document_template_id.with_context(lang=self.partner_id.lang)
#
    # # If a template was selected, the header and footer texts are applied
# if (selected_template):
# self.eq_head_text = selected_template.eq_header
# self.comment = selected_template.eq_footer
@api.model
def change_template_id(self, quote_template, partner=False, fiscal_position_id=False, order_lines=[]):
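        # Build (0, 0, vals) creation commands for order lines from the quote template lines.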
if not quote_template:
return True
lines = [] # order_lines
sale_order_line_obj = self.env['sale.order.line']
for line in quote_template.eq_quote_line:
res = sale_order_line_obj.product_id_change()
# res = sale_order_line_obj.product_id_change(False, line.product_id.id, line.product_uom_qty,
# line.product_uom_id.id, False, False, line.name,
# partner, False, False, False, False,
# fiscal_position_id, False)
data = res.get('value', {})
if 'tax_id' in data:
data['tax_id'] = [(6, 0, data['tax_id'])]
data.update({
'name': line.name,
'price_unit': line.price_unit,
'discount': line.discount,
'product_uom_qty': line.product_uom_qty,
'product_id': line.product_id.id,
'product_uom': line.product_uom_id.id,
'website_description': line.website_description,
'state': 'draft',
})
lines.append((0, 0, data))
return lines
@api.onchange('document_template_id')
def onchange_document_template_id(self):
"""
        Apply the template texts when the selected template changes
:return:
"""
selected_template = self.document_template_id
if (self.partner_id and self.partner_id.lang and self.document_template_id):
selected_template = self.document_template_id.with_context(lang=self.partner_id.lang)
if (selected_template):
self.eq_head_text = selected_template.eq_header
self.comment = selected_template.eq_footer
# partner_id = False
# if (self.partner_id):
# partner_id = self.partner_id.id
# self.eq_head_text = selected_template.eq_header
# self.comment = selected_template.eq_footer
"""
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
invoice_vals = super(eq_report_extension_purchase_order, self)._prepare_invoice(cr, uid, order, line_ids, context)
invoice_vals['document_template_id'] = order.document_template_id.id
return invoice_vals
""" | agpl-3.0 |
aoakeson/home-assistant | homeassistant/components/device_tracker/unifi.py | 4 | 2555 | """
Support for Unifi WAP controllers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.unifi/
"""
import logging
import urllib
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
# Unifi package doesn't list urllib3 as a requirement
REQUIREMENTS = ['urllib3', 'unifi==1.2.4']
_LOGGER = logging.getLogger(__name__)
CONF_PORT = 'port'
def get_scanner(hass, config):
"""Setup Unifi device_tracker."""
from unifi.controller import Controller
if not validate_config(config, {DOMAIN: [CONF_USERNAME,
CONF_PASSWORD]},
_LOGGER):
_LOGGER.error('Invalid configuration')
return False
this_config = config[DOMAIN]
host = this_config.get(CONF_HOST, 'localhost')
username = this_config.get(CONF_USERNAME)
password = this_config.get(CONF_PASSWORD)
try:
port = int(this_config.get(CONF_PORT, 8443))
except ValueError:
_LOGGER.error('Invalid port (must be numeric like 8443)')
return False
try:
ctrl = Controller(host, username, password, port, 'v4')
except urllib.error.HTTPError as ex:
_LOGGER.error('Failed to connect to unifi: %s', ex)
return False
return UnifiScanner(ctrl)
class UnifiScanner(object):
"""Provide device_tracker support from Unifi WAP client data."""
def __init__(self, controller):
"""Initialize the scanner."""
self._controller = controller
self._update()
def _update(self):
"""Get the clients from the device."""
try:
clients = self._controller.get_clients()
except urllib.error.HTTPError as ex:
_LOGGER.error('Failed to scan clients: %s', ex)
clients = []
self._clients = {client['mac']: client for client in clients}
def scan_devices(self):
"""Scan for devices."""
self._update()
return self._clients.keys()
def get_device_name(self, mac):
"""Return the name (if known) of the device.
If a name has been set in Unifi, then return that, else
return the hostname if it has been detected.
"""
client = self._clients.get(mac, {})
name = client.get('name') or client.get('hostname')
_LOGGER.debug('Device %s name %s', mac, name)
return name
| mit |
android-ia/hardware_intel_parameter-framework | tools/xmlGenerator/domainGenerator.py | 6 | 12409 | #! /usr/bin/python
#
# Copyright (c) 2011-2014, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import PyPfw
import EddParser
from PfwBaseTranslator import PfwBaseTranslator, PfwException
import hostConfig
import argparse
import re
import sys
import tempfile
import os
import logging
def wrap_pfw_error_semantic(func):
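    # Decorator: convert the parameter-framework (ok, error) return convention into a raised PfwException.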
def wrapped(*args, **kwargs):
ok, error = func(*args, **kwargs)
if not ok:
raise PfwException(error)
return wrapped
class PfwTranslator(PfwBaseTranslator):
"""Generates calls to the Pfw's python bindings"""
def __init__(self, pfw_instance, error_handler):
super(PfwTranslator, self).__init__()
self._pfw = pfw_instance
self._error_handler = error_handler
def _handleException(self, ex):
if isinstance(ex, PfwException):
# catch and handle translation errors...
self._error_handler(ex, self._getContext())
else:
# ...but let any other error fall through
raise ex
@wrap_pfw_error_semantic
def _doCreateDomain(self, name):
return self._pfw.createDomain(name)
@wrap_pfw_error_semantic
def _doSetSequenceAware(self):
return self._pfw.setSequenceAwareness(self._ctx_domain, True)
@wrap_pfw_error_semantic
def _doAddElement(self, path):
return self._pfw.addConfigurableElementToDomain(self._ctx_domain, path)
@wrap_pfw_error_semantic
def _doCreateConfiguration(self, name):
return self._pfw.createConfiguration(self._ctx_domain, name)
@wrap_pfw_error_semantic
def _doSetElementSequence(self, paths):
return self._pfw.setElementSequence(self._ctx_domain, self._ctx_configuration, paths)
@wrap_pfw_error_semantic
def _doSetRule(self, rule):
return self._pfw.setApplicationRule(self._ctx_domain, self._ctx_configuration, rule)
@wrap_pfw_error_semantic
def _doSetParameter(self, path, value):
ok, _, error = self._pfw.accessConfigurationValue(
self._ctx_domain, self._ctx_configuration, path, value, True)
return ok, error
class PfwTranslationErrorHandler:
def __init__(self):
self._errors = []
self._hasFailed = False
def __call__(self, error, context):
sys.stderr.write("Error in context {}:\n\t{}\n".format(context, error))
self._hasFailed = True
def hasFailed(self):
return self._hasFailed
class PfwLogger(PyPfw.ILogger):
def __init__(self):
super(PfwLogger, self).__init__()
self.__logger = logging.root.getChild("parameter-framework")
def log(self, is_warning, message):
log_func = self.__logger.warning if is_warning else self.__logger.info
log_func(message)
# If this file is directly executed
if __name__ == "__main__":
logging.root.setLevel(logging.INFO)
argparser = argparse.ArgumentParser(description="Parameter-Framework XML \
Settings file generator")
argparser.add_argument('--toplevel-config',
help="Top-level parameter-framework configuration file. Mandatory.",
metavar="TOPLEVEL_CONFIG_FILE",
required=True)
argparser.add_argument('--criteria',
help="Criteria file, in '<type> <name> : <value> <value...>' \
format. Mandatory.",
metavar="CRITERIA_FILE",
type=argparse.FileType('r'),
required=True)
argparser.add_argument('--initial-settings',
help="Initial XML settings file (containing a \
<ConfigurableDomains> tag",
nargs='?',
metavar="XML_SETTINGS_FILE")
argparser.add_argument('--add-domains',
help="List of single domain files (each containing a single \
<ConfigurableDomain> tag",
metavar="XML_DOMAIN_FILE",
nargs='*',
dest='xml_domain_files',
default=[])
argparser.add_argument('--add-edds',
help="List of files in EDD syntax (aka \".pfw\" files)",
metavar="EDD_FILE",
type=argparse.FileType('r'),
nargs='*',
default=[],
dest='edd_files')
argparser.add_argument('--schemas-dir',
help="Directory of parameter-framework XML Schemas for generation \
validation",
default=None)
argparser.add_argument('--target-schemas-dir',
help="Directory of parameter-framework XML Schemas on target \
machine (may be different than generating machine). \
Defaults to \"Schemas\"",
default="Schemas")
argparser.add_argument('--validate',
help="Validate the settings against XML schemas",
action='store_true')
argparser.add_argument('--verbose',
action='store_true')
args = argparser.parse_args()
#
# Criteria file
#
    # This file defines one criterion per line; each line should respect this format:
#
# <type> <name> : <values>
#
# Where <type> is 'InclusiveCriterion' or 'ExclusiveCriterion';
# <name> is any string w/o whitespace
# <values> is a list of whitespace-separated values, each of which is any
# string w/o a whitespace
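    # Example line (illustrative values only): "InclusiveCriterion OutputDevices : Headphones Speaker Bluetooth"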
criteria_pattern = re.compile(
r"^(?P<type>(?:Inclusive|Exclusive)Criterion)\s*" \
r"(?P<name>\S+)\s*:\s*" \
r"(?P<values>.*)$")
criterion_inclusiveness_table = {
'InclusiveCriterion' : True,
'ExclusiveCriterion' : False}
all_criteria = []
# Parse the criteria file
for line_number, line in enumerate(args.criteria, 1):
match = criteria_pattern.match(line)
if not match:
raise ValueError("The following line is invalid: {}:{}\n{}".format(
args.criteria.name, line_number, line))
criterion_name = match.groupdict()['name']
criterion_type = match.groupdict()['type']
        criterion_values = re.split(r"\s+", match.groupdict()['values'])
criterion_inclusiveness = criterion_inclusiveness_table[criterion_type]
all_criteria.append({
"name" : criterion_name,
"inclusive" : criterion_inclusiveness,
"values" : criterion_values})
#
# EDD files (aka ".pfw" files)
#
parsed_edds = []
for edd_file in args.edd_files:
try:
root = parser = EddParser.Parser().parse(edd_file, args.verbose)
except EddParser.MySyntaxError as ex:
logging.critical(str(ex))
logging.info("EXIT ON FAILURE")
exit(2)
try:
root.propagate()
        except EddParser.MyPropagationError as ex:
logging.critical(str(ex))
logging.info("EXIT ON FAILURE")
exit(1)
parsed_edds.append((edd_file.name, root))
# We need to modify the toplevel configuration file to account for differences
# between the development setup and the target (installation) setup; in particular, the
# TuningMode must be enforced, regardless of what will be allowed on the target
with tempfile.NamedTemporaryFile(mode='w') as fake_toplevel_config:
install_path = os.path.dirname(os.path.realpath(args.toplevel_config))
hostConfig.configure(
infile=args.toplevel_config,
outfile=fake_toplevel_config,
structPath=install_path)
fake_toplevel_config.flush()
# Create a new Pfw instance
pfw = PyPfw.ParameterFramework(fake_toplevel_config.name)
# create and inject all the criteria
logging.info("Creating all criteria")
for criterion in all_criteria:
criterion_type = pfw.createSelectionCriterionType(criterion['inclusive'])
for numerical, literal in enumerate(criterion['values']):
if criterion['inclusive']:
# inclusive criteria are "bitfields"
numerical = 1 << numerical
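# e.g. (illustrative) the literals "Speaker", "Headset", "Bluetooth" would
# get the numerical values 1, 2 and 4, so any combination of them can be
# encoded in a single bitfield state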
ok = criterion_type.addValuePair(numerical, literal)
if not ok:
logging.critical("valuepair {}/{} rejected for {}".format(
numerical, literal, criterion['name']))
exit(1)
# we don't need the reference to the created criterion; ignore the
# return value
pfw.createSelectionCriterion(criterion['name'], criterion_type)
# Set failure conditions
pfw.setFailureOnMissingSubsystem(False)
pfw.setFailureOnFailedSettingsLoad(False)
if args.validate:
pfw.setValidateSchemasOnStart(True)
if args.schemas_dir is not None:
schemas_dir = args.schemas_dir
else:
schemas_dir = os.path.join(install_path, "Schemas")
pfw.setSchemaFolderLocation(schemas_dir)
logger = PfwLogger()
pfw.setLogger(logger)
# Disable the remote interface because we don't need it and it might
# get in the way (e.g. the port is already in use)
pfw.setForceNoRemoteInterface(True)
# Finally, start the Pfw
ok, error = pfw.start()
if not ok:
logging.critical("Error while starting the pfw: {}".format(error))
exit(1)
ok, error = pfw.setTuningMode(True)
if not ok:
logging.critical(error)
exit(1)
# Import initial settings file
if args.initial_settings:
initial_settings = os.path.realpath(args.initial_settings)
logging.info(
"Importing initial settings file {}".format(initial_settings))
ok, error = pfw.importDomainsXml(initial_settings, True, True)
if not ok:
logging.critical(error)
exit(1)
# Import each standalone domain file
for domain_file in args.xml_domain_files:
logging.info("Importing single domain file {}".format(domain_file))
ok, error = pfw.importSingleDomainXml(os.path.realpath(domain_file), False)
if not ok:
logging.critical(error)
exit(1)
# Parse and inject each EDD file
error_handler = PfwTranslationErrorHandler()
translator = PfwTranslator(pfw, error_handler)
for filename, parsed_edd in parsed_edds:
logging.info("Translating and injecting EDD file {}".format(filename))
parsed_edd.translate(translator)
if error_handler.hasFailed():
logging.error("Error while importing parsed EDD files.\n")
exit(1)
# dirty hack: we change the schema location (right before exporting the
# domains) to their location on the target (which may differ from the one on
# the machine that is generating the domains)
pfw.setSchemaFolderLocation(args.target_schemas_dir)
# Export the resulting settings to the standard output
ok, domains, error = pfw.exportDomainsXml("", True, False)
sys.stdout.write(domains)
| bsd-3-clause |
SEL-Columbia/commcare-hq | corehq/apps/app_manager/tests/__init__.py | 1 | 1477 | from __future__ import absolute_import
try:
from corehq.apps.app_manager.tests.test_app_manager import *
from corehq.apps.app_manager.tests.test_xml_parsing import *
from corehq.apps.app_manager.tests.test_xform_parsing import *
from corehq.apps.app_manager.tests.test_form_versioning import *
from corehq.apps.app_manager.tests.test_success_message import *
from corehq.apps.app_manager.tests.test_form_preparation_v2 import *
from corehq.apps.app_manager.tests.test_days_ago_migration import *
from corehq.apps.app_manager.tests.test_suite import *
from corehq.apps.app_manager.tests.test_profile import *
from corehq.apps.app_manager.tests.test_build_errors import *
from corehq.apps.app_manager.tests.test_views import *
from corehq.apps.app_manager.tests.test_commcare_settings import *
from corehq.apps.app_manager.tests.test_brief_view import *
from .test_location_xpath import *
from .test_get_questions import *
from .test_repeater import *
from .test_broken_build import *
except ImportError, e:
# for some reason the test harness squashes these so log them here for clarity
# otherwise debugging is a pain
import logging
logging.exception(e)
raise
from corehq.apps.app_manager.util import is_valid_case_type
from corehq.apps.app_manager.id_strings import _format_to_regex
__test__ = {
'is_valid_case_type': is_valid_case_type,
'_format_to_regex': _format_to_regex,
}
| bsd-3-clause |
davidegurgone/pyang | pyang/syntax.py | 8 | 11241 | """Description of YANG & YIN syntax."""
import re
### Regular expressions - constraints on arguments
# keywords and identifiers
identifier = r"[_A-Za-z][._\-A-Za-z0-9]*"
prefix = identifier
keyword = '((' + prefix + '):)?(' + identifier + ')'
comment = '(/\*([^*]|[\r\n\s]|(\*+([^*/]|[\r\n\s])))*\*+/)|(//.*)|(/\*.*)'
# no group version of keyword
keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')'
re_keyword = re.compile(keyword)
re_keyword_start = re.compile('^' + keyword)
re_comment = re.compile(comment)
pos_integer = r"[1-9][0-9]*"
nonneg_integer = r"(0|[1-9])[0-9]*"
integer_ = r"[-+]?" + nonneg_integer
decimal_ = r"(\+|\-)?[0-9]+(\.[0-9]+)?"
length_str = '((min|max|[0-9]+)\s*' \
'(\.\.\s*' \
'(min|max|[0-9]+)\s*)?)'
length_expr = length_str + '(\|\s*' + length_str + ')*'
re_length_part = re.compile(length_str)
range_str = '((\-INF|min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \
'(\.\.\s*' \
'(INF|min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)'
range_expr = range_str + '(\|\s*' + range_str + ')*'
re_range_part = re.compile(range_str)
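# Illustrative (hypothetical) arguments matched by the expressions above once
# they are anchored and compiled below: a length-arg such as "1..255 | 1024"
# and a range-arg such as "min..0 | 10..max"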
re_identifier = re.compile("^" + identifier + "$")
# path and unique
node_id = keyword_ng
rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id
path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")"
path_equality_expr = node_id + r"\s*=\s*" + path_key_expr
path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*"
absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+"
descendant_path_arg = node_id + "(" + path_predicate + ")*" + \
"(?:" + absolute_path_arg + ")?"
relative_path_arg = r"(\.\./)*" + descendant_path_arg
deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \
")\s*\)/\.\./" + relative_path_arg
path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \
deref_path_arg + ")"
absolute_schema_nodeid = "(/" + node_id + ")+"
descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?"
schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")"
unique_arg = descendant_schema_nodeid + "(\s+" + descendant_schema_nodeid + ")*"
key_arg = node_id + "(\s+" + node_id + ")*"
re_schema_node_id_part = re.compile('/' + keyword)
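# An illustrative path-arg, in the style of the leafref example from RFC 6020,
# that the path_arg expression above accepts:
#   ../../interface[name = current()/../ifname]/address/ip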
# URI - RFC 3986, Appendix A
scheme = "[A-Za-z][-+.A-Za-z0-9]*"
unreserved = "[-._~A-Za-z0-9]"
pct_encoded = "%[0-9A-F]{2}"
sub_delims = "[!$&'()*+,;=]"
pchar = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|[:@])")
segment = pchar + "*"
segment_nz = pchar + "+"
userinfo = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|:)*")
dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet
h16 = "[0-9A-F]{1,4}"
ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")"
ipv6address = (
"((" + h16 + ":){6}" + ls32 +
"|::(" + h16 + ":){5}" + ls32 +
"|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 +
"|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 +
"|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 +
"|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 +
"|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 +
"|((" + h16 + ":){,5}" + h16 + ")?::" + h16 +
"|((" + h16 + ":){,6}" + h16 + ")?::)")
ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+"
ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]"
reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*"
host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")"
port = "[0-9]*"
authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?"
path_abempty = "(/" + segment + ")*"
path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?"
path_rootless = segment_nz + "(/" + segment + ")*"
path_empty = pchar + "{0}"
hier_part = ("(" + "//" + authority + path_abempty + "|" +
path_absolute + "|" + path_rootless + "|" + path_empty + ")")
query = "(" + pchar + "|[/?])*"
fragment = query
uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" +
"(#" + fragment + ")?")
# Date
date = r"[1-2][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])"
re_nonneg_integer = re.compile("^" + nonneg_integer + "$")
re_integer = re.compile("^" + integer_ + "$")
re_decimal = re.compile("^" + decimal_ + "$")
re_uri = re.compile("^" + uri + "$")
re_boolean = re.compile("^(true|false)$")
re_version = re.compile("^1$")
re_date = re.compile("^" + date +"$")
re_status = re.compile("^(current|obsolete|deprecated)$")
re_key = re.compile("^" + key_arg + "$")
re_length = re.compile("^" + length_expr + "$")
re_range = re.compile("^" + range_expr + "$")
re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$")
re_ordered_by = re.compile(r"^(user|system)$")
re_node_id = re.compile("^" + node_id + "$")
re_path = re.compile("^" + path_arg + "$")
re_absolute_path = re.compile("^" + absolute_path_arg + "$")
re_unique = re.compile("^" + unique_arg + "$")
re_schema_nodeid = re.compile("^" + schema_nodeid + "$")
re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$")
re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$")
re_deviate = re.compile("^(add|delete|replace|not-supported)$")
arg_type_map = {
"identifier": lambda s: re_identifier.search(s) is not None,
"non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None,
"integer": lambda s: re_integer.search(s) is not None,
"uri": lambda s: re_uri.search(s) is not None,
"boolean": lambda s: re_boolean.search(s) is not None,
"version": lambda s: re_version.search(s) is not None,
"date": lambda s: re_date.search(s) is not None,
"status-arg": lambda s: re_status.search(s) is not None,
"key-arg": lambda s: re_key.search(s) is not None,
"length-arg": lambda s: re_length.search(s) is not None,
"range-arg": lambda s: re_range.search(s) is not None,
"max-value": lambda s: re_pos_integer.search(s) is not None,
"ordered-by-arg": lambda s: re_ordered_by.search(s) is not None,
"identifier-ref": lambda s: re_node_id.search(s) is not None,
"path-arg": lambda s: re_path.search(s) is not None,
"absolute-path-arg": lambda s: re_absolute_path.search(s) is not None,
"unique-arg": lambda s: re_unique.search(s) is not None,
"absolute-schema-nodeid": lambda s: \
re_absolute_schema_nodeid.search(s) is not None,
"descendant-schema-nodeid": lambda s: \
re_descendant_schema_nodeid.search(s) is not None,
"schema-nodeid": lambda s: \
re_schema_nodeid.search(s) is not None,
"enum-arg": lambda s: chk_enum_arg(s),
"fraction-digits-arg": lambda s: chk_fraction_digits_arg(s),
"deviate-arg": lambda s: re_deviate.search(s) is not None,
"_comment": lambda s: re_comment.search(s) is not None,
}
"""Argument type definitions.
Validation checks for all argument types except plain strings, which
are checked directly by the parser.
"""
def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True
def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False
def add_arg_type(arg_type, regexp):
"""Add a new arg_type to the map.
Used by extension plugins to register their own argument types."""
arg_type_map[arg_type] = regexp
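# Illustrative plugin registration (the type name and checker are hypothetical;
# despite the parameter name, any callable taking the argument string works,
# as the entries above show):
#   add_arg_type("my-ext-arg",
#                lambda s: re.match(r"^[a-z][a-z0-9-]*$", s) is not None)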
# keyword argument-name yin-element
yin_map = \
{'anyxml': ('name', False),
'argument': ('name', False),
'augment': ('target-node', False),
'base': ('name', False),
'belongs-to': ('module', False),
'bit': ('name', False),
'case': ('name', False),
'choice': ('name', False),
'config': ('value', False),
'contact': ('text', True),
'container': ('name', False),
'default': ('value', False),
'description': ('text', True),
'deviate': ('value', False),
'deviation': ('target-node', False),
'enum': ('name', False),
'error-app-tag': ('value', False),
'error-message': ('value', True),
'extension': ('name', False),
'feature': ('name', False),
'fraction-digits': ('value', False),
'grouping': ('name', False),
'identity': ('name', False),
'if-feature': ('name', False),
'import': ('module', False),
'include': ('module', False),
'input': (None, None),
'key': ('value', False),
'leaf': ('name', False),
'leaf-list': ('name', False),
'length': ('value', False),
'list': ('name', False),
'mandatory': ('value', False),
'max-elements': ('value', False),
'min-elements': ('value', False),
'module': ('name', False),
'must': ('condition', False),
'namespace': ('uri', False),
'notification': ('name', False),
'ordered-by': ('value', False),
'organization': ('text', True),
'output': (None, None),
'path': ('value', False),
'pattern': ('value', False),
'position': ('value', False),
'presence': ('value', False),
'prefix': ('value', False),
'range': ('value', False),
'reference': ('text', True),
'refine': ('target-node', False),
'require-instance': ('value', False),
'revision': ('date', False),
'revision-date': ('date', False),
'rpc': ('name', False),
'status': ('value', False),
'submodule': ('name', False),
'type': ('name', False),
'typedef': ('name', False),
'unique': ('tag', False),
'units': ('name', False),
'uses': ('name', False),
'value': ('value', False),
'when': ('condition', False),
'yang-version': ('value', False),
'yin-element': ('value', False),
}
"""Mapping of statements to the YIN representation of their arguments.
The values are pairs whose first component is the name of the
attribute or subelement carrying the argument, and whose second
component specifies whether the argument is stored in a subelement
(yin-element). See the YANG specification.
"""
| isc |
Maccimo/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/convcmd.py | 90 | 17238 | # convcmd - convert extension commands definition
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, MissingTool, SKIPREV, mapfile
from cvs import convert_cvs
from darcs import darcs_source
from git import convert_git
from hg import mercurial_source, mercurial_sink
from subversion import svn_source, svn_sink
from monotone import monotone_source
from gnuarch import gnuarch_source
from bzr import bzr_source
from p4 import p4_source
import filemap, common
import os, shutil
from mercurial import hg, util, encoding
from mercurial.i18n import _
orig_encoding = 'ascii'
def recode(s):
if isinstance(s, unicode):
return s.encode(orig_encoding, 'replace')
else:
return s.decode('utf-8').encode(orig_encoding, 'replace')
source_converters = [
('cvs', convert_cvs, 'branchsort'),
('git', convert_git, 'branchsort'),
('svn', svn_source, 'branchsort'),
('hg', mercurial_source, 'sourcesort'),
('darcs', darcs_source, 'branchsort'),
('mtn', monotone_source, 'branchsort'),
('gnuarch', gnuarch_source, 'branchsort'),
('bzr', bzr_source, 'branchsort'),
('p4', p4_source, 'branchsort'),
]
sink_converters = [
('hg', mercurial_sink),
('svn', svn_sink),
]
def convertsource(ui, path, type, rev):
exceptions = []
if type and type not in [s[0] for s in source_converters]:
raise util.Abort(_('%s: invalid source repository type') % type)
for name, source, sortmode in source_converters:
try:
if not type or name == type:
return source(ui, path, rev), sortmode
except (NoRepo, MissingTool), inst:
exceptions.append(inst)
if not ui.quiet:
for inst in exceptions:
ui.write("%s\n" % inst)
raise util.Abort(_('%s: missing or unsupported repository') % path)
def convertsink(ui, path, type):
if type and type not in [s[0] for s in sink_converters]:
raise util.Abort(_('%s: invalid destination repository type') % type)
for name, sink in sink_converters:
try:
if not type or name == type:
return sink(ui, path)
except NoRepo, inst:
ui.note(_("convert: %s\n") % inst)
except MissingTool, inst:
raise util.Abort('%s\n' % inst)
raise util.Abort(_('%s: unknown repository type') % path)
class progresssource(object):
def __init__(self, ui, source, filecount):
self.ui = ui
self.source = source
self.filecount = filecount
self.retrieved = 0
def getfile(self, file, rev):
self.retrieved += 1
self.ui.progress(_('getting files'), self.retrieved,
item=file, total=self.filecount)
return self.source.getfile(file, rev)
def lookuprev(self, rev):
return self.source.lookuprev(rev)
def close(self):
self.ui.progress(_('getting files'), None)
class converter(object):
def __init__(self, ui, source, dest, revmapfile, opts):
self.source = source
self.dest = dest
self.ui = ui
self.opts = opts
self.commitcache = {}
self.authors = {}
self.authorfile = None
# Record converted revisions persistently: maps source revision
# ID to target revision ID (both strings). (This is how
# incremental conversions work.)
self.map = mapfile(ui, revmapfile)
# Read first the dst author map if any
authorfile = self.dest.authorfile()
if authorfile and os.path.exists(authorfile):
self.readauthormap(authorfile)
# Extend/Override with new author map if necessary
if opts.get('authormap'):
self.readauthormap(opts.get('authormap'))
self.authorfile = self.dest.authorfile()
self.splicemap = common.parsesplicemap(opts.get('splicemap'))
self.branchmap = mapfile(ui, opts.get('branchmap'))
def walktree(self, heads):
'''Return a mapping that identifies the uncommitted parents of every
uncommitted changeset.'''
visit = heads
known = set()
parents = {}
while visit:
n = visit.pop(0)
if n in known or n in self.map:
continue
known.add(n)
self.ui.progress(_('scanning'), len(known), unit=_('revisions'))
commit = self.cachecommit(n)
parents[n] = []
for p in commit.parents:
parents[n].append(p)
visit.append(p)
self.ui.progress(_('scanning'), None)
return parents
def mergesplicemap(self, parents, splicemap):
"""A splicemap redefines child/parent relationships. Check the
map contains valid revision identifiers and merge the new
links in the source graph.
"""
for c in sorted(splicemap):
if c not in parents:
if not self.dest.hascommit(self.map.get(c, c)):
# Could be in source but not converted during this run
self.ui.warn(_('splice map revision %s is not being '
'converted, ignoring\n') % c)
continue
pc = []
for p in splicemap[c]:
# We do not have to wait for nodes already in dest.
if self.dest.hascommit(self.map.get(p, p)):
continue
# Parent is not in dest and not being converted, not good
if p not in parents:
raise util.Abort(_('unknown splice map parent: %s') % p)
pc.append(p)
parents[c] = pc
def toposort(self, parents, sortmode):
'''Return an ordering such that every uncommitted changeset is
preceded by all its uncommitted ancestors.'''
def mapchildren(parents):
"""Return a (children, roots) tuple where 'children' maps parent
revision identifiers to children ones, and 'roots' is the list of
revisions without parents. 'parents' must be a mapping of revision
identifier to its parents ones.
"""
visit = sorted(parents)
seen = set()
children = {}
roots = []
while visit:
n = visit.pop(0)
if n in seen:
continue
seen.add(n)
# Ensure that nodes without parents are present in the
# 'children' mapping.
children.setdefault(n, [])
hasparent = False
for p in parents[n]:
if p not in self.map:
visit.append(p)
hasparent = True
children.setdefault(p, []).append(n)
if not hasparent:
roots.append(n)
return children, roots
# Sort functions are supposed to take a list of revisions which
# can be converted immediately and pick one
def makebranchsorter():
"""If the previously converted revision has a child in the
eligible revisions list, pick it. Return the list head
otherwise. Branch sort attempts to minimize branch
switching, which is harmful for Mercurial backend
compression.
"""
prev = [None]
def picknext(nodes):
next = nodes[0]
for n in nodes:
if prev[0] in parents[n]:
next = n
break
prev[0] = next
return next
return picknext
def makesourcesorter():
"""Source specific sort."""
keyfn = lambda n: self.commitcache[n].sortkey
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makeclosesorter():
"""Close order sort."""
keyfn = lambda n: ('close' not in self.commitcache[n].extra,
self.commitcache[n].sortkey)
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makedatesorter():
"""Sort revisions by date."""
dates = {}
def getdate(n):
if n not in dates:
dates[n] = util.parsedate(self.commitcache[n].date)
return dates[n]
def picknext(nodes):
return min([(getdate(n), n) for n in nodes])[1]
return picknext
if sortmode == 'branchsort':
picknext = makebranchsorter()
elif sortmode == 'datesort':
picknext = makedatesorter()
elif sortmode == 'sourcesort':
picknext = makesourcesorter()
elif sortmode == 'closesort':
picknext = makeclosesorter()
else:
raise util.Abort(_('unknown sort mode: %s') % sortmode)
children, actives = mapchildren(parents)
s = []
pendings = {}
while actives:
n = picknext(actives)
actives.remove(n)
s.append(n)
# Update dependents list
for c in children.get(n, []):
if c not in pendings:
pendings[c] = [p for p in parents[c] if p not in self.map]
try:
pendings[c].remove(n)
except ValueError:
raise util.Abort(_('cycle detected between %s and %s')
% (recode(c), recode(n)))
if not pendings[c]:
# Parents are converted, node is eligible
actives.insert(0, c)
pendings[c] = None
if len(s) != len(parents):
raise util.Abort(_("not all revisions were sorted"))
return s
def writeauthormap(self):
authorfile = self.authorfile
if authorfile:
self.ui.status(_('writing author map file %s\n') % authorfile)
ofile = open(authorfile, 'w+')
for author in self.authors:
ofile.write("%s=%s\n" % (author, self.authors[author]))
ofile.close()
def readauthormap(self, authorfile):
afile = open(authorfile, 'r')
for line in afile:
line = line.strip()
if not line or line.startswith('#'):
continue
try:
srcauthor, dstauthor = line.split('=', 1)
except ValueError:
msg = _('ignoring bad line in author map file %s: %s\n')
self.ui.warn(msg % (authorfile, line.rstrip()))
continue
srcauthor = srcauthor.strip()
dstauthor = dstauthor.strip()
if self.authors.get(srcauthor) in (None, dstauthor):
msg = _('mapping author %s to %s\n')
self.ui.debug(msg % (srcauthor, dstauthor))
self.authors[srcauthor] = dstauthor
continue
m = _('overriding mapping for author %s, was %s, will be %s\n')
self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
afile.close()
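# An author map file, as read above, holds one "source=destination" pair per
# line and allows '#' comment lines, e.g. (illustrative):
#   jdoe=John Doe <jdoe@example.com>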
def cachecommit(self, rev):
commit = self.source.getcommit(rev)
commit.author = self.authors.get(commit.author, commit.author)
commit.branch = self.branchmap.get(commit.branch, commit.branch)
self.commitcache[rev] = commit
return commit
def copy(self, rev):
commit = self.commitcache[rev]
changes = self.source.getchanges(rev)
if isinstance(changes, basestring):
if changes == SKIPREV:
dest = SKIPREV
else:
dest = self.map[changes]
self.map[rev] = dest
return
files, copies = changes
pbranches = []
if commit.parents:
for prev in commit.parents:
if prev not in self.commitcache:
self.cachecommit(prev)
pbranches.append((self.map[prev],
self.commitcache[prev].branch))
self.dest.setbranch(commit.branch, pbranches)
try:
parents = self.splicemap[rev]
self.ui.status(_('spliced in %s as parents of %s\n') %
(parents, rev))
parents = [self.map.get(p, p) for p in parents]
except KeyError:
parents = [b[0] for b in pbranches]
source = progresssource(self.ui, self.source, len(files))
newnode = self.dest.putcommit(files, copies, parents, commit,
source, self.map)
source.close()
self.source.converted(rev, newnode)
self.map[rev] = newnode
def convert(self, sortmode):
try:
self.source.before()
self.dest.before()
self.source.setrevmap(self.map)
self.ui.status(_("scanning source...\n"))
heads = self.source.getheads()
parents = self.walktree(heads)
self.mergesplicemap(parents, self.splicemap)
self.ui.status(_("sorting...\n"))
t = self.toposort(parents, sortmode)
num = len(t)
c = None
self.ui.status(_("converting...\n"))
for i, c in enumerate(t):
num -= 1
desc = self.commitcache[c].desc
if "\n" in desc:
desc = desc.splitlines()[0]
# convert log message to local encoding without using
# tolocal(), because the encoding.encoding that convert()
# uses is 'utf-8'
self.ui.status("%d %s\n" % (num, recode(desc)))
self.ui.note(_("source: %s\n") % recode(c))
self.ui.progress(_('converting'), i, unit=_('revisions'),
total=len(t))
self.copy(c)
self.ui.progress(_('converting'), None)
tags = self.source.gettags()
ctags = {}
for k in tags:
v = tags[k]
if self.map.get(v, SKIPREV) != SKIPREV:
ctags[k] = self.map[v]
if c and ctags:
nrev, tagsparent = self.dest.puttags(ctags)
if nrev and tagsparent:
# write another hash correspondence to override the previous
# one so we don't end up with extra tag heads
tagsparents = [e for e in self.map.iteritems()
if e[1] == tagsparent]
if tagsparents:
self.map[tagsparents[0][0]] = nrev
bookmarks = self.source.getbookmarks()
cbookmarks = {}
for k in bookmarks:
v = bookmarks[k]
if self.map.get(v, SKIPREV) != SKIPREV:
cbookmarks[k] = self.map[v]
if c and cbookmarks:
self.dest.putbookmarks(cbookmarks)
self.writeauthormap()
finally:
self.cleanup()
def cleanup(self):
try:
self.dest.after()
finally:
self.source.after()
self.map.close()
def convert(ui, src, dest=None, revmapfile=None, **opts):
global orig_encoding
orig_encoding = encoding.encoding
encoding.encoding = 'UTF-8'
# support --authors as an alias for --authormap
if not opts.get('authormap'):
opts['authormap'] = opts.get('authors')
if not dest:
dest = hg.defaultdest(src) + "-hg"
ui.status(_("assuming destination %s\n") % dest)
destc = convertsink(ui, dest, opts.get('dest_type'))
try:
srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
opts.get('rev'))
except Exception:
for path in destc.created:
shutil.rmtree(path, True)
raise
sortmodes = ('branchsort', 'datesort', 'sourcesort', 'closesort')
sortmode = [m for m in sortmodes if opts.get(m)]
if len(sortmode) > 1:
raise util.Abort(_('more than one sort mode specified'))
sortmode = sortmode and sortmode[0] or defaultsort
if sortmode == 'sourcesort' and not srcc.hasnativeorder():
raise util.Abort(_('--sourcesort is not supported by this data source'))
if sortmode == 'closesort' and not srcc.hasnativeclose():
raise util.Abort(_('--closesort is not supported by this data source'))
fmap = opts.get('filemap')
if fmap:
srcc = filemap.filemap_source(ui, srcc, fmap)
destc.setfilemapmode(True)
if not revmapfile:
try:
revmapfile = destc.revmapfile()
except Exception:
revmapfile = os.path.join(destc, "map")
c = converter(ui, srcc, destc, revmapfile, opts)
c.convert(sortmode)
| apache-2.0 |
Centreon-Community/centreon-discovery | modPython/setuptools-0.6c11/setuptools/command/easy_install.py | 32 | 63580 | #!python
"""\
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ http://peak.telecommunity.com/DevCenter/EasyInstall
"""
import sys, os.path, zipimport, shutil, tempfile, zipfile, re, stat, random
from glob import glob
from setuptools import Command
from setuptools.sandbox import run_setup
from distutils import log, dir_util
from distutils.sysconfig import get_python_lib
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex, parse_bdist_wininst
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from pkg_resources import *
sys_executable = os.path.normpath(sys.executable)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def samefile(p1,p2):
if hasattr(os.path,'samefile') and (
os.path.exists(p1) and os.path.exists(p2)
):
return os.path.samefile(p1,p2)
return (
os.path.normpath(os.path.normcase(p1)) ==
os.path.normpath(os.path.normcase(p2))
)
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("delete-conflicting", "D", "no longer needed; don't use this"),
("ignore-conflicts-at-my-risk", None,
"no longer needed; don't use this"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l', "allow building eggs from local checkouts"),
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'delete-conflicting', 'ignore-conflicts-at-my-risk', 'editable',
'no-deps', 'local-snapshots-ok',
]
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.delete_conflicting = None
self.ignore_conflicts_at_my_risk = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
self._expand('install_dir','script_dir','build_directory','site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options('install_lib',
('install_dir','install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options('install_scripts',
('install_dir', 'script_dir')
)
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d+" (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable: self.check_site_dir()
self.index_url = self.index_url or "http://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path = self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path+sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path+sys.path)
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize','optimize'))
if not isinstance(self.optimize,int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2): raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.delete_conflicting and self.ignore_conflicts_at_my_risk:
raise DistutilsOptionError(
"Can't use both --delete-conflicting and "
"--ignore-conflicts-at-my-risk at the same time"
)
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def run(self):
if self.verbose!=self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0,sys.maxint)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
if self.delete_conflicting or self.ignore_conflicts_at_my_risk:
log.warn(
"Note: The -D, --delete-conflicting and"
" --ignore-conflicts-at-my-risk no longer have any purpose"
" and should not be used."
)
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir,'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname()+'.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists: os.unlink(testfile)
open(testfile,'w').close()
os.unlink(testfile)
except (OSError,IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
if instdir not in map(normalize_path, filter(None,PYTHONPATH)):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
msg = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""" % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
http://peak.telecommunity.com/EasyInstall.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname()+".pth"
ok_file = pth_file+'.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists: os.unlink(ok_file)
f = open(pth_file,'w')
except (OSError,IOError):
self.cant_write_to_target()
else:
try:
f.write("import os;open(%r,'w').write('OK')\n" % (ok_file,))
f.close(); f=None
executable = sys.executable
if os.name=='nt':
dirname,basename = os.path.split(executable)
alt = os.path.join(dirname,'pythonw.exe')
if basename.lower()=='python.exe' and os.path.exists(alt):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable,'-E','-c','pass'],0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f: f.close()
if os.path.exists(ok_file): os.unlink(ok_file)
if os.path.exists(pth_file): os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable: self.install_site_py()
try:
if not isinstance(spec,Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg+=" (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence==DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location==download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.check_conflicts(self.egg_distribution(download))]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if dist.has_metadata('dependency_links.txt'):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound, e:
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict, e:
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
log.warn(
"%r already exists in %s; build directory %s will not be kept",
spec.key, self.build_directory, setup_base
)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename)==setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents)==1:
dist_filename = os.path.join(setup_base,contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst); shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script and dev_path:
script_text = get_script_header(script_text) + (
"# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r\n"
"__requires__ = %(spec)r\n"
"from pkg_resources import require; require(%(spec)r)\n"
"del require\n"
"__file__ = %(dev_path)r\n"
"execfile(__file__)\n"
) % locals()
elif is_script:
script_text = get_script_header(script_text) + (
"# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n"
"__requires__ = %(spec)r\n"
"import pkg_resources\n"
"pkg_resources.run_script(%(spec)r, %(script_name)r)\n"
) % locals()
self.write_script(script_name, script_text, 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target,0755)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None
):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
)
if len(setups)>1:
raise DistutilsError(
"Multiple setup scripts in %s" % os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
self.check_conflicts(dist)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink,(destination,),"Removing "+destination)
uncache_zipdir(destination)
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f,m = self.unpack_and_compile, "Extracting"
elif egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m+" %s to %s") %
(os.path.basename(egg_path),os.path.dirname(destination)))
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(None,
project_name=cfg.get('metadata','name'),
version=cfg.get('metadata','version'), platform="win32"
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
egg_tmp = egg_path+'.tmp'
egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf,'w')
f.write('Metadata-Version: 1.0\n')
for k,v in cfg.items('metadata'):
if k!='target_version':
f.write('%s: %s\n' % (k.replace('_','-').title(), v))
f.close()
script_dir = os.path.join(egg_info,'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src,dst):
s = src.lower()
for old,new in prefixes:
if s.startswith(old):
src = new+src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old!='SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile); stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level','native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
if not os.path.exists(txt):
open(txt,'w').write('\n'.join(locals()[name])+'\n')
def check_conflicts(self, dist):
"""Verify that there are no conflicting "old-style" packages"""
return dist # XXX temporarily disable until new strategy is stable
from imp import find_module, get_suffixes
from glob import glob
blockers = []
names = dict.fromkeys(dist._get_metadata('top_level.txt')) # XXX private attr
exts = {'.pyc':1, '.pyo':1} # get_suffixes() might leave one out
for ext,mode,typ in get_suffixes():
exts[ext] = 1
for path,files in expand_paths([self.install_dir]+self.all_site_dirs):
for filename in files:
base,ext = os.path.splitext(filename)
if base in names:
if not ext:
# no extension, check for package
try:
f, filename, descr = find_module(base, [path])
except ImportError:
continue
else:
if f: f.close()
if filename not in blockers:
blockers.append(filename)
elif ext in exts and base!='site': # XXX ugh
blockers.append(os.path.join(path,filename))
if blockers:
self.found_conflicts(dist, blockers)
return dist
def found_conflicts(self, dist, blockers):
if self.delete_conflicting:
log.warn("Attempting to delete conflicting packages:")
return self.delete_blockers(blockers)
msg = """\
-------------------------------------------------------------------------
CONFLICT WARNING:
The following modules or packages have the same names as modules or
packages being installed, and will be *before* the installed packages in
Python's search path. You MUST remove all of the relevant files and
directories before you will be able to use the package(s) you are
installing:
%s
""" % '\n '.join(blockers)
if self.ignore_conflicts_at_my_risk:
msg += """\
(Note: you can run EasyInstall on '%s' with the
--delete-conflicting option to attempt deletion of the above files
and/or directories.)
""" % dist.project_name
else:
msg += """\
Note: you can attempt this installation again with EasyInstall, and use
either the --delete-conflicting (-D) option or the
--ignore-conflicts-at-my-risk option, to either delete the above files
and directories, or to ignore the conflicts, respectively. Note that if
you ignore the conflicts, the installed package(s) may not work.
"""
msg += """\
-------------------------------------------------------------------------
"""
sys.stderr.write(msg)
sys.stderr.flush()
if not self.ignore_conflicts_at_my_risk:
raise DistutilsError("Installation aborted due to conflicts")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path,sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit, v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def update_pth(self,dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key=='setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir,'setuptools.pth')
if os.path.islink(filename): os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location)+'\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []; to_chmod = []
def pf(src,dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src,dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0555) & 07755
chmod(f, mode)
def byte_compile(self, to_compile):
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
return """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
http://peak.telecommunity.com/EasyInstall.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""" % (
self.install_dir, os.environ.get('PYTHONPATH','')
)
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string(Requirement.parse("setuptools"), "site.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
current = open(sitepy,'rb').read()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy,'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
INSTALL_SCHEMES = dict(
posix = dict(
install_dir = '$base/lib/python$py_version_short/site-packages',
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir = '$base/Lib/site-packages',
script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
for attr,val in scheme.items():
if getattr(self,attr,None) is None:
setattr(self,attr,val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = filter(None,os.environ.get('PYTHONPATH','').split(os.pathsep))
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for plat_specific in (0,1):
site_lib = get_python_lib(plat_specific)
if site_lib not in sitedirs: sitedirs.append(site_lib)
sitedirs = map(normalize_path, sitedirs)
return sitedirs
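# Illustrative result (not from the original source): on a typical CPython 2.x
# Linux install this returns normalized paths along the lines of
# ['/usr/lib/python2.7/site-packages', '/usr/lib/site-python', ...] plus any
# PYTHONPATH entries, while on Windows it would include entries such as
# 'C:\\Python27\\Lib\\site-packages'. Exact paths vary by platform and prefix.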
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth','setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname,name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
import struct, StringIO, ConfigParser
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
cfg.readfp(StringIO.StringIO(f.read(cfglen).split(chr(0),1)[0]))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/')
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts)==3 and parts[2]=='PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts)!=2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB','PLATLIB'):
for pth in yield_lines(z.read(name)):
pth = pth.strip().replace('\\','/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
finally:
z.close()
prefixes = [(x.lower(),y) for x, y in prefixes]
prefixes.sort(); prefixes.reverse()
return prefixes
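# Worked example (hypothetical file names, derived from the defaults above):
# after lower-casing, an archive member 'PLATLIB/foo/bar.py' maps to
# 'foo/bar.py' inside the egg, 'SCRIPTS/run_foo.py' maps to
# 'EGG-INFO/scripts/run_foo.py', and a 'PURELIB/foo-1.0.egg-info/PKG-INFO'
# entry causes that .egg-info directory to be remapped to 'EGG-INFO/'.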
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename; self.sitedirs=map(normalize_path, sitedirs)
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load(); Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
map(self.add, find_distributions(path, True))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
for line in open(self.filename,'rt'):
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir,path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative,self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename,'wb')
f.write(data); f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
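# Illustrative .pth layout (not from the original source; the egg file name
# below is hypothetical). With one entry on self.paths, save() writes
# something equivalent to:
#
# import sys; sys.__plen = len(sys.path)
# ./Foo-1.0-py2.7.egg
# import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
#
# The wrapper lines relocate the listed entries to a fixed insertion point in
# sys.path so the eggs take precedence over the default site-packages order.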
def add(self,dist):
"""Add `dist` to the distribution map"""
if dist.location not in self.paths and dist.location not in self.sitedirs:
self.paths.append(dist.location); self.dirty = True
Environment.add(self,dist)
def remove(self,dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location); self.dirty = True
Environment.remove(self,dist)
def make_relative(self,path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep=='/' and '/' or os.sep
while len(npath)>=baselen:
if npath==self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if unicode(hdr,'ascii','ignore').encode('ascii') != hdr:
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
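# Rough usage sketch (illustrative; assumes a non-Jython interpreter with an
# ASCII path, and the path shown is hypothetical):
#
# >>> get_script_header('#!python\nprint "hi"\n', executable='/usr/bin/python')
# '#!/usr/bin/python\n'
#
# With wininst=True the interpreter is forced to 'python.exe' so the header is
# valid inside a bdist_wininst installer.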
def auto_chmod(func, arg, exc):
if func is os.remove and os.name=='nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
exc = sys.exc_info()
raise exc[0], (exc[1][0], exc[1][1] + (" %s %s" % (func,arg)))
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache)
def _uncache(path, cache):
if path in cache:
del cache[path]
else:
path = normalize_path(path)
for p in cache:
if normalize_path(p)==path:
del cache[p]
return
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError,IOError): return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb*2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
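# Behaviour sketch (illustrative values, not from the original test suite):
#
# >>> nt_quote_arg('simple')
# 'simple'
# >>> nt_quote_arg(r'C:\Program Files\python.exe')
# '"C:\\Program Files\\python.exe"'
#
# Arguments without whitespace pass through untouched; arguments containing
# spaces are wrapped in double quotes, with embedded quotes and trailing
# backslashes escaped according to the Windows command-line parsing rules.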
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args): pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error, e:
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn("WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
def get_script_args(dist, executable=sys_executable, wininst=False):
"""Yield write_script() argument tuples for a distribution's entrypoints"""
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for group in 'console_scripts', 'gui_scripts':
for name,ep in dist.get_entry_map(group).items():
script_text = (
"# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n"
"__requires__ = %(spec)r\n"
"import sys\n"
"from pkg_resources import load_entry_point\n"
"\n"
"sys.exit(\n"
" load_entry_point(%(spec)r, %(group)r, %(name)r)()\n"
")\n"
) % locals()
if sys.platform=='win32' or wininst:
# On Windows/wininst, add a .py extension and an .exe launcher
if group=='gui_scripts':
ext, launcher = '-script.pyw', 'gui.exe'
old = ['.pyw']
new_header = re.sub('(?i)python.exe','pythonw.exe',header)
else:
ext, launcher = '-script.py', 'cli.exe'
old = ['.py','.pyc','.pyo']
new_header = re.sub('(?i)pythonw.exe','python.exe',header)
if os.path.exists(new_header[2:-1]) or sys.platform!='win32':
hdr = new_header
else:
hdr = header
yield (name+ext, hdr+script_text, 't', [name+x for x in old])
yield (
name+'.exe', resource_string('setuptools', launcher),
'b') # write in binary mode
yield (name+'.exe.manifest', _launcher_manifest % (name,), 't')
else:
# On other platforms, we assume the right thing to do is to
# just write the stub with no extension.
yield (name, header+script_text)
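# Illustrative output (hypothetical distribution 'Foo 1.0' with a
# console_scripts entry point named 'foo'): the stub body produced by the
# template above would read roughly:
#
# # EASY-INSTALL-ENTRY-SCRIPT: 'Foo==1.0','console_scripts','foo'
# __requires__ = 'Foo==1.0'
# import sys
# from pkg_resources import load_entry_point
#
# sys.exit(
# load_entry_point('Foo==1.0', 'console_scripts', 'foo')()
# )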
_launcher_manifest = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s.exe"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>"""
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error, err:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools; argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0; sys.argv.append(argv0); main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self,*args,**kw):
with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(lambda:
setup(
script_args = ['-q','easy_install', '-v']+argv,
script_name = sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
| gpl-2.0 |
mathslinux/ceilometer | ceilometer/event/storage/pymongo_base.py | 9 | 5978 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""
from oslo_log import log
import pymongo
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
COMMON_AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base event Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def record_events(self, event_models):
"""Write the events to database.
:param event_models: a list of models.Event objects.
"""
error = None
for event_model in event_models:
traits = []
if event_model.traits:
for trait in event_model.traits:
traits.append({'trait_name': trait.name,
'trait_type': trait.dtype,
'trait_value': trait.value})
try:
self.db.event.insert_one(
{'_id': event_model.message_id,
'event_type': event_model.event_type,
'timestamp': event_model.generated,
'traits': traits, 'raw': event_model.raw})
except pymongo.errors.DuplicateKeyError as ex:
LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex)
except Exception as ex:
LOG.exception(_LE("Failed to record event: %s") % ex)
error = ex
if error:
raise error
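# Illustrative document shape written by insert_one() above (field values are
# hypothetical):
# {'_id': '9f2e...', 'event_type': 'compute.instance.create.end',
# 'timestamp': datetime(...), 'raw': {...},
# 'traits': [{'trait_name': 'instance_id', 'trait_type': 1,
# 'trait_value': 'uuid-...'}]}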
def get_events(self, event_filter, limit=None):
"""Return an iter of models.Event objects.
:param event_filter: storage.EventFilter object, consists of filters
for events that are stored in database.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return
q = pymongo_utils.make_events_query_from_filter(event_filter)
if limit is not None:
results = self.db.event.find(q, limit=limit)
else:
results = self.db.event.find(q)
for event in results:
traits = []
for trait in event['traits']:
traits.append(models.Trait(name=trait['trait_name'],
dtype=int(trait['trait_type']),
value=trait['trait_value']))
yield models.Event(message_id=event['_id'],
event_type=event['event_type'],
generated=event['timestamp'],
traits=traits, raw=event.get('raw'))
def get_event_types(self):
"""Return all event types as an iter of strings."""
return self.db.event.distinct('event_type')
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event.
"""
trait_names = set()
events = self.db.event.find({'event_type': event_type})
for event in events:
for trait in event['traits']:
trait_name = trait['trait_name']
if trait_name not in trait_names:
# Yield each trait name only once. A given trait name is
# assumed to map to a single trait type, so the first
# occurrence is sufficient.
trait_names.add(trait_name)
yield {'name': trait_name,
'data_type': trait['trait_type']}
def get_traits(self, event_type, trait_name=None):
"""Return all trait instances associated with an event_type.
If trait_name is specified, only return instances of that trait.
:param event_type: the type of the Event to filter by
:param trait_name: the name of the Trait to filter by
"""
if not trait_name:
events = self.db.event.find({'event_type': event_type})
else:
# Select events that have both the requested event_type and the
# given trait_name, and project only the matching traits.
events = self.db.event.find({'$and': [{'event_type': event_type},
{'traits.trait_name': trait_name}]},
{'traits': {'$elemMatch':
{'trait_name': trait_name}}
})
for event in events:
for trait in event['traits']:
yield models.Trait(name=trait['trait_name'],
dtype=trait['trait_type'],
value=trait['trait_value'])
| apache-2.0 |
SRLFoundry/HookedGL | HookedGL/Communication.py | 1 | 3837 | import socket
import sys
import math
def bytes_needed(n):
if type(n) is str:
return len(n)
elif type(n) is list:
if type(n[0]) is int:
return 4 * len(n)
if n == 0:
return 1
return int(math.log(n, 256)) + 1
class Communication:
def __init__(self, address, port):
self.port = port
self.address = address
# Create the socket used to send data.
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def send(self, data):
#print("sending: ", data)
self.sock.sendall(data)
def recv(self):
pass
def connect(self):
# Initialize the connection to the server.
self.sock.connect((self.address, self.port))
# Send an inquiry to the server to see if it exists.
self.send(bytes([5]))
# Wait for the server to send back an ACK.
if self.sock.recv(1) != b'\x06' :
print("connection hand-shake failed")
else :
print("connected")
def disconnect(self):
self.sock.close()
def pack(self, cid, args):
# magic number "-1" is because the protocol starts at 1 rather than 0
# Create the first byte that will contain the cid and
# number of arguments.
header = bytes([((cid << 4) & 0xff) | ((((len(args)-1) << 4) & 0xff) >> 4)])
data = header
# This appends the length of the various args to the data to be sent.
# The bytesneeded function handles different types.
for arg in args:
data += bytes([bytes_needed(arg)-1])
# This appends the actual data after the lengths are added.
for arg in args:
if type(arg) is str:
data += bytearray(arg, encoding = 'ascii')
elif type(arg) is int:
data += arg.to_bytes(bytes_needed(arg), byteorder = 'big')
elif type(arg) is list:
for element in arg :
data += element.to_bytes(4, byteorder = 'little')
# Send the data after packing.
self.send(data)
r = self.sock.recv(2)
if r[0] == 6: #ACK
return r[1]
elif r[0] == 21: #NAK
print("Error, server could not read request; error code: " + str(r[1]), file=sys.stderr)
return -1
else :
return r[0]
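# Worked example (illustrative, not part of the original file): pack(1,
# [0xff00ff00]) builds a header byte of 0x10 (cid=1 in the high nibble,
# len(args)-1 == 0 in the low nibble), one length byte 0x03
# (bytes_needed(0xff00ff00) - 1), then the big-endian payload, so the bytes
# sent on the wire are: 10 03 ff 00 ff 00.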
lol = Communication('127.0.0.1', 3333)
lol.connect()
print("index: " + str(lol.pack(0,[[0,0,10,0xff]])))
print("index: " + str(lol.pack(0,[[0,0,10,0x100]])))
print("index: " + str(lol.pack(0,[[0,0,10,0xffff]])))
print("index: " + str(lol.pack(0,[[0,0,10,0x10000]])))
print("index: " + str(lol.pack(1, [0xff00ff00])))
print("index: " + str(lol.pack(0,[[0,0,10,0xff,1],1])))
print("index: " + str(lol.pack(10, [0xff00ff00])))
print("index: " + str(lol.pack(1, [0xffffff00])))
lol.disconnect()
| gpl-2.0 |
santidediego/LearningDjango | lib/python3.5/site-packages/django/conf/locale/de_CH/formats.py | 115 | 1445 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| mit |
youprofit/servo | tests/wpt/css-tests/tools/pywebsocket/src/setup.py | 434 | 2863 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup, Extension
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
# Build and use a C++ extension for faster masking. SWIG is required.
_USE_FAST_MASKING = False
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
if _USE_FAST_MASKING:
setup(ext_modules=[
Extension(
'mod_pywebsocket/_fast_masking',
['mod_pywebsocket/fast_masking.i'],
swig_opts=['-c++'])])
setup(author='Yuzo Fujishima',
author_email='[email protected]',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'the WebSocket Protocol (RFC 6455). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.7.9',
)
# vi:sts=4 sw=4 et
| mpl-2.0 |
cgvarela/graphite-web | webapp/graphite/events/models.py | 33 | 1383 | import os
from django.db import models
from tagging.managers import ModelTaggedItemManager
from tagging.models import Tag
if os.environ.get('READTHEDOCS'):
TagField = lambda *args, **kwargs: None
else:
from tagging.fields import TagField
class Event(models.Model):
when = models.DateTimeField()
what = models.CharField(max_length=255)
data = models.TextField(blank=True)
tags = TagField(default="")
def get_tags(self):
return Tag.objects.get_for_object(self)
def __str__(self):
return "%s: %s" % (self.when, self.what)
@staticmethod
def find_events(time_from=None, time_until=None, tags=None):
if tags is not None:
query = Event.tagged.with_all(tags)
else:
query = Event.objects.all()
if time_from is not None:
query = query.filter(when__gte=time_from)
if time_until is not None:
query = query.filter(when__lte=time_until)
result = list(query.order_by("when"))
return result
def as_dict(self):
return dict(
when=self.when,
what=self.what,
data=self.data,
tags=self.tags,
id=self.id,
)
# We use this rather than tagging.register() so that tags can be exposed
# in the admin UI
ModelTaggedItemManager().contribute_to_class(Event, 'tagged')
| apache-2.0 |
rkmaddox/mne-python | mne/gui/_backend.py | 11 | 1500 | """Deal with pyface backend issues."""
# Author: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
import os
import sys
from ..utils import warn, _check_pyqt5_version
def _get_pyface_backend():
"""Check the currently selected Pyface backend.
Returns
-------
backend : str
Name of the backend.
result : 0 | 1 | 2
0: the backend has been tested and works.
1: the backend has not been tested.
2: the backend is known to not work.
Notes
-----
See also http://docs.enthought.com/pyface/.
"""
from traitsui.toolkit import toolkit
from traits.etsconfig.api import ETSConfig
toolkit()
return ETSConfig.toolkit
def _check_backend():
from pyface.api import warning
backend = _get_pyface_backend()
if backend == 'qt4':
_check_pyqt5_version()
# QT 5 macOS 11 compatibility:
if sys.platform == 'darwin' and 'QT_MAC_WANTS_LAYER' not in os.environ:
os.environ['QT_MAC_WANTS_LAYER'] = '1'
else:
msg = ("Using the currently selected Pyface backend %s is not "
"recommended, and it might not work properly. We recommend "
"using 'qt4' which can be enabled by installing the PyQt5"
"package." % backend)
warn(msg)
warning(None, msg, "Pyface Backend Warning")
def _testing_mode():
"""Determine if we're running tests."""
return os.getenv('_MNE_GUI_TESTING_MODE', '') == 'true'
| bsd-3-clause |
seem-sky/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_struct_fields.py | 264 | 1503 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
# Structure/Union classes must get 'finalized' sooner or
# later, when one of these things happen:
#
# 1. _fields_ is set.
# 2. An instance is created.
# 3. The type is used as field of another Structure/Union.
# 4. The type is subclassed
#
# When they are finalized, assigning _fields_ is no longer allowed.
def test_1_A(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0) # not finalized
X._fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_1_B(self):
class X(Structure):
_fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_2(self):
class X(Structure):
pass
X()
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_3(self):
class X(Structure):
pass
class Y(Structure):
_fields_ = [("x", X)] # finalizes X
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_4(self):
class X(Structure):
pass
class Y(X):
pass
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
Y._fields_ = []
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
mahak/neutron | neutron/plugins/ml2/extensions/qos.py | 5 | 2024 | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins.ml2 import api
from oslo_log import log as logging
from neutron.core_extensions import base as base_core
from neutron.core_extensions import qos as qos_core
LOG = logging.getLogger(__name__)
QOS_EXT_DRIVER_ALIAS = 'qos'
class QosExtensionDriver(api.ExtensionDriver):
def initialize(self):
self.core_ext_handler = qos_core.QosCoreResourceExtension()
LOG.debug("QosExtensionDriver initialization complete")
def process_create_network(self, context, data, result):
self.core_ext_handler.process_fields(
context, base_core.NETWORK, base_core.EVENT_CREATE, data, result)
def process_update_network(self, context, data, result):
self.core_ext_handler.process_fields(
context, base_core.NETWORK, base_core.EVENT_UPDATE, data, result)
def process_create_port(self, context, data, result):
self.core_ext_handler.process_fields(
context, base_core.PORT, base_core.EVENT_UPDATE, data, result)
process_update_port = process_create_port
def extend_network_dict(self, session, db_data, result):
result.update(
self.core_ext_handler.extract_fields(
base_core.NETWORK, db_data))
def extend_port_dict(self, session, db_data, result):
result.update(
self.core_ext_handler.extract_fields(base_core.PORT, db_data))
| apache-2.0 |
matthaywardwebdesign/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/security.py | 145 | 4264 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the security helpers.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.security import check_password_hash, generate_password_hash, \
safe_join, pbkdf2_hex, safe_str_cmp
class SecurityTestCase(WerkzeugTestCase):
def test_safe_str_cmp(self):
assert safe_str_cmp('a', 'a') is True
assert safe_str_cmp(b'a', u'a') is True
assert safe_str_cmp('a', 'b') is False
assert safe_str_cmp(b'aaa', 'aa') is False
assert safe_str_cmp(b'aaa', 'bbb') is False
assert safe_str_cmp(b'aaa', u'aaa') is True
def test_password_hashing(self):
hash0 = generate_password_hash('default')
assert check_password_hash(hash0, 'default')
assert hash0.startswith('pbkdf2:sha1:1000$')
hash1 = generate_password_hash('default', 'sha1')
hash2 = generate_password_hash(u'default', method='sha1')
assert hash1 != hash2
assert check_password_hash(hash1, 'default')
assert check_password_hash(hash2, 'default')
assert hash1.startswith('sha1$')
assert hash2.startswith('sha1$')
fakehash = generate_password_hash('default', method='plain')
assert fakehash == 'plain$$default'
assert check_password_hash(fakehash, 'default')
mhash = generate_password_hash(u'default', method='md5')
assert mhash.startswith('md5$')
assert check_password_hash(mhash, 'default')
legacy = 'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
legacy = u'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
def test_safe_join(self):
assert safe_join('foo', 'bar/baz') == os.path.join('foo', 'bar/baz')
assert safe_join('foo', '../bar/baz') is None
if os.name == 'nt':
assert safe_join('foo', 'foo\\bar') is None
def test_pbkdf2(self):
def check(data, salt, iterations, keylen, expected):
rv = pbkdf2_hex(data, salt, iterations, keylen)
self.assert_equal(rv, expected)
# From RFC 6070
check('password', 'salt', 1, None,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 1, 20,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 2, 20,
'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957')
check('password', 'salt', 4096, 20,
'4b007901b765489abead49d926f721d065a429c1')
check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038')
check('pass\x00word', 'sa\x00lt', 4096, 16,
'56fa6aa75548099dcc37d7f03425e0c3')
# This one is from the RFC but it just takes for ages
##check('password', 'salt', 16777216, 20,
## 'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')
# From Crypt-PBKDF2
check('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
'cdedb5281bb2f801565a1122b2563515')
check('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
'01dbee7f4a9e243e988b62c73cda935d')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
'01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86')
check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
'5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13')
check('X' * 64, 'pass phrase equals block size', 1200, 32,
'139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1')
check('X' * 65, 'pass phrase exceeds block size', 1200, 32,
'9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SecurityTestCase))
return suite
| agpl-3.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/ndimage/tests/test_regression.py | 123 | 1429 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
import scipy.ndimage as ndimage
def test_byte_order_median():
"""Regression test for #413: median_filter does not handle bytes orders."""
a = np.arange(9, dtype='<f4').reshape(3, 3)
ref = ndimage.filters.median_filter(a,(3, 3))
b = np.arange(9, dtype='>f4').reshape(3, 3)
t = ndimage.filters.median_filter(b, (3, 3))
assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
"""Ticket #643"""
x = np.arange(12).reshape((3,4))
ndimage.zoom(x, 2, output=np.zeros((6,8)))
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
slices = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3,1240,1240)
a = np.random.rand(np.product(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60,320))
d[:,:257] = 1
d[:,260:] = 1
d[36,257] = 1
d[35,258] = 1
d[35,259] = 1
assert ndimage.label(d, np.ones((3,3)))[1] == 1
if __name__ == "__main__":
run_module_suite()
| mit |
vortex-ape/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 9 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
BaconPancakes/valor | lib/pip/_vendor/requests/packages/urllib3/util/request.py | 780 | 2128 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
| gpl-3.0 |
AMObox/teammaniac | plugin.video.PsychoTV/resources/lib/resolvers/filehoot.py | 23 | 1140 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://filehoot.com/embed-%s.html' % url
result = client.request(url, mobile=True)
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
| gpl-2.0 |
kevinpt/ripyl | ripyl/protocol/iso_k_line.py | 1 | 20293 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''ISO K-line protocol decoder
Decodes ISO9141 and ISO14230 automotive data bus protocols
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
from ripyl.decode import *
import ripyl.streaming as stream
import ripyl.sigproc as sigp
from ripyl.util.enum import Enum
import ripyl.protocol.uart as uart
import ripyl.protocol.obd2 as obd
# I have encountered an ECU (Sagem MC1000) that is largely ISO9141 based but
# also responds to an ISO14230 command for ECU identification after a special init
# phase. For that reason this decoder supports both message formats simultaneously.
# We use bit-7 of the first header byte to determine the protocol.
# 0 = ISO9141, 1 = ISO14230
# Another option for detecting the protocol is the key bytes in the init sequence (ISO9141)
# or in the response to the StartCommunication request. (ISO14230)
# ISO9141 keys : 08 08; 94 94
# ISO14230 keys: 8F E9; 8F 6B; 8F 6D; 8F EF
# Unknown key used on Sagem MC1000 for ISO14230 init: D9 8F
# Detecting the length of a message is tricky since ISO9141 doesn't include a byte
# count and we are not monitoring the L-line to differentiate between data sent
# and received. Until a better system can be devised the decoder uses a simple
# minimum time period between bytes to decide if a new message has started.
# The OBD-2 standard requires this to be at least 55 ms, but this can be violated by
# the tool controlling the interface. In one case, a piece of software had a gap
# as low as 8ms between messages.
# ISO14230 messages are terminated at the length indicated in their header.
# Messages are either a request from an external tool or a response from one or
# more ECUs. These are distinguished by looking at the value of the service ID (mode)
# in the message. This is the first data byte after the header.
# SIDs from 0x00 to 0x3F are requests. SIDs from 0x40 to 0x7F are responses.
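# Minimal sketch of the SID rule described above (illustrative only; the
# actual request/response classification happens later in this module):
#
# >>> classify = lambda sid: 'request' if sid < 0x40 else 'response'
# >>> classify(0x01), classify(0x41)
# ('request', 'response')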
class KLineProtocol(Enum):
'''Enumeration for identifying the message protocol'''
Unknown = 0
ISO9141 = 1
ISO14230 = 2 # KWP2000
class KLineStreamStatus(Enum):
'''Enumeration for KLineStreamMessage status codes'''
ChecksumError = stream.StreamStatus.Error + 1
BadInitError = stream.StreamStatus.Error + 2
InvalidMessageError = stream.StreamStatus.Error + 3
class ISO9141Header(object):
'''ISO9141 header object
Header byte 1: option
| 7-5 priority: 000 = high, 111 = low
| 4 header type: 0 = 3-byte; 1 = 1-byte
| 3 in frame response: 0 = required (Ford); 1 = not allowed (GM)
| 2 addressing mode: 1 = physical; 0 = functional
| 1-0 message type
|
| message type:
| bit: 3 2 1 0
| -------
| 1 0 0 0 function
| 1 0 0 1 broadcast
| 1 0 1 0 query
| 1 0 1 1 read
| 1 1 0 0 node-to-node
| 1 1 0 1 reserved
| 1 1 1 0 reserved
| 1 1 1 1 reserved
|
Header byte 2: target address
Header byte 3: source address
'''
def __init__(self, option, target, source):
'''
option, target, source
UART frame objects for the header bytes
'''
self.option = option
self.target = target
self.source = source
def bytes(self):
'''Returns a list of header bytes in original order'''
return [self.option, self.target, self.source]
def __repr__(self):
return 'ISO9141Header({:02x}, {:02x}, {:02x})'.format(self.option.data, self.target.data, \
self.source.data)
def __str__(self):
return '[{:02x} {:02x} {:02x}]'.format(self.option.data, self.target.data, self.source.data)
class ISO14230Header(object):
'''ISO14230 header object
Header byte 1: length 0b10nnnnnn
5-0 data bytes in message
Header byte 2: optional data byte count if nnnnnn is 0
Header byte 2(3): target address
Header byte 3(4): source address
'''
def __init__(self, option, target, source, length=None):
'''
option, target, source, length
UART frame objects for the header bytes
'''
self.option = option
self.target = target
self.source = source
self.length = length
def bytes(self):
'''Returns a list of header bytes in original order'''
if self.length is None:
return [self.option, self.target, self.source]
else:
return [self.option, self.length, self.target, self.source]
def __repr__(self):
if self.length is None:
rep = 'ISO14230Header({:02x}, {:02x}, {:02x})'.format(self.option.data, self.target.data, \
self.source.data)
else:
rep = 'ISO14230Header({:02x}, {:02x}, {:02x}, {:02x})'.format(self.option.data, \
self.target.data, self.source.data, self.length.data)
return rep
def __str__(self):
if self.length is None:
s = '[{:02x} {:02x} {:02x}]'.format(self.option.data, self.target.data, self.source.data)
else:
s = '[{:02x} ({:02x}) {:02x} {:02x}]'.format(self.option.data, self.length.data, \
self.target.data, self.source.data)
return s
class KLineMessage(obd.OBD2Message):
'''Message object for the K-line protocols ISO9141 and ISO14230'''
def __init__(self, msg_type, header, data, checksum):
obd.OBD2Message.__init__(self, msg_type)
self.header = header
self.data = data
self.checksum = checksum
def checksum_good(self):
'''Validate the message checksum
Returns a bool that is True when checksum is valid.
'''
bdata = self.header.bytes() + self.data
cs = sum([b.data for b in bdata]) % 256
return True if cs == self.checksum.data else False
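# Worked example (illustrative, not part of the original module): for the
# common ISO9141 OBD-II request 68 6A F1 01 00 C4, the header bytes are
# 68 6A F1, the data bytes are 01 00 and the final byte is the checksum:
# (0x68 + 0x6A + 0xF1 + 0x01 + 0x00) % 256 == 0xC4
# so checksum_good() returns True for such a message.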
def raw_data(self, full_message=False):
'''Get the raw data for the message
full_message (bool)
Returns complete message including header and checksum when true
Returns a list of bytes.
'''
if full_message:
return [b for a in [[b.data for b in self.header.bytes()], \
[b.data for b in self.data], [self.checksum.data]] for b in a]
else:
return [b.data for b in self.data]
@property
def start_time(self):
return self.header.option.start_time
@property
def end_time(self):
return self.checksum.end_time
def __repr__(self):
return 'KLineMessage({}, {}, {}, {:02x})'.format(obd.OBD2MsgType(self.msg_type), \
self.header, [hex(b.data) for b in self.data], self.checksum.data)
def __str__(self):
mtype = 'req >' if self.msg_type == obd.OBD2MsgType.Request else 'resp <'
data_bytes = ' '.join('{:02x}'.format(b.data) for b in self.data)
cs_flag = '' if self.checksum_good() else ' BAD checksum!'
return '{:>6} {} {} <{:02x}{}> '.format(mtype, self.header, data_bytes, self.checksum.data, cs_flag)
class KLineStreamMessage(obd.OBD2StreamMessage):
'''StreamMessage object for the K-line protocols ISO9141 and ISO14230'''
def __init__(self, msg, status=stream.StreamStatus.Ok):
obd.OBD2StreamMessage.__init__(self, msg, status)
@classmethod
def status_text(cls, status):
'''Returns the string representation of a status code'''
if status >= KLineStreamStatus.ChecksumError and \
status <= KLineStreamStatus.InvalidMessageError:
return KLineStreamStatus(status)
else:
return obd.OBD2StreamMessage.status_text(status)
def __repr__(self):
status_text = KLineStreamMessage.status_text(self.status)
return 'KLineStreamMessage({}, {})'.format(self.msg, status_text)
class KLineWakeup(stream.StreamSegment):
'''Encapsulates BRK data values representing the wakeup pattern
This is used for the slow init (0x33 at 5-baud) and the fast init (25ms low, 25ms high)
'''
def __init__(self, bounds, edges, status=stream.StreamStatus.Ok):
stream.StreamSegment.__init__(self, bounds, None, status)
self.data = edges
self.kind = 'K-line wakeup'
def __repr__(self):
status_text = stream.StreamSegment.status_text(self.status)
return 'KLineWakeup({})'.format(status_text)
class ISO9141Init(stream.StreamSegment):
'''Encapsulates initialization exchange before messaging begins on ISO9141
    These are the bytes in the 0x55, key1, key2, ~key2, ~wakeup init sequence.
'''
def __init__(self, recs, status=stream.StreamStatus.Ok):
bounds = (recs[0].start_time, recs[-1].end_time)
        stream.StreamSegment.__init__(self, bounds, status=status)
self.annotate('frame', {}, stream.AnnotationFormat.Hidden)
for r in recs:
self.subrecords.append(r.subrecords[1])
self.subrecords[-1].annotate('ctrl', {}, stream.AnnotationFormat.General)
self.kind = 'ISO9141 init'
def __repr__(self):
status_text = stream.StreamSegment.status_text(self.status)
return 'ISO9141Init({})'.format(status_text)
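# Illustrative init exchange (typical ISO9141-2 values, shown only as an
# example): after the 5-baud 0x33 wakeup the ECU answers with the 0x55 sync
# byte, key bytes such as 0x08 0x08 follow, the tester echoes the inverted
# second key (0xF7) and the ECU replies with the inverted wakeup address (0xCC).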
def iso_k_line_decode(stream_data, min_message_interval=7.0e-3, logic_levels=None, stream_type=stream.StreamType.Samples):
'''Decode ISO9141 and ISO14230 data streams
This is a generator function that can be used in a pipeline of waveform
    processing operations.
Sample streams are a sequence of SampleChunk Objects. Edge streams are a sequence
of 2-tuples of (time, int) pairs. The type of stream is identified by the stream_type
parameter. Sample streams will be analyzed to find edge transitions representing
0 and 1 logic states of the waveforms. With sample streams, an initial block of data
is consumed to determine the most likely logic levels in the signal.
stream_data (iterable of SampleChunk objects or (float, int) pairs)
A sample stream or edge stream of K-line messages.
min_message_interval (float)
The minimum time between bytes for identifying the end and start
of messages. For ISO14230 this is used in addition to the message length encoded
in the header.
logic_levels ((float, float) or None)
Optional pair that indicates (low, high) logic levels of the sample
stream. When present, auto level detection is disabled. This has no effect on
edge streams.
stream_type (streaming.StreamType)
A StreamType value indicating that the stream parameter represents either Samples
or Edges
Yields a series of KLineStreamMessage objects.
Raises AutoLevelError if stream_type = Samples and the logic levels cannot
be determined.
'''
if stream_type == stream.StreamType.Samples:
if logic_levels is None:
samp_it, logic_levels = check_logic_levels(stream_data)
else:
samp_it = stream_data
edges = find_edges(samp_it, logic_levels, hysteresis=0.4)
else: # the stream is already a list of edges
edges = stream_data
bits = 8
parity = None
stop_bits = 1
polarity = uart.UARTConfig.IdleHigh
baud_rate = 10400
records_it = uart.uart_decode(edges, bits, parity, stop_bits, lsb_first=True, \
polarity=polarity, baud_rate=baud_rate, logic_levels=logic_levels, stream_type=stream.StreamType.Edges)
S_WAKEUP = 0
S_INIT = 1
S_START_MSG = 2
S_GET_MSG = 3
state = S_GET_MSG
wakeup_edges = []
init_bytes = []
protocol = KLineProtocol.Unknown
msg_bytes = []
prev_byte_end = 0.0
total_length = None
def get_msg_len(msg_bytes):
# Try to determine message length
if len(msg_bytes) > 0:
if msg_bytes[0].data & 0x80: #ISO14230
# Get length from first byte
length = msg_bytes[0].data & 0x3F
total_length = 3 + length + 1
if length == 0: # Header is 4-bytes long
if len(msg_bytes) >= 2: # 2nd byte is present
length = msg_bytes[1].data
total_length = 4 + length + 1
else:
return None
return total_length
else: #ISO9141
return None
else:
return None
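    # Example (illustrative): a first byte of 0x82 encodes a 2-byte payload,
    # so the full message is 3 header bytes + 2 data bytes + 1 checksum = 6
    # bytes; a first byte of 0x80 defers the count to the second byte, giving
    # 4 + length + 1 bytes in total.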
def build_msg(protocol, msg_bytes):
# determine header length
header_length = 3
if protocol == KLineProtocol.ISO14230 and msg_bytes[0].data == 0x80:
header_length = 4
# Message must be at least h_l + 1 + 1 = 5 or 6 bytes long
if len(msg_bytes) >= header_length + 2:
sid_byte = msg_bytes[header_length]
msg_type = obd.OBD2MsgType.Request if sid_byte.data <= 0x3F else obd.OBD2MsgType.Response
if protocol == KLineProtocol.ISO9141:
header = ISO9141Header(option=msg_bytes[0], target=msg_bytes[1], source=msg_bytes[2])
elif protocol == KLineProtocol.ISO14230:
if header_length == 4:
length = msg_bytes[1]
target = msg_bytes[2]
source = msg_bytes[3]
else:
length = None
target = msg_bytes[1]
source = msg_bytes[2]
header = ISO14230Header(option=msg_bytes[0], target=target, source=source, \
length=length)
else: # Unknown protocol
header = ISO9141Header(option=msg_bytes[0], target=msg_bytes[1], source=msg_bytes[2])
msg = KLineMessage(msg_type, header, msg_bytes[header_length:-1], msg_bytes[-1])
status = KLineStreamStatus.ChecksumError if not msg.checksum_good() else stream.StreamStatus.Ok
obd_msg = KLineStreamMessage(msg, status)
obd_msg.annotate('frame', {}, stream.AnnotationFormat.Hidden)
for b in msg_bytes:
obd_msg.subrecords.append(b.subrecords[1])
obd_msg.subrecords[-1].annotate('data', {'_bits':8}, stream.AnnotationFormat.General)
obd_msg.subrecords[-1].kind = 'data'
for sr in obd_msg.subrecords[0:header_length]:
sr.style = 'addr'
sr.kind = 'header'
obd_msg.subrecords[-1].style = 'check'
obd_msg.subrecords[-1].status = status
obd_msg.subrecords[-1].kind = 'checksum'
else:
# Not enough bytes for proper K-line message
msg = KLineMessage(obd.OBD2MsgType.Unknown, None, msg_bytes, None)
obd_msg = KLineStreamMessage(msg, KLineStreamStatus.InvalidMessageError)
for b in msg_bytes:
obd_msg.subrecords.append(b.subrecords[1])
obd_msg.subrecords[-1].annotate('misc', {'_bits':8}, stream.AnnotationFormat.General)
return obd_msg
for r in records_it:
if r.data == 0x00 and r.status == uart.UARTStreamStatus.FramingError:
state = S_WAKEUP
wakeup_edges.append(r.start_time)
continue
if state == S_WAKEUP:
if not (r.data == 0x00 and r.status == uart.UARTStreamStatus.FramingError):
# not a BRK byte; wakeup has ended
bounds = (wakeup_edges[0], r.start_time)
wu = KLineWakeup(bounds, wakeup_edges)
wu.annotate('frame', {}, stream.AnnotationFormat.Hidden)
yield wu
wakeup_edges = []
if r.data == 0x55: # ISO9141 sync byte
protocol = KLineProtocol.ISO9141
init_bytes.append(r)
init_bytes_left = 4
state = S_INIT
elif r.data == 0xc1: # KWP2000 start comm. format byte
protocol = KLineProtocol.ISO14230
msg_bytes.append(r)
state = S_GET_MSG
prev_byte_end = r.end_time
else: # Unexpected data
se = stream.StreamEvent(r.start_time, kind='Bad init', \
status=KLineStreamStatus.BadInitError)
yield se
# We will just assume this is the start of a new message
msg_bytes.append(r)
state = S_GET_MSG
prev_byte_end = r.end_time
elif state == S_INIT:
# After 0x55 there are 4 more bytes remaining in the init sequence
# Key 1, Key 2, ~Key 2, ~Wakeup (0xCC typ.)
init_bytes.append(r)
init_bytes_left -= 1
if init_bytes_left == 0:
yield ISO9141Init(init_bytes)
init_bytes = []
state = S_START_MSG
prev_byte_end = r.end_time
elif state == S_START_MSG:
protocol = KLineProtocol.ISO14230 if r.data & 0x80 else KLineProtocol.ISO9141
msg_bytes.append(r)
state = S_GET_MSG
prev_byte_end = r.end_time
elif state == S_GET_MSG:
if len(msg_bytes) == 2:
total_length = get_msg_len(msg_bytes)
#print('### byte:', eng.eng_si(r.start_time, 's'), \
# eng.eng_si(r.start_time - prev_byte_end, 's'), hex(r.data))
if (r.start_time - prev_byte_end > min_message_interval and len(msg_bytes) > 0) or \
(total_length is not None and len(msg_bytes) == total_length):
# Previous message ended
msg = build_msg(protocol, msg_bytes)
yield msg
msg_bytes = []
total_length = None
# Determine the protocol of the next message
protocol = KLineProtocol.ISO14230 if r.data & 0x80 else KLineProtocol.ISO9141
msg_bytes.append(r)
prev_byte_end = r.end_time
# Handle final message
if len(msg_bytes) > 0:
msg = build_msg(protocol, msg_bytes)
yield msg
msg_bytes = []
# There may have been a partial wakeup pattern at the end of the stream
if len(wakeup_edges) > 0:
bounds = (wakeup_edges[0], prev_byte_end)
wu = KLineWakeup(bounds, wakeup_edges)
wu.annotate('frame', {}, stream.AnnotationFormat.Hidden)
yield wu
def iso_k_line_synth(messages, idle_start=0.0, message_interval=8.0e-3, idle_end=0.0, word_interval=1.0e-3):
'''Generate synthesized ISO9141 and ISO14230 data streams
messages (sequence of tuple of int)
Messages to be synthesized. Each element is a tuple of bytes to send
for each message.
idle_start (float)
The amount of idle time before the transmission of messages begins.
message_interval (float)
The amount of time between messages.
idle_end (float)
The amount of idle time after the last message.
word_interval (float)
The amount of time between message bytes.
Yields an edge stream of (float, int) pairs. The first element in the iterator
is the initial state of the stream.
'''
msg_its = []
for i, msg in enumerate(messages):
istart = idle_start if i == 0 else 0.0
iend = idle_end if i == len(messages)-1 else 0.0
msg_its.append(uart.uart_synth(msg, bits=8, baud=10400, idle_start=istart, \
idle_end=iend, word_interval=word_interval))
return sigp.chain_edges(message_interval, *msg_its)
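# Minimal usage sketch (assumes this module's own imports; the byte values are
# the illustrative ISO9141-2 request shown earlier, not captured data):
#   edges = iso_k_line_synth([(0x68, 0x6A, 0xF1, 0x01, 0x00, 0xC4)])
#   records = list(iso_k_line_decode(edges, stream_type=stream.StreamType.Edges))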
| lgpl-3.0 |
xzYue/odoo | addons/account/wizard/account_report_common.py | 342 | 10353 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.osv.orm import setup_modifiers
from openerp.tools.translate import _
class account_common_report(osv.osv_memory):
_name = "account.common.report"
_description = "Account Common Report"
def onchange_chart_id(self, cr, uid, ids, chart_account_id=False, context=None):
res = {}
if chart_account_id:
company_id = self.pool.get('account.account').browse(cr, uid, chart_account_id, context=context).company_id.id
now = time.strftime('%Y-%m-%d')
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
res['value'] = {'company_id': company_id, 'fiscalyear_id': fiscalyears and fiscalyears[0] or False}
return res
_columns = {
'chart_account_id': fields.many2one('account.account', 'Chart of Account', help='Select Charts of Accounts', required=True, domain = [('parent_id','=',False)]),
'company_id': fields.related('chart_account_id', 'company_id', type='many2one', relation='res.company', string='Company', readonly=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from': fields.many2one('account.period', 'Start Period'),
'period_to': fields.many2one('account.period', 'End Period'),
'journal_ids': fields.many2many('account.journal', string='Journals', required=True),
'date_from': fields.date("Start Date"),
'date_to': fields.date("End Date"),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _check_company_id(self, cr, uid, ids, context=None):
for wiz in self.browse(cr, uid, ids, context=context):
company_id = wiz.company_id.id
if wiz.fiscalyear_id and company_id != wiz.fiscalyear_id.company_id.id:
return False
if wiz.period_from and company_id != wiz.period_from.company_id.id:
return False
if wiz.period_to and company_id != wiz.period_to.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The fiscalyear, periods or chart of account chosen have to belong to the same company.', ['chart_account_id','fiscalyear_id','period_from','period_to']),
]
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        if context is None:
            context = {}
res = super(account_common_report, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context.get('active_model', False) == 'account.account':
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='chart_account_id']")
for node in nodes:
node.set('readonly', '1')
node.set('help', 'If you print the report from Account list/form view it will not consider Charts of account')
setup_modifiers(node, res['fields']['chart_account_id'])
res['arch'] = etree.tostring(doc)
return res
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = {'value': {}}
if filter == 'filter_no':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False}
if filter == 'filter_date':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': time.strftime('%Y-01-01'), 'date_to': time.strftime('%Y-%m-%d')}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.special = false
ORDER BY p.date_start ASC, p.special ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND p.special = false
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = periods[0]
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
return res
def _get_account(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
accounts = self.pool.get('account.account').search(cr, uid, [('parent_id', '=', False), ('company_id', '=', user.company_id.id)], limit=1)
return accounts and accounts[0] or False
def _get_fiscalyear(self, cr, uid, context=None):
if context is None:
context = {}
now = time.strftime('%Y-%m-%d')
company_id = False
ids = context.get('active_ids', [])
if ids and context.get('active_model') == 'account.account':
company_id = self.pool.get('account.account').browse(cr, uid, ids[0], context=context).company_id.id
else: # use current company id
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
return fiscalyears and fiscalyears[0] or False
def _get_all_journal(self, cr, uid, context=None):
return self.pool.get('account.journal').search(cr, uid ,[])
_defaults = {
'fiscalyear_id': _get_fiscalyear,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.common.report',context=c),
'journal_ids': _get_all_journal,
'filter': 'filter_no',
'chart_account_id': _get_account,
'target_move': 'posted',
}
def _build_contexts(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = {}
result['fiscalyear'] = 'fiscalyear_id' in data['form'] and data['form']['fiscalyear_id'] or False
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter'] == 'filter_date':
result['date_from'] = data['form']['date_from']
result['date_to'] = data['form']['date_to']
elif data['form']['filter'] == 'filter_period':
if not data['form']['period_from'] or not data['form']['period_to']:
raise osv.except_osv(_('Error!'),_('Select a starting and an ending period.'))
result['period_from'] = data['form']['period_from']
result['period_to'] = data['form']['period_to']
return result
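    # Illustrative shape of the context returned above (ids are hypothetical):
    # {'fiscalyear': 1, 'journal_ids': [1, 2], 'chart_account_id': 1,
    #  'state': 'posted', 'period_from': 4, 'period_to': 7}
    # With the 'filter_date' filter, 'date_from'/'date_to' replace the periods.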
def _print_report(self, cr, uid, ids, data, context=None):
        raise osv.except_osv(_('Error!'), _('Not implemented.'))
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = {}
data['ids'] = context.get('active_ids', [])
data['model'] = context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(cr, uid, ids, ['date_from', 'date_to', 'fiscalyear_id', 'journal_ids', 'period_from', 'period_to', 'filter', 'chart_account_id', 'target_move'], context=context)[0]
for field in ['fiscalyear_id', 'chart_account_id', 'period_from', 'period_to']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
used_context = self._build_contexts(cr, uid, ids, data, context=context)
data['form']['periods'] = used_context.get('periods', False) and used_context['periods'] or []
data['form']['used_context'] = dict(used_context, lang=context.get('lang', 'en_US'))
return self._print_report(cr, uid, ids, data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
V155/qutebrowser | qutebrowser/config/configexc.py | 1 | 5311 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Exceptions related to config parsing."""
import typing
import attr
from qutebrowser.utils import jinja, usertypes
class Error(Exception):
"""Base exception for config-related errors."""
class NoAutoconfigError(Error):
"""Raised when this option can't be set in autoconfig.yml."""
def __init__(self, name: str) -> None:
super().__init__("The {} setting can only be set in config.py!"
.format(name))
class BackendError(Error):
"""Raised when this setting is unavailable with the current backend."""
def __init__(
self, name: str,
backend: usertypes.Backend,
raw_backends: typing.Optional[typing.Mapping[str, bool]]
) -> None:
if raw_backends is None or not raw_backends[backend.name]:
msg = ("The {} setting is not available with the {} backend!"
.format(name, backend.name))
else:
msg = ("The {} setting needs {} with the {} backend!"
.format(name, raw_backends[backend.name], backend.name))
super().__init__(msg)
class NoPatternError(Error):
"""Raised when the given setting does not support URL patterns."""
def __init__(self, name: str) -> None:
super().__init__("The {} setting does not support URL patterns!"
.format(name))
class ValidationError(Error):
"""Raised when a value for a config type was invalid.
Attributes:
value: Config value that triggered the error.
msg: Additional error message.
"""
def __init__(self, value: typing.Any,
msg: typing.Union[str, Exception]) -> None:
super().__init__("Invalid value '{}' - {}".format(value, msg))
self.option = None
class KeybindingError(Error):
"""Raised for issues with keybindings."""
class NoOptionError(Error):
"""Raised when an option was not found."""
def __init__(self, option: str, *,
deleted: bool = False,
renamed: str = None) -> None:
if deleted:
assert renamed is None
suffix = ' (this option was removed from qutebrowser)'
elif renamed is not None:
suffix = ' (this option was renamed to {!r})'.format(renamed)
else:
suffix = ''
super().__init__("No option {!r}{}".format(option, suffix))
self.option = option
@attr.s
class ConfigErrorDesc:
"""A description of an error happening while reading the config.
Attributes:
text: The text to show.
exception: The exception which happened.
traceback: The formatted traceback of the exception.
"""
text = attr.ib() # type: str
exception = attr.ib() # type: typing.Union[str, Exception]
traceback = attr.ib(None) # type: str
def __str__(self) -> str:
if self.traceback:
return '{} - {}: {}'.format(self.text,
self.exception.__class__.__name__,
self.exception)
return '{}: {}'.format(self.text, self.exception)
def with_text(self, text: str) -> 'ConfigErrorDesc':
"""Get a new ConfigErrorDesc with the given text appended."""
return self.__class__(text='{} ({})'.format(self.text, text),
exception=self.exception,
traceback=self.traceback)
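# Illustrative behaviour (a sketch, not from the original file): given
# desc = ConfigErrorDesc(text='While loading config', exception=exc),
# desc.with_text('colors.foo') returns a copy whose text reads
# 'While loading config (colors.foo)' while exception and traceback are kept.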
class ConfigFileErrors(Error):
"""Raised when multiple errors occurred inside the config."""
def __init__(self,
basename: str,
errors: typing.Sequence[ConfigErrorDesc]) -> None:
super().__init__("Errors occurred while reading {}:\n{}".format(
basename, '\n'.join(' {}'.format(e) for e in errors)))
self.basename = basename
self.errors = errors
def to_html(self) -> str:
"""Get the error texts as a HTML snippet."""
template = jinja.environment.from_string("""
Errors occurred while reading {{ basename }}:
<ul>
{% for error in errors %}
<li>
<b>{{ error.text }}</b>: {{ error.exception }}
{% if error.traceback != none %}
<pre>
""".rstrip() + "\n{{ error.traceback }}" + """
</pre>
{% endif %}
</li>
{% endfor %}
</ul>
""")
return template.render(basename=self.basename, errors=self.errors)
| gpl-3.0 |
RainDogSoftware/pingpung | pingpung/pingpung.py | 1 | 22103 | #!/usr/bin/env python3
import sys
import time
import os
from collections import OrderedDict
from itertools import count
from gettext import gettext as _
from PyQt4 import QtCore, QtGui, uic
from pingpung import pplib
from pingpung.pplib import pping
from pingpung.pplib import audio
__date__ = "$Date: 2015/03/17 $"
__author__ = "Josh Price"
__credits__ = ["Rob Knight", "Peter Maxwell", "Gavin Huttley",
"Matthew Wakefield"]
__license__ = "GPLv2"
__maintainer__ = "Josh Price"
__email__ = "[email protected]"
__status__ = "Alpha"
############################################################################################
# Ping Thread
class PingThread(QtCore.QThread):
"""
A QThread subclass for running the pings.
"""
def __init__(self, ip, ping_count, interval, packet_size, tab_id, start_num, timeout=5000):
"""
:param args:
ip: The IP address or domain name of the target
ping_count: The number of packets to send. A zero indicates continuous pings
interval: The delay in seconds between pings
packet size: The size of the payload in bytes
tab_id: The ID number of the tab which sent the ping
start_num: The sequence number to begin with. Allows pause/resume functionality
The results of a ping are sent via Qt Signals. Errors starting the ping are sent with a string describing the
error, while the complete ping signal (including timeouts and such) includes a dict with the detailed results,
as provided by the ping library in use.
:return:
"""
super(PingThread, self).__init__()
self.ip = ip
self.ping_count = int(ping_count)
self.interval = int(interval)
self.packet_size = int(packet_size)
self.tab_id = int(tab_id)
self.start_num = start_num
self.timeout = timeout
def run(self):
seq_num = self.start_num
while (seq_num < self.ping_count) or (self.ping_count == 0):
seq_num += 1
# Cannot accept sequence number > 65535. This resets seq number but does not affect stats totals
if seq_num > 65535:
seq_num = 0
try:
self.result = pping.ping(self.ip, self.timeout, seq_num, self.packet_size)
except ValueError:
self.emit(QtCore.SIGNAL('error'), _("Invalid input"))
break
except pping.SocketError:
self.emit(QtCore.SIGNAL('error'), _("Error. Verify that we're running as root/admin. See README.md"))
break
except pping.AddressError:
self.emit(QtCore.SIGNAL('error'), _("Address error. Check IP/domain setting."))
break
self.result["tabID"] = self.tab_id
self.emit(QtCore.SIGNAL('complete'), self.result)
time.sleep(self.interval)
self.emit(QtCore.SIGNAL('suite_complete'), self.tab_id)
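# Illustrative wiring of the thread (a sketch; the real setup lives in
# PingPung._set_active/_connect_slots below):
#   thread = PingThread('127.0.0.1', ping_count=4, interval=1, packet_size=64,
#                       tab_id=0, start_num=0)
#   ...connect its 'complete'/'error'/'suite_complete' signals, then thread.start()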
############################################################################################
# Main
class PingPung(QtGui.QMainWindow):
############################################################################################
# UI Setup
def __init__(self):
super(PingPung, self).__init__()
filepath = os.path.join(os.path.dirname(approot), "ppui/maingui.ui")
self.ui = uic.loadUi(filepath)
# Preparing to handle multiple tabs of pings. We keep a dict in self.tabs so that they can be referenced by
        # id number, as assigned by the counter below. The index number in the tab bar widget is not enough:
        # if a tab's index number changes while the ping thread is running, crazy things could happen.
        # This is also why they're kept in a dict instead of a list or something dynamically numbered.
self.tabs = {}
self.counter_iter = count()
# Functionality for adding and removing tabs
self.tab_button = QtGui.QToolButton(self)
self.tab_button.setText('+')
self.ui.tab_bar.setCornerWidget(self.tab_button)
self.tab_button.clicked.connect(self._new_tab)
self.ui.tab_bar.tabCloseRequested.connect(self._remove_tab)
# Menu actions
self.ui.actionExit.triggered.connect(QtGui.qApp.quit)
self.ui.actionAbout_PingPung.triggered.connect(self._show_about)
# Always start with one tab
self._new_tab()
def _show_about(self):
"""
Loads and displays the About page of the UI
:return:
"""
filepath = os.path.join(os.path.dirname(approot), "ppui/about.ui")
self.about = uic.loadUi(filepath)
self.about.version_label.setText(__version__)
self.about.show()
def _run_button_action(self, tab_ui):
#if this tab contains a running thread, terminate it
if not self._set_inactive(tab_ui.tab_id):
self._set_active(tab_ui.tab_id)
def _connect_slots(self, sender):
# ♫ Connect the slots. Lalalalala. ♫
self.connect(sender, QtCore.SIGNAL('complete'), self._show_result)
self.connect(sender, QtCore.SIGNAL('error'), self._show_error)
self.connect(sender, QtCore.SIGNAL('set_state_inactive'), self._set_inactive)
self.connect(sender, QtCore.SIGNAL('set_state_active'), self._set_active)
self.connect(sender, QtCore.SIGNAL('suite_complete'), self._suite_complete)
############################################################################################
# Tab management
def _new_tab(self, *args):
"""
Creates a new tab from the pptab.ui file. Each tab is assigned a unique integer ID number. This is how
we keep track of which pings belong to which tab while having multiple simultaneous pings and tabs.
:param args: Unused
:return:
"""
# Tab contents are in their own object, as each tab needs to operate independently of the others in all cases.
# As noted above, tabs must have an unchanging ID number for thread support
filepath = os.path.join(os.path.dirname(approot), "ppui/pptab.ui")
tab_ui = uic.loadUi(filepath)
tab_ui.tab_id = next(self.counter_iter)
tab_ui.last_num = -1
# No non-Windows audio support yet. I'll get to it! I promise!
if sys.platform != "win32":
tab_ui.audio_options.setEnabled(False)
# We keep an OrderedDict of the ping statistics for each tab. This is used directly by the stats table
tab_ui.stat_dict = self.get_default_stats()
self._refresh_stat_display(tab_ui)
# This is a dictionary of tabs keyed by ID number, so that they can be referenced later even if index changes
self.tabs[tab_ui.tab_id] = tab_ui
# Connect enter key to start/stop ping in tab, connect start/stop button as well
tab_ui.ip_line.returnPressed.connect(lambda: self._run_button_action(tab_ui))
tab_ui.session_line.returnPressed.connect(lambda: self._run_button_action(tab_ui))
tab_ui.ping_count_line.returnPressed.connect(lambda: self._run_button_action(tab_ui))
tab_ui.interval_line.returnPressed.connect(lambda: self._run_button_action(tab_ui))
tab_ui.toggle_start.clicked.connect(lambda: self._run_button_action(tab_ui))
tab_ui.toggle_start.setStyleSheet("background-color: #66EE66")
# Connect the clear/save log buttons to actions
tab_ui.clear_log_button.clicked.connect(lambda: self._clear_log(tab_ui))
tab_ui.save_log_button.clicked.connect(lambda: self._save_log(tab_ui))
# The "average table". Item 1 is count of successful pings, item 2 is total latency.
tab_ui.avg_table = [0, 0]
        # Until I can figure out how to make copy/paste automatically take the whole selection, this is how you copy
        # the complete stats table
tab_ui.copy_stats_button.clicked.connect(lambda: self.copy_stats(tab_ui.stat_dict))
tab_ui.send_stats_button.clicked.connect(lambda: self.write_stats(tab_ui))
# Always start with one tab
self.ui.tab_bar.addTab(tab_ui, _("New Tab"))
self.ui.tab_bar.setCurrentWidget(tab_ui)
def _remove_tab(self, index):
"""
Removes this tab as long as it is not the only remaining tab
:param index:
:return:
"""
if self.ui.tab_bar.count() >= 2:
tab_ui = self.ui.tab_bar.widget(index) # Get the tab object
self._set_inactive(tab_ui.tab_id) # Stop the ping (by id, NOT index)
self.ui.tab_bar.removeTab(index) # Remove the tab from UI (by index)
self.tabs.pop(tab_ui.tab_id) # Clear it from tabs dictionary
tab_ui = None # Being thorough. I've had trouble predicting Qt's garbage collector
@staticmethod
def copy_stats(stat_dict):
# Yeah, I have no idea why I thought all that previous work here was necessary. I went to great length to
# pull the data from the qt table... when the exact same data is already in a simple stats dictionary.
result = "\n".join(["{:s}: {:s}".format(x, str(y)) for x, y in stat_dict.items() if len(x) > 1])
clipboard = QtGui.QApplication.clipboard()
clipboard.setText(result)
def _get_index(self, tab_ui):
return self.ui.tab_bar.indexOf(tab_ui)
############################################################################################
# Stats & Data
def _clear_log(self, tab_ui):
"""
Clear the main output window, stat data dict, reset ping sequence number, reset stats display table
:param tab_ui: the tab instance to work on
:return:
"""
tab_ui.output_textedit.clear()
tab_ui.stat_dict = self.get_default_stats()
tab_ui.last_num = -1
tab_ui.avg_table = [0, 0] # Indicate no pings this session
self._refresh_stat_display(tab_ui)
def _save_log(self, tab_ui):
"""
Save the contents of the main output box to a plain text file of the user's choosing
:param tab_ui: the tab instance to work on
:return:
"""
file_types = "Plain Text (*.txt);;Plain Text (*.log)"
home = os.path.expanduser("~")
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save Log file', home, file_types)
if len(filename) > 0: # Making sure the user selected a file (didn't hit Cancel)
file_handle = open(filename, 'w')
try:
file_handle.write(tab_ui.output_textedit.toPlainText())
file_handle.close()
except Exception as e:
# I don't normally do blanket exceptions, but in this case any error means we can't save file so
# it all has the same effect. Notify the user and move along.
self._show_error("Unable to save log file.", str(e))
def _show_result(self, result):
"""
This method accepts the result dictionary from a ping and updates the text in the output box and the color of
the tab text depending on the result. It also initiates playback of success/fail sounds if the option is
        enabled in the GUI.
        :param result: The dictionary containing the results of the last ping
:return:
"""
# The ID number of the tab which sent the ping is provided by the PingThread class
tab_ui = self.tabs[result["tabID"]]
index = self._get_index(tab_ui)
if result["Success"]:
self.ui.tab_bar.tabBar().setTabTextColor(index, QtGui.QColor(0, 128, 0))
output = self.format_output_success(result)
tab_ui.last_num = result["SeqNumber"]
if tab_ui.toggle_audio.isChecked() and tab_ui.alert_success.isChecked():
pplib.audio.play("data/woohoo.wav")
else:
self.ui.tab_bar.tabBar().setTabTextColor(index, QtGui.QColor(255, 64, 64))
output = self.format_output_failure(result)
if tab_ui.toggle_audio.isChecked() and tab_ui.alert_failure.isChecked():
pplib.audio.play("data/doh.wav")
output_box = tab_ui.output_textedit
output_box.append(_(output))
self.last_num = result["SeqNumber"]
self._update_stats(result, tab_ui)
@staticmethod
def get_default_stats():
"""
        Takes no arguments, returns the ordered dictionary to be used in the stats display.  I believe an ordered
        dictionary is the best approach here, as it allows me to store the data in the same object that's displayed
directly to the user. I can keep them in a logical order while still accessing them by key (in case I decide
to change the order later, it won't break all my other functions that read/write to it)
:return: OrderedDict
"""
return OrderedDict([("Success", 0),
("Last Success", ""),
("Failure", 0),
("Last Failure", ""),
("% Success", 0),
("", ""),
("Highest Latency", ""),
("Lowest Latency", ""),
("Average Latency", ""),
])
@staticmethod
def format_output_success(result):
"""
This method accepts the result dictionary from a successful ping and generates colorized output
:param result:
:return: An html-formatted colorized string containing the timestamp, sequence number, text, packet size and
responding IP from a successful ping
"""
delay = result["Delay"]
if delay > 100:
color = "red"
elif delay > 50:
color = "#FF9900"
else:
color = "green"
ms = "<font color='{:s}'>{:.2f}</font>".format(color, delay)
output = "{:s} {:d} - {:s} - {:d} bytes from {:s} time={:s} ms".format(result["Timestamp"], result['SeqNumber'],
result['Message'], result["PacketSize"],
result['Responder'], ms)
return output
@staticmethod
def format_output_failure(result):
"""
        This method accepts the result dictionary from a ping and generates colorized output
:param result:
:return: An html-formatted string containing the timestamp and error message
"""
output = "<font color='red'>{:s} - {:s}</font>".format(result["Timestamp"], result['Message'])
return output
def _show_error(self, message, optional=""):
QtGui.QMessageBox.about(self, "I'm sad now.", "\n".join([_(message), str(optional)]))
def _refresh_stat_display(self, tab_ui):
for row, key in enumerate(tab_ui.stat_dict.keys()):
tab_ui.stats_table.setItem(row, 0, QtGui.QTableWidgetItem(key))
tab_ui.stats_table.setItem(row, 1, QtGui.QTableWidgetItem(str(tab_ui.stat_dict[key])))
def _update_stats(self, result, tab_ui):
if result["Success"]:
tab_ui.stat_dict["Success"] += 1
tab_ui.stat_dict["Last Success"] = result["Timestamp"]
# This is sloppy,
# TODO: come back and clean this up.
high = tab_ui.stat_dict["Highest Latency"]
low = tab_ui.stat_dict["Lowest Latency"]
delay = round(result["Delay"], 2)
if high == "":
tab_ui.stat_dict["Highest Latency"] = delay
high = result["Delay"]
if low == "":
tab_ui.stat_dict["Lowest Latency"] = delay
low = result["Delay"]
if result["Delay"] > high:
tab_ui.stat_dict["Highest Latency"] = delay
elif result["Delay"] < low:
tab_ui.stat_dict["Lowest Latency"] = delay
# The average table is a 2-item list. The first item contains the number of successful pings (makes no sense
# to count latency on a ping that never returned) and the second is the total latency for all those pings
# combined. Divide latency total by count, and we've got our average.
tab_ui.avg_table[0] += 1
tab_ui.avg_table[1] += result["Delay"]
tab_ui.stat_dict["Average Latency"] = round(tab_ui.avg_table[1] / tab_ui.avg_table[0], 2)
else:
tab_ui.stat_dict["Failure"] += 1
tab_ui.stat_dict["Last Failure"] = result["Timestamp"]
tab_ui.stat_dict["% Success"] = round((tab_ui.stat_dict["Success"] / (tab_ui.stat_dict["Failure"] +
tab_ui.stat_dict["Success"])) * 100, 2)
self._refresh_stat_display(tab_ui)
############################################################################################
# Ping Management
def _suite_complete(self, tab_id):
"""
This is called when a limited number of pings have been specified. It resets the appropriate counters, sets
        the program state to inactive, and adds a completion notice to the output box.
:param tab_id: The id number (not index) of the relevant tab
:return:
"""
tab_ui = self.tabs[tab_id]
tab_ui.output_textedit.append(_("<strong>Test Suite Complete</strong>"))
self.write_stats(tab_ui)
tab_ui.last_num = -1 # so sequence will start from 0 on next suite start
self._set_inactive(tab_id)
def write_stats(self, tab_ui):
"""
Writes the stats to the screen
:param tab_ui: the tab instance to work on
"""
# Some shorter variable names for brevity in upcoming list comprehension
sd = tab_ui.stat_dict
ot = tab_ui.output_textedit
# Don't bother trying to clean/speed this up by putting a single <strong> tag around all lines at once, the gui
# will only apply it to that one line. Means we've got to <strong> each line individually.
[ot.append("<strong>{:s}: {:s}</strong>".format(x, str(y))) for x, y in sd.items() if len(x) > 1]
def _set_inactive(self, tab_id):
"""
Sets the tab to the inactive state, including gui changes and terminating the thread
:param tab_id: The id number of the tab to set as inactive
:return:
"""
tab_ui = self.tabs[tab_id]
tab_ui.toggle_start.setText(_("Start"))
tab_ui.toggle_start.setStyleSheet("background-color: #66EE66")
index = self._get_index(tab_ui)
self.ui.tab_bar.setTabIcon(index, QtGui.QIcon(""))
if hasattr(tab_ui, "thread") and hasattr(tab_ui.thread, "isRunning") and (tab_ui.thread.isRunning() is True):
tab_ui.thread.terminate()
tab_ui.output_textedit.append(_("Pausing..."))
return True
else:
return False
def _set_active(self, tab_id):
"""
Sets the tab to active state, including gui changes and starting the ping thread
:param tab_id: The id number of the tab to set as active
:return:
"""
tab_ui = self.tabs[tab_id]
index = self._get_index(tab_ui)
# Default to black text (in case tab text is colored from previous session)
self.ui.tab_bar.tabBar().setTabTextColor(index, QtGui.QColor(0, 0, 0))
try:
ip = tab_ui.ip_line.text().strip()
ping_count = int(tab_ui.ping_count_line.text().strip())
interval = int(tab_ui.interval_line.text().strip())
label = tab_ui.session_line.text().strip()
packet_size = int(tab_ui.packet_size_line.text().strip())
except ValueError as e:
self._show_error("Invalid input\n" + str(e))
return
if packet_size > 65535:
raise ValueError(_("Packet size too ridiculously large"))
# We treat start/stop as start/pause, and a new session is indicated by a -1 sequence number
# If positive, pick up from that sequence number
if tab_ui.last_num > 0:
seq_num = tab_ui.last_num
else:
seq_num = 0
        # Without this check, you could run an infinite ping (count 0), pause it, then run a finite ping with a count
# lower than last_num, and it would instantly think the suite is complete. Semi-obscure bug but worth a fix.
if ping_count > 0:
seq_num = 0
tab_ui.output_textedit.append(_("Starting..."))
tab_ui.thread = PingThread(ip, ping_count, interval, packet_size, tab_ui.tab_id, seq_num)
self._connect_slots(tab_ui.thread)
# Not in a try/except block because the thread does its own error checking and reports via signals
tab_ui.thread.start()
tab_ui.toggle_start.setText(_("Pause"))
tab_ui.toggle_start.setStyleSheet("background-color: #EE6666")
self.ui.tab_bar.setTabIcon(index, QtGui.QIcon("data/play.ico"))
# No sense placing a hyphen if there's nothing on the other side
if len(label) < 1:
self.ui.tab_bar.setTabText(index, ip)
else:
self.ui.tab_bar.setTabText(index, " - ".join([ip, label]))
def launch():
app = QtGui.QApplication(sys.argv)
pp = PingPung()
pp.ui.show()
sys.exit(app.exec_())
# Unfortunate, but necessary workaround for Windows exe builds
try:
approot = __file__
except NameError: # Building for Windows with cx_freeze
approot = "__file__"
# Helper function
def read(fname):
return open(os.path.join(os.path.dirname(approot), fname)).read()
if os.path.isfile('VERSION'):
__version__ = read('VERSION')
else:
__version__ = "0.1 - Unknown Build"
if __name__ == "__main__":
launch() | gpl-2.0 |
whummer/moto | moto/config/responses.py | 1 | 2064 | import json
from moto.core.responses import BaseResponse
from .models import config_backends
class ConfigResponse(BaseResponse):
@property
def config_backend(self):
return config_backends[self.region]
def put_configuration_recorder(self):
self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
return ""
def describe_configuration_recorders(self):
recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
schema = {'ConfigurationRecorders': recorders}
return json.dumps(schema)
def describe_configuration_recorder_status(self):
recorder_statuses = self.config_backend.describe_configuration_recorder_status(
self._get_param('ConfigurationRecorderNames'))
schema = {'ConfigurationRecordersStatus': recorder_statuses}
return json.dumps(schema)
def put_delivery_channel(self):
self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
return ""
def describe_delivery_channels(self):
delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
schema = {'DeliveryChannels': delivery_channels}
return json.dumps(schema)
def describe_delivery_channel_status(self):
raise NotImplementedError()
def delete_delivery_channel(self):
self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
return ""
def delete_configuration_recorder(self):
self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""
def start_configuration_recorder(self):
self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""
def stop_configuration_recorder(self):
self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
return ""
| apache-2.0 |
Lambdanaut/crits | crits/screenshots/forms.py | 21 | 2748 | from django import forms
from django.forms.widgets import HiddenInput
from crits.core import form_consts
from crits.core.handlers import get_source_names
from crits.core.user_tools import get_user_organization
class AddScreenshotForm(forms.Form):
"""
Django form for adding an Object.
"""
error_css_class = 'error'
required_css_class = 'required'
screenshot = forms.FileField(required=False)
screenshot_ids = forms.CharField(required=False,
widget=forms.Textarea(attrs={'cols':'80',
'rows':'2'}),
help_text=('If it is an existing '
'screenshot (or screenshots), '
'enter the ID(s) instead. All '
'other data <br>will be copied '
'from the existing screenshot(s).'))
description = forms.CharField(widget=forms.Textarea(attrs={'cols':'80',
'rows':'2'}),
required=False)
tags = forms.CharField(widget=forms.TextInput(attrs={'size':'90'}),
required=False,
help_text='Comma-separated list of tags')
source = forms.ChoiceField(required=True,
label=form_consts.Object.SOURCE,
widget=forms.Select(attrs={'class': 'no_clear bulknoinitial'}))
method = forms.CharField(required=False, label=form_consts.Object.METHOD)
reference = forms.CharField(widget=forms.TextInput(attrs={'size':'90'}),
required=False, label=form_consts.Object.REFERENCE)
otype = forms.CharField(required=False,
widget=HiddenInput(attrs={'class':'bulkskip'}),
label=form_consts.Object.PARENT_OBJECT_TYPE)
oid = forms.CharField(required=False,
widget=HiddenInput(attrs={'class':'bulkskip'}),
label=form_consts.Object.PARENT_OBJECT_ID)
def __init__(self, username, *args, **kwargs):
super(AddScreenshotForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [(c.name,
c.name) for c in get_source_names(True,
True,
username)]
self.fields['source'].initial = get_user_organization(username)
| mit |
chrisfranko/MIM-MM | qa/rpc-tests/skeleton.py | 148 | 2426 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Skeleton for python-based regression tests using
# JSON-RPC
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def run_test(nodes):
# Replace this as appropriate
for node in nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| mit |
tiborsimko/invenio-utils | invenio_utils/hash.py | 5 | 1142 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio hash functions.
Usage example:
>>> from invenio_utils.hash import md5
>>> print md5('MyPa$$')
Simplifies imports of hash functions depending on Python version.
"""
try:
from hashlib import sha256, sha1, md5
HASHLIB_IMPORTED = True
except ImportError:
from md5 import md5
from sha import sha as sha1
HASHLIB_IMPORTED = False
| gpl-2.0 |
adam111316/SickGear | sickbeard/providers/torrentleech.py | 3 | 5655 | # coding=utf-8
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import traceback
from . import generic
from sickbeard import logger, tvcache, helpers
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
class TorrentLeechProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, 'TorrentLeech')
self.url_base = 'https://torrentleech.org/'
self.urls = {'config_provider_home_uri': self.url_base,
'login': self.url_base + 'user/account/login/',
'search': self.url_base + 'torrents/browse/index/query/%s/categories/%s',
'cache': self.url_base + 'torrents/browse/index/categories/%s',
'get': self.url_base + '%s'}
self.categories = '2,26,27,32'
self.url = self.urls['config_provider_home_uri']
self.username, self.password, self.minseed, self.minleech = 4 * [None]
self.cache = TorrentLeechCache(self)
def _do_login(self):
logged_in = lambda: 'tluid' in self.session.cookies and 'tlpass' in self.session.cookies
if logged_in():
return True
if self._check_auth():
login_params = {'username': self.username, 'password': self.password, 'remember_me': 'on', 'login': 'submit'}
response = helpers.getURL(self.urls['login'], post_data=login_params, session=self.session)
if response and logged_in():
return True
logger.log(u'Failed to authenticate with %s, abort provider.' % self.name, logger.ERROR)
return False
def _do_search(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
if not self._do_login():
return results
items = {'Season': [], 'Episode': [], 'Cache': []}
rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'get': 'download'}.items())
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
if 'Cache' == mode:
search_url = self.urls['cache'] % self.categories
else:
search_url = self.urls['search'] % (search_string, self.categories)
html = self.get_url(search_url)
cnt = len(items[mode])
try:
if not html or self._has_no_results(html):
raise generic.HaltParseException
with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
torrent_table = soup.find('table', attrs={'id': 'torrenttable'})
torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')
if 2 > len(torrent_rows):
raise generic.HaltParseException
for tr in torrent_rows[1:]:
try:
seeders, leechers = [int(tr.find('td', attrs={'class': x}).get_text().strip())
for x in ('seeders', 'leechers')]
if mode != 'Cache' and (seeders < self.minseed or leechers < self.minleech):
continue
info = tr.find('td', {'class': 'name'}).a
title = ('title' in info.attrs and info['title']) or info.get_text().strip()
download_url = self.urls['get'] % str(tr.find('a', href=rc['get'])['href']).lstrip('/')
except (AttributeError, TypeError):
continue
if title and download_url:
items[mode].append((title, download_url, seeders))
except generic.HaltParseException:
pass
except Exception:
logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
self._log_result(mode, len(items[mode]) - cnt, search_url)
items[mode].sort(key=lambda tup: tup[2], reverse=True)
results += items[mode]
return results
def find_propers(self, search_date=datetime.datetime.today()):
return self._find_propers(search_date)
def _get_episode_search_strings(self, ep_obj, add_string='', **kwargs):
return generic.TorrentProvider._get_episode_search_strings(self, ep_obj, add_string, sep_date='|', use_or=False)
class TorrentLeechCache(tvcache.TVCache):
def __init__(self, this_provider):
tvcache.TVCache.__init__(self, this_provider)
self.minTime = 20 # cache update frequency
def _getRSSData(self):
return self.provider.get_cache_data()
provider = TorrentLeechProvider()
| gpl-3.0 |
makdharma/grpc | src/python/grpcio/grpc/framework/interfaces/base/base.py | 19 | 13144 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The base interface of RPC Framework.
Implementations of this interface support the conduct of "operations":
exchanges between two distinct ends of an arbitrary number of data payloads
and metadata such as a name for the operation, initial and terminal metadata
in each direction, and flow control. These operations may be used for transfers
of data, remote procedure calls, status indication, or anything else
applications choose.
"""
# threading is referenced from specification in this module.
import abc
import enum
import threading # pylint: disable=unused-import
import six
# abandonment is referenced from specification in this module.
from grpc.framework.foundation import abandonment # pylint: disable=unused-import
# pylint: disable=too-many-arguments
class NoSuchMethodError(Exception):
"""Indicates that an unrecognized operation has been called.
Attributes:
code: A code value to communicate to the other side of the operation
along with indication of operation termination. May be None.
details: A details value to communicate to the other side of the
operation along with indication of operation termination. May be None.
"""
def __init__(self, code, details):
"""Constructor.
Args:
code: A code value to communicate to the other side of the operation
along with indication of operation termination. May be None.
details: A details value to communicate to the other side of the
operation along with indication of operation termination. May be None.
"""
super(NoSuchMethodError, self).__init__()
self.code = code
self.details = details
class Outcome(object):
"""The outcome of an operation.
Attributes:
kind: A Kind value coarsely identifying how the operation terminated.
code: An application-specific code value or None if no such value was
provided.
details: An application-specific details value or None if no such value was
provided.
"""
@enum.unique
class Kind(enum.Enum):
"""Ways in which an operation can terminate."""
COMPLETED = 'completed'
CANCELLED = 'cancelled'
EXPIRED = 'expired'
LOCAL_SHUTDOWN = 'local shutdown'
REMOTE_SHUTDOWN = 'remote shutdown'
RECEPTION_FAILURE = 'reception failure'
TRANSMISSION_FAILURE = 'transmission failure'
LOCAL_FAILURE = 'local failure'
REMOTE_FAILURE = 'remote failure'
class Completion(six.with_metaclass(abc.ABCMeta)):
"""An aggregate of the values exchanged upon operation completion.
Attributes:
    terminal_metadata: A terminal metadata value for the operation.
code: A code value for the operation.
message: A message value for the operation.
"""
class OperationContext(six.with_metaclass(abc.ABCMeta)):
"""Provides operation-related information and action."""
@abc.abstractmethod
def outcome(self):
"""Indicates the operation's outcome (or that the operation is ongoing).
Returns:
None if the operation is still active or the Outcome value for the
operation if it has terminated.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
"""Adds a function to be called upon operation termination.
Args:
callback: A callable to be passed an Outcome value on operation
termination.
Returns:
None if the operation has not yet terminated and the passed callback will
later be called when it does terminate, or if the operation has already
terminated an Outcome value describing the operation termination and the
passed callback will not be called as a result of this method call.
"""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the operation.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the operation to complete before it is considered to have
timed out. Zero is returned if the operation has terminated.
"""
raise NotImplementedError()
@abc.abstractmethod
def cancel(self):
"""Cancels the operation if the operation has not yet terminated."""
raise NotImplementedError()
@abc.abstractmethod
def fail(self, exception):
"""Indicates that the operation has failed.
Args:
exception: An exception germane to the operation failure. May be None.
"""
raise NotImplementedError()
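# Illustrative sketch (added; not part of the original interface): typical
# consumption of an OperationContext by customer code - register a
# termination callback and poll the remaining time budget. The context object
# is supplied by the framework; the one-second threshold and the print call
# are placeholder behavior.
def _example_watch_context(operation_context):
    def _on_termination(outcome):
        print('operation terminated with outcome kind %s' % (outcome.kind,))
    already_terminated = operation_context.add_termination_callback(
        _on_termination)
    if already_terminated is None and operation_context.time_remaining() < 1.0:
        # Still active but nearly out of time: give up early.
        operation_context.cancel()
    return already_terminated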
class Operator(six.with_metaclass(abc.ABCMeta)):
"""An interface through which to participate in an operation."""
@abc.abstractmethod
def advance(self,
initial_metadata=None,
payload=None,
completion=None,
allowance=None):
"""Progresses the operation.
Args:
initial_metadata: An initial metadata value. Only one may ever be
communicated in each direction for an operation, and they must be
communicated no later than either the first payload or the completion.
payload: A payload value.
completion: A Completion value. May only ever be non-None once in either
direction, and no payloads may be passed after it has been communicated.
allowance: A positive integer communicating the number of additional
payloads allowed to be passed by the remote side of the operation.
"""
raise NotImplementedError()
class ProtocolReceiver(six.with_metaclass(abc.ABCMeta)):
"""A means of receiving protocol values during an operation."""
@abc.abstractmethod
def context(self, protocol_context):
"""Accepts the protocol context object for the operation.
Args:
protocol_context: The protocol context object for the operation.
"""
raise NotImplementedError()
class Subscription(six.with_metaclass(abc.ABCMeta)):
"""Describes customer code's interest in values from the other side.
Attributes:
kind: A Kind value describing the overall kind of this value.
termination_callback: A callable to be passed the Outcome associated with
the operation after it has terminated. Must be non-None if kind is
Kind.TERMINATION_ONLY. Must be None otherwise.
allowance: A callable behavior that accepts positive integers representing
the number of additional payloads allowed to be passed to the other side
of the operation. Must be None if kind is Kind.FULL. Must not be None
otherwise.
operator: An Operator to be passed values from the other side of the
operation. Must be non-None if kind is Kind.FULL. Must be None otherwise.
protocol_receiver: A ProtocolReceiver to be passed protocol objects as they
become available during the operation. Must be non-None if kind is
Kind.FULL.
"""
@enum.unique
class Kind(enum.Enum):
NONE = 'none'
TERMINATION_ONLY = 'termination only'
FULL = 'full'
class Servicer(six.with_metaclass(abc.ABCMeta)):
"""Interface for service implementations."""
@abc.abstractmethod
def service(self, group, method, context, output_operator):
"""Services an operation.
Args:
group: The group identifier of the operation to be serviced.
method: The method identifier of the operation to be serviced.
context: An OperationContext object affording contextual information and
actions.
output_operator: An Operator that will accept output values of the
operation.
Returns:
A Subscription via which this object may or may not accept more values of
the operation.
Raises:
NoSuchMethodError: If this Servicer does not handle operations with the
given group and method.
abandonment.Abandoned: If the operation has been aborted and there no
longer is any reason to service the operation.
"""
raise NotImplementedError()
class End(six.with_metaclass(abc.ABCMeta)):
"""Common type for entry-point objects on both sides of an operation."""
@abc.abstractmethod
def start(self):
"""Starts this object's service of operations."""
raise NotImplementedError()
@abc.abstractmethod
def stop(self, grace):
"""Stops this object's service of operations.
This object will refuse service of new operations as soon as this method is
called but operations under way at the time of the call may be given a
grace period during which they are allowed to finish.
Args:
grace: A duration of time in seconds to allow ongoing operations to
terminate before being forcefully terminated by the stopping of this
End. May be zero to terminate all ongoing operations and immediately
stop.
Returns:
A threading.Event that will be set to indicate all operations having
terminated and this End having completely stopped. The returned event
may not be set until after the full grace period (if some ongoing
operation continues for the full length of the period) or it may be set
much sooner (if for example this End had no operations in progress at
the time its stop method was called).
"""
raise NotImplementedError()
@abc.abstractmethod
def operate(self,
group,
method,
subscription,
timeout,
initial_metadata=None,
payload=None,
completion=None,
protocol_options=None):
"""Commences an operation.
Args:
group: The group identifier of the invoked operation.
method: The method identifier of the invoked operation.
subscription: A Subscription to which the results of the operation will be
passed.
timeout: A length of time in seconds to allow for the operation.
initial_metadata: An initial metadata value to be sent to the other side
of the operation. May be None if the initial metadata will be later
passed via the returned operator or if there will be no initial metadata
passed at all.
payload: An initial payload for the operation.
completion: A Completion value indicating the end of transmission to the
other side of the operation.
protocol_options: A value specified by the provider of a Base interface
implementation affording custom state and behavior.
Returns:
A pair of objects affording information about the operation and action
continuing the operation. The first element of the returned pair is an
OperationContext for the operation and the second element of the
returned pair is an Operator to which operation values not passed in
this call should later be passed.
"""
raise NotImplementedError()
@abc.abstractmethod
def operation_stats(self):
"""Reports the number of terminated operations broken down by outcome.
Returns:
A dictionary from Outcome.Kind value to an integer identifying the number
of operations that terminated with that outcome kind.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_idle_action(self, action):
"""Adds an action to be called when this End has no ongoing operations.
Args:
action: A callable that accepts no arguments.
"""
raise NotImplementedError()
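# Illustrative sketch (added; not part of the original interface module): a
# minimal echo Servicer wired to a concrete Subscription. The group/method
# names are hypothetical placeholders; real code would usually build
# subscriptions with the framework's helper utilities rather than subclassing
# Subscription directly.
class _SketchProtocolReceiver(ProtocolReceiver):
    def context(self, protocol_context):
        pass  # This sketch ignores protocol-level objects.
class _SketchSubscription(Subscription):
    kind = Subscription.Kind.FULL
    termination_callback = None
    allowance = None
    def __init__(self, operator):
        self.operator = operator
        self.protocol_receiver = _SketchProtocolReceiver()
class _EchoServicer(Servicer):
    """Accepts a single hypothetical (group, method) pair and echoes values."""
    def service(self, group, method, context, output_operator):
        if (group, method) != ('sketch.Group', 'Echo'):
            raise NoSuchMethodError(None, 'unrecognized group/method')
        class _EchoOperator(Operator):
            def advance(self, initial_metadata=None, payload=None,
                        completion=None, allowance=None):
                # Forward every received value straight back to the invoker.
                output_operator.advance(
                    initial_metadata=initial_metadata, payload=payload,
                    completion=completion, allowance=allowance)
        return _SketchSubscription(_EchoOperator())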
| bsd-3-clause |
mwmuni/LIGGGHTS_GUI | OpenGL/raw/GL/ARB/map_buffer_range.py | 9 | 1118 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_map_buffer_range'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_map_buffer_range',error_checker=_errors._error_checker)
GL_MAP_FLUSH_EXPLICIT_BIT=_C('GL_MAP_FLUSH_EXPLICIT_BIT',0x0010)
GL_MAP_INVALIDATE_BUFFER_BIT=_C('GL_MAP_INVALIDATE_BUFFER_BIT',0x0008)
GL_MAP_INVALIDATE_RANGE_BIT=_C('GL_MAP_INVALIDATE_RANGE_BIT',0x0004)
GL_MAP_READ_BIT=_C('GL_MAP_READ_BIT',0x0001)
GL_MAP_UNSYNCHRONIZED_BIT=_C('GL_MAP_UNSYNCHRONIZED_BIT',0x0020)
GL_MAP_WRITE_BIT=_C('GL_MAP_WRITE_BIT',0x0002)
@_f
@_p.types(None,_cs.GLenum,_cs.GLintptr,_cs.GLsizeiptr)
def glFlushMappedBufferRange(target,offset,length):pass
@_f
@_p.types(ctypes.c_void_p,_cs.GLenum,_cs.GLintptr,_cs.GLsizeiptr,_cs.GLbitfield)
def glMapBufferRange(target,offset,length,access):pass
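# Illustrative usage sketch (added; not part of the autogenerated wrapper):
# map a range of a bound buffer object for writing using the bit constants
# defined above. Assumes a current GL context and that a buffer of "length"
# bytes is already bound to "target"; unmapping (glUnmapBuffer) lives in
# another module and is omitted here.
def _example_map_for_write(target, length):
    access = (GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
              GL_MAP_FLUSH_EXPLICIT_BIT)
    ptr = glMapBufferRange(target, 0, length, access)
    # ... fill the mapped memory through ctypes here ...
    glFlushMappedBufferRange(target, 0, length)
    return ptr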
| gpl-3.0 |
jlublin/landpatterngen | gen_library.py | 1 | 2880 | #!/usr/bin/env python3
import re
import importlib
import sqlite3
import sys
def parse_tollens(part):
	"""Collapse paired '<name>_l'/'<name>_h' entries of part into a single
	TolLen stored under '<name>', e.g. 'pitch_l'/'pitch_h' -> 'pitch'."""
	keys = list(part.keys())
	for key in keys:
		if(key[-2:] == '_l' and
			key[:-2] + '_h' in part):
			part[key[:-2]] = TolLen(float(part[key[:-2] + '_l']), float(part[key[:-2] + '_h']))
			part.pop(key[:-2] + '_l')
			part.pop(key[:-2] + '_h')
if(__name__ == '__main__'):
import sys
import target_eagle
from tollen import TolLen
target = target_eagle.get_target()
	if(len(sys.argv) < 2):
		print('Usage: {} <library.lbr>'.format(sys.argv[0]), file=sys.stderr)
		sys.exit(1)
fout = open(1, 'w')
process = { 'F': TolLen(0, 0.05, 1),
'P': TolLen(0, 0.05, 1) }
conn = sqlite3.connect(sys.argv[1])
conn.row_factory = sqlite3.Row
c = conn.cursor()
# Print library
c.execute('SELECT * FROM library')
print('Library version: {}\nName: {}\n{}'.format(*c.fetchone()), file=sys.stderr)
c.execute('SELECT * FROM symbols')
for sym in c.fetchall():
target.add_symbol(sym['name'])
sym = importlib.import_module('symbols.{}'.format(sym['type']))
sym.draw(target)
c.execute('SELECT * FROM packages')
for pac in c.fetchall():
package = dict(pac)
if(package['type'] in ['dual_row', 'sot23']):
c.execute('SELECT * FROM pac_{} WHERE package_id = :id'.format(package['type']), {'id': pac[0]})
package.update(dict(c.fetchone()))
else:
print('Unknown type {}'.format(package['type']), file=sys.stderr)
sys.exit()
c.execute('SELECT * FROM pac_deleted_pins WHERE package_id = :id', {'id': pac[0]})
package['deleted_pins'] = [x['pin'] for x in c.fetchall()]
c.execute('SELECT * FROM pac_holes WHERE package_id = :id', {'id': pac[0]})
package['holes'] = [dict(x) for x in c.fetchall()]
c.execute('SELECT * FROM pac_mount_pads WHERE package_id = :id', {'id': pac[0]})
package['mount_pads'] = [dict(x) for x in c.fetchall()]
parse_tollens(package)
target.add_package(package['name'])
mod = importlib.import_module('packages.{}'.format(package['type']))
pac = mod.get_package(package, 'IPC7351-B', process)
pac.gen(target)
c.execute('SELECT * FROM devices')
for dev in c.fetchall():
device = dict(dev)
device.update({'symbols': [], 'packages': []})
c.execute('SELECT * FROM dev_symbols WHERE device_id = :id', {'id': dev[0]})
for sym in [dict(x) for x in c.fetchall()]:
device['symbols'].append(sym)
for pac in c.execute('SELECT * FROM dev_packages WHERE device_id = :id', {'id': dev[0]}):
package = dict(pac)
c.execute('SELECT * FROM dev_pac_attributes WHERE dev_pac_id = :id', {'id': pac[0]})
package['attributes'] = [dict(x) for x in c.fetchall()]
c.execute('SELECT * FROM dev_pac_connections WHERE dev_pac_id = :id', {'id': pac[0]})
package['connections'] = [dict(x) for x in c.fetchall()]
device['packages'].append(package)
target.add_device(device)
target.output(fout)
fout.close()
| gpl-3.0 |
flagxor/swtoolkit | test/directx_9_0_c_test.py | 9 | 2384 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for directx_9_0_c. These are MEDIUM tests."""
import TestFramework
def TestSConstruct(scons_globals):
"""Test SConstruct file.
Args:
scons_globals: Global variables dict from the SConscript file.
"""
# Get globals from SCons
Environment = scons_globals['Environment']
env = Environment(tools=['component_setup'])
# Make sure including the tool doesn't cause a failure on any platform
# Run hermetically
env1 = env.Clone(DIRECTX9_0_C_DIR='.')
env1.Tool('directx_9_0_c')
# Run non-hermetically
env2 = env.Clone(DIRECTX9_0_C_DIR=None)
env2.Tool('directx_9_0_c')
def main():
test = TestFramework.TestFramework()
base = 'test'
test.subdir(base)
test.WriteSConscript(base + '/SConstruct', TestSConstruct)
test.run(chdir=base)
test.pass_test()
if __name__ == '__main__':
main()
| bsd-3-clause |
seraphlnWu/in_trip | in_trip/in_trip/lib/arbiter.py | 1 | 16076 | #coding=utf-8
# This module implements the prefork model, used to spawn and manage child processes
import os
import sys
import time
import errno
import signal
import random
import select
import traceback
from in_trip.lib.pidfile import Pidfile
from in_trip.lib.config import Config
from in_trip.lib.errors import HaltServer
from in_trip.lib.sock import create_sockets
from in_trip.lib.log import setup_file_logging
from in_trip.lib.utils import (getcwd, daemonize, set_non_blocking, close_on_exec,
reopen_log_file, _setproctitle, set_process_owner, chown,
str2bool, get_user_info, import_app, reopen_log_file)
class Arbiter(object):
PIPE = []
START_CTX = {} # for exec
LISTENERS = [] # listening sockets
WORKERS = {}
WORKER_BOOT_ERROR = 3
SOCK_BACKLOG = 20
# signals
SIG_QUEUE = []
SIGNALS = [getattr(signal, "SIG%s" % x) \
for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
def __init__(self, worker_uri, config_file=None, section=None):
self.worker_uri = worker_uri
self.config_file = config_file
self.section = section
self.reexec_pid = 0
self.worker_age = 0
self.setup()
# init start context
cwd = getcwd()
#self.master_name = "Master"
args = sys.argv[:]
args.insert(0, sys.executable)
self.START_CTX = {
"args": args,
"cwd": cwd,
0: sys.executable
}
self.worker_boot_time = time.time()
def setup(self):
# load config file
if self.config_file:
Config.ACTUAL_CONFIG_FILE = self.config_file
else:
self.config_file = Config.DEFAULT_CONFIG_FILE
if self.section:
Config.SECTION_NAME = self.section
self.cfg = Config()
if isinstance(self.cfg.daemonize, basestring):
self.daemonize = str2bool(self.cfg.daemonize)
if self.cfg.log_file is not None:
self.log_file = self.cfg.log_file
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.master_name = "master: %s" % self.cfg.proc_name
if self.cfg.number_workers:
self.number_workers = int(self.cfg.number_workers)
if self.cfg.graceful_timeout:
self.graceful_timeout = float(self.cfg.graceful_timeout)
if self.cfg.user:
user = self.cfg.user.split(':')
if len(user) <= 1:
                user.append(user[0])
self.uid, self.gid = get_user_info(*user[:2])
else:
# set as default
self.uid, self.gid = os.getuid(), os.getgid()
if self.cfg.bind:
binds = []
for b in self.cfg.bind.split(','):
addr = b.strip().split(':')
binds.append((addr[0], int(addr[1])))
self.bind = binds
# self.bind = [tuple(b.strip().split(":")) for b in self.cfg.bind.split(',')] # bind address comma separate
else:
self.bind = None
self.unix_socket = self.cfg.unix_socket
if self.cfg.kill_interval:
self.kill_interval = int(self.cfg.kill_interval)
else:
self.kill_interval = 0
def __getattr__(self, name):
value = getattr(self.cfg, name)
if value is None:
self.file_logger.warning('No config option %s' % name)
return value
@property
def worker_class(self):
"""
lazy load after fork
"""
return import_app(self.worker_uri)
def init_signals(self):
"""\
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
[signal.signal(s, self.signal) for s in self.SIGNALS]
signal.signal(signal.SIGCHLD, self.handle_chld)
def start(self):
# Initialize the arbiter, create pidfile if needed.
self.set_process_owner(self.uid, self.gid)
# setup logging
self.file_logger = setup_file_logging("prefork", self.log_file)
self.file_logger.info("Startting arbiter")
self.file_logger.info('Use Config File %s', self.config_file)
if self.daemonize:
daemonize()
_setproctitle(self.master_name)
self.pid = os.getpid()
if self.pidfile:
self.pidfile.create(self.pid)
# chown(self.pidfile.fname, self.uid, self.gid)
self.init_signals()
# close old PIPE on reexec
if self.PIPE:
[os.close(p) for p in self.PIPE]
# initialize the pipe
self.PIPE = pair = os.pipe()
for p in pair:
set_non_blocking(p)
close_on_exec(p)
if not self.LISTENERS and (self.bind or self.unix_socket):
self.file_logger.info("Listern on %s, unixdomian:%s",
self.cfg.bind or "",
self.cfg.unix_socket or "")
self.LISTENERS = create_sockets(self.bind,
self.unix_socket,
self.SOCK_BACKLOG)
for s in self.LISTENERS:
close_on_exec(s)
s.setblocking(0)
def run(self):
self.start()
self.manage_workers()
while 1: # handle signals and manage worker process
try:
self.reap_workers()
sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
if sig is None:
self.sleep()
if self.kill_interval and time.time() > (self.worker_boot_time + self.kill_interval):
self.stop()
self.worker_boot_time = time.time()
self.manage_workers()
continue
if sig not in self.SIG_NAMES:
self.file_logger.info("Ignoring unknown signal: %s", sig)
continue
signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.file_logger.error("Unhandled signal: %s", signame)
continue
self.file_logger.info("Handling signal: %s", signame)
handler()
#except StopIteration:
# self.halt()
except KeyboardInterrupt:
self.halt()
except HaltServer as inst:
self.halt(reason=inst.reason, exit_status=inst.exit_status)
except SystemExit:
raise
except Exception:
self.file_logger.info("Unhandled exception in main loop:\n%s",
traceback.format_exc())
self.stop(False)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(-1)
def manage_workers(self):
"""\
        Maintain the number of workers by spawning or killing as required
"""
actual_number_workers = len(self.WORKERS.keys())
if actual_number_workers < self.number_workers:
self.spawn_workers()
else:
if actual_number_workers > self.number_workers:
workers = sorted(self.WORKERS.items(), key=lambda x: x[1]) # age, bigger means newer
pids = [pid for pid, _ in workers[:actual_number_workers - self.number_workers]]
self.file_logger.info("kill pids:%s", pids)
self.kill_workers(pids)
def spawn_workers(self):
for i in range(self.number_workers - len(self.WORKERS.keys())):
self.spawn_worker()
time.sleep(0.1 * random.random())
def spawn_worker(self):
self.worker_age += 1
pid = os.fork()
worker_pid = os.getpid()
if pid != 0: # parent
self.WORKERS[pid] = self.worker_age # need worker's age param
# self.file_logger.info("add pid:%s", pid)
return pid
# process child
try:
worker = self.worker_class(self.cfg, self.file_logger, self.pid, self.LISTENERS)
self.file_logger.info("Booting worker with pid:%s", worker_pid)
self.set_process_owner(self.uid, self.gid)
worker.run()
sys.exit(0)
except SystemExit:
raise
except:
self.file_logger.exception("Exception in worker process:\n%s", traceback.format_exc())
if not worker.booted:
sys.exit(self.WORKER_BOOT_ERROR)
sys.exit(-1)
finally:
self.file_logger.info("Worker exiting (pid: %s)", worker_pid)
# TODO: clean
def kill_workers(self, pids, sig=signal.SIGQUIT):
for pid in pids:
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig=signal.SIGQUIT):
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
#TODO: clean worker
except (KeyError, OSError):
return
else:
raise e
def reap_workers(self):
"""\
Reap workers to avoid zombie processes
"""
try:
while 1:
wpid, status = os.waitpid(-1, os.WNOHANG) # -1 means wait for any child of the current process, see doc:http://docs.python.org/2/library/os.html
if not wpid:
break
self.file_logger.info("Reap worker %s", wpid)
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
exitcode = status >> 8
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
# TODO:clean
except OSError as e:
if e.errno == errno.ECHILD:
pass
def sleep(self):
"""\
Sleep until PIPE is readable or we timeout. A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except select.error as e:
if e.args[0] not in [errno.EAGAIN, errno.EINTR]:
raise
except OSError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()
def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], b'.')
except IOError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def halt(self, reason=None, exit_status=0, graceful=True):
""" halt arbiter """
self.stop()
self.file_logger.info("Shutting down: %s", self.master_name)
if reason is not None:
self.file_logger.info("Reason: %s", reason)
if self.pidfile is not None:
self.pidfile.unlink()
if self.unix_socket:
os.unlink(self.unix_socket)
sys.exit(exit_status)
def stop(self, graceful=True):
"""
        kill workers gracefully or not
"""
# self.LISTENERS = []
sig = signal.SIGQUIT if graceful else signal.SIGTERM
limit = time.time() + self.graceful_timeout
while self.WORKERS and time.time() < limit:
self.kill_workers(self.WORKERS.keys(), sig)
time.sleep(0.1)
self.reap_workers()
        self.kill_workers(self.WORKERS.keys(), signal.SIGKILL)  # force quit after graceful timeout
def reload(self):
self.file_logger.info("Reload config file:%s", self.cfg.ACTUAL_CONFIG_FILE)
old_pidfile = self.pidfile
self.cfg.load_config()
self.setup()
if old_pidfile and self.pidfile and old_pidfile.fname != self.pidfile.fname:
#old_pidfile.unlink()
self.file_logger.info("pidfile:%s", self.pidfile.fname)
self.pidfile.rename(self.pid)
self.set_process_owner(self.uid, self.gid)
self.file_logger = setup_file_logging("prefork", self.log_file)
_setproctitle(self.master_name)
# start new workers
for i in range(self.number_workers):
self.spawn_worker()
self.manage_workers()
def reexec(self):
"""
Relaunch the master and workers.
"""
if self.pidfile is not None:
self.pidfile.rename("%s.oldbin" % self.pidfile.fname)
self.reexec_pid = os.fork()
if self.reexec_pid != 0:
self.master_name = "Old %s" % self.master_name
_setproctitle(self.master_name)
return
os.chdir(self.START_CTX['cwd'])
os.execvpe(self.START_CTX[0], self.START_CTX['args'], os.environ)
def set_process_owner(self, uid, gid):
try:
set_process_owner(uid, gid)
except OSError as e:
if e.errno == errno.EPERM:
self.file_logger.warning("Set proc username need sudo permission, use default user instead")
else:
raise e
def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
def handle_chld(self, sig, frame):
"handle SIGCHLD"
self.wakeup()
def handle_quit(self):
"SIGQUIT handling"
self.halt()
def handle_int(self):
"SIGINT handling"
self.halt(graceful=False)
def handle_term(self):
"SIGTERM handling"
self.halt(graceful=False)
def handle_hup(self):
"""\
HUP handling.
- Reload configuration
- Start the new worker processes with a new configuration
- Gracefully shutdown the old worker processes
"""
self.file_logger.info("Hang up: %s", self.master_name)
self.reload()
def handle_winch(self):
"SIGWINCH handling, gracefully stop workers"
if self.daemonize:
self.file_logger.info("graceful stop of workers")
self.number_workers = 0
self.kill_workers(self.WORKERS.keys(), signal.SIGQUIT)
else:
self.file_logger.info("SIGWINCH ignored. Not daemonized")
def handle_ttin(self):
"""\
SIGTTIN handling.
Increases the number of workers by one.
"""
self.number_workers += 1
self.manage_workers()
def handle_ttou(self):
"""\
SIGTTOU handling.
Decreases the number of workers by one.
"""
if self.number_workers <= 1:
return
self.number_workers -= 1
self.manage_workers()
def handle_usr1(self):
"""\
SIGUSR1 handling.
Reload log file by sending them a SIGUSR1
"""
self.kill_workers(self.WORKERS.keys(), signal.SIGUSR1)
reopen_log_file(self.file_logger)
def handle_usr2(self):
"""\
SIGUSR2 handling.
Creates a new master/worker set as a slave of the current
master without affecting old workers. Use this to do live
deployment with the ability to backout a change.
"""
self.reexec()
self.kill_workers(self.WORKERS.keys(), signal.SIGUSR2)
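# Illustrative usage sketch (added; not part of the original module): how an
# Arbiter is typically launched. The worker URI and config path below are
# hypothetical placeholders; the worker class itself is resolved lazily
# through the worker_class property after each fork.
def _example_launch():
    arbiter = Arbiter('myapp.worker.Worker',
                      config_file='/etc/myapp/prefork.ini',
                      section='worker')
    arbiter.run()  # daemonizes if configured, forks workers, loops on signals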
| mit |
renegelinas/mi-instrument | mi/dataset/parser/test/test_auv_eng_auv.py | 7 | 22058 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test
@fid marine-integrations/mi/dataset/parser/test/test_auv_eng_auv.py
@author Jeff Roy
@brief Test code for a auv_eng_auv data parser
NOTE: several other parsers built on auv_common have already been tested, so
negative paths through the code are not verified again here.
Testing is limited to code specific to the derived classes of auv_eng_auv
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import RecoverableSampleException
from mi.core.log import get_logger
from mi.dataset.driver.auv_eng.auv.resource import RESOURCE_PATH
from mi.dataset.parser.auv_eng_auv import AuvEngAuvParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class AuvEngAuvTestCase(ParserUnitTestCase):
"""
auv_eng_auv Parser unit test suite
"""
# IMAGENEX 852 TESTS
def test_simple_imagenex(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_imagenex.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'imagenex_telem_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_imagenex.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'imagenex_recov_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_imagenex(self):
"""
Read test data and pull out data particles.
        Assert that we get 2 errors due to incorrect epoch and mission time formats
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'imagenex_bad_timestamps.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 7
self.assertEqual(len(particles), 7)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)
# DIGITAL USBL TESTS
def test_simple_usbl(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_usbl.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'usbl_telem_22.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_usbl.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'usbl_recov_22.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_usbl(self):
"""
Read test data and pull out data particles.
        Assert that we get 2 errors due to incorrect epoch and mission time formats
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'usbl_bad_timestamps.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 6
self.assertEqual(len(particles), 6)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)
# TRI FIN MOTOR TESTS
def test_simple_motor(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_motor.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(28)
self.assert_particles(particles, 'motor_telem_28.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_motor.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(28)
self.assert_particles(particles, 'motor_recov_28.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_motor(self):
"""
Read test data and pull out data particles.
        Assert that we get 1 error due to an incorrect epoch or mission time format
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'motor_bad_timestamp.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 4
self.assertEqual(len(particles), 4)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
# EMERGENCY BOARD TESTS
def test_simple_emergency(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_emergency.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'emergency_telem_7.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_emergency.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'emergency_recov_7.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_emergency(self):
"""
Read test data and pull out data particles.
        Assert that we get 1 error due to an incorrect epoch or mission time format
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'emergency_bad_timestamp.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 3
self.assertEqual(len(particles), 3)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
# OIL COMPENSATOR TESTS
def test_simple_oil_comp(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_oil_comp.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'oil_comp_telem_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_oil_comp.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'oil_comp_recov_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_oil_comp(self):
"""
Read test data and pull out data particles.
        Assert that we get 2 errors due to incorrect epoch and mission time formats
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'oil_comp_bad_timestamps.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 4
self.assertEqual(len(particles), 4)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)
# SMART BATTERY TESTS
def test_simple_battery(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_battery.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'battery_telem_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_battery.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'battery_recov_20.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_battery(self):
"""
Read test data and pull out data particles.
        Assert that we get 1 error due to an incorrect epoch or mission time format
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'battery_bad_timestamp.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 4
self.assertEqual(len(particles), 4)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
# DIGITAL TX BOARD TESTS
def test_simple_tx_board(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_tx_board.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'tx_board_telem_22.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_tx_board.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'tx_board_recov_22.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_tx_board(self):
"""
Read test data and pull out data particles.
        Assert that we get 2 errors due to incorrect epoch and mission time formats
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'tx_board_bad_timestamps.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 4
self.assertEqual(len(particles), 4)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)
# FAULT MESSAGE TESTS
def test_simple_fault(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_fault.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(35)
self.assert_particles(particles, 'fault_telem_35.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_fault.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(35)
self.assert_particles(particles, 'fault_recov_35.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_fault(self):
"""
Read test data and pull out data particles.
        Assert that we get 1 error due to an incorrect epoch or mission time format
This tests the generic timestamp method with two parameters
"""
with open(os.path.join(RESOURCE_PATH, 'fault_bad_timestamp.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 5
self.assertEqual(len(particles), 5)
self.assertEqual(len(self.exception_callback_value), 1)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
# AUV STATE TESTS
def test_simple_state(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
with open(os.path.join(RESOURCE_PATH, 'subset_state.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(25)
self.assert_particles(particles, 'state_telem_25.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset_state.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(25)
self.assert_particles(particles, 'state_recov_25.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_timestamp_state(self):
"""
Read test data and pull out data particles.
        Assert that we get 2 errors due to incorrect epoch and mission time formats
This tests the generic timestamp method with two parameters
"""
# TODO the Mission time in this block looks to be way to big, waiting to hear from Hydroid
with open(os.path.join(RESOURCE_PATH, 'state_bad_timestamps.csv'), 'rU') as stream_handle:
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(10) # ask for 10 should get 6
self.assertEqual(len(particles), 6)
self.assertEqual(len(self.exception_callback_value), 2)
self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)
self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)
def test_get_many(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
This test parses a file containing all message types and verifies
all of the engineering data messages
"""
with open(os.path.join(RESOURCE_PATH, 'subset2_reduced.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
particles = parser.get_records(200)
self.assert_particles(particles, 'subset2_reduced_telem.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(RESOURCE_PATH, 'subset2_reduced.csv'), 'rU') as stream_handle:
# test the recovered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=False)
particles = parser.get_records(200)
self.assert_particles(particles, 'subset2_reduced_recov.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_long_stream(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
This test parses a very large file containing all message types and verifies
there are no errors
"""
with open(os.path.join(RESOURCE_PATH, 'subset2.csv'), 'rU') as stream_handle:
# test the telemetered particle stream
parser = AuvEngAuvParser(stream_handle,
self.exception_callback,
is_telemetered=True)
parser.get_records(160000)
self.assertEqual(self.exception_callback_value, [])
| bsd-2-clause |
abergeron/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4-tuple of 2-d ndarrays (or None for missing channels),
        in which every row is a flattened image.
    img_shape : 2-tuple of ints
        The first component is the height of each image,
        the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to [0, 1] before being plotted.
output_pixel_vals : bool
        Whether or not the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
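    # Added illustrative sketch: tile 16 random 8x8 "filters" into one image
    # with tile_raster_images and display the result; the random data stands
    # in for real weight matrices.
    filters = np.random.rand(16, 64)
    tiled = tile_raster_images(filters, img_shape=(8, 8), tile_shape=(4, 4),
                               tile_spacing=(1, 1))
    show(tiled)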
| bsd-3-clause |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/PyAMF-0.6.1/pyamf/remoting/__init__.py | 27 | 19278 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Remoting support.
A Remoting request from the client consists of a short preamble, headers, and
bodies. The preamble contains basic information about the nature of the
request. Headers can be used to request debugging information, send
authentication info, tag transactions, etc. Bodies contain actual Remoting
requests and responses. A single Remoting envelope can contain several
requests; Remoting supports batching out of the box.
Client headers and bodies need not be responded to in a one-to-one manner.
That is, a body or header may not require a response. Debug information is
requested by a header but sent back as a body object. The response index is
therefore essential for the Adobe Flash Player to understand the response.
@see: U{Remoting Envelope on OSFlash
<http://osflash.org/documentation/amf/envelopes/remoting>}
@see: U{Remoting Headers on OSFlash
<http://osflash.org/amf/envelopes/remoting/headers>}
@see: U{Remoting Debug Headers on OSFlash
<http://osflash.org/documentation/amf/envelopes/remoting/debuginfo>}
@since: 0.1
"""
import pyamf
from pyamf import util
__all__ = ['Envelope', 'Request', 'Response', 'decode', 'encode']
#: Successful call.
STATUS_OK = 0
#: Reserved for runtime errors.
STATUS_ERROR = 1
#: Debug information.
STATUS_DEBUG = 2
#: List of available status response codes.
STATUS_CODES = {
STATUS_OK: '/onResult',
STATUS_ERROR: '/onStatus',
STATUS_DEBUG: '/onDebugEvents'
}
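# Hedged note (the '/1' response index is illustrative): a response target is
# built by appending one of these suffixes to the request's response index, so
# a request sent as '/1' is answered under the target '/1/onResult' on success.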
#: AMF mimetype.
CONTENT_TYPE = 'application/x-amf'
ERROR_CALL_FAILED, = range(1)
ERROR_CODES = {
ERROR_CALL_FAILED: 'Server.Call.Failed'
}
APPEND_TO_GATEWAY_URL = 'AppendToGatewayUrl'
REPLACE_GATEWAY_URL = 'ReplaceGatewayUrl'
REQUEST_PERSISTENT_HEADER = 'RequestPersistentHeader'
class RemotingError(pyamf.BaseError):
"""
Generic remoting error class.
"""
class RemotingCallFailed(RemotingError):
"""
Raised if B{Server.Call.Failed} received.
"""
pyamf.add_error_class(RemotingCallFailed, ERROR_CODES[ERROR_CALL_FAILED])
class HeaderCollection(dict):
"""
Collection of AMF message headers.
"""
def __init__(self, raw_headers={}):
self.required = []
for (k, ig, v) in raw_headers:
self[k] = v
if ig:
self.required.append(k)
def is_required(self, idx):
"""
@raise KeyError: Unknown header found.
"""
if not idx in self:
raise KeyError("Unknown header %s" % str(idx))
return idx in self.required
def set_required(self, idx, value=True):
"""
@raise KeyError: Unknown header found.
"""
if not idx in self:
raise KeyError("Unknown header %s" % str(idx))
if not idx in self.required:
self.required.append(idx)
def __len__(self):
return len(self.keys())
class Envelope(object):
"""
I wrap an entire request, encapsulating headers and bodies.
There can be more than one request in a single transaction.
@ivar amfVersion: AMF encoding version. See L{pyamf.ENCODING_TYPES}
@type amfVersion: C{int} or C{None}
@ivar headers: AMF headers, a list of name, value pairs. Global to each
request.
@type headers: L{HeaderCollection}
@ivar bodies: A list of requests/response messages
@type bodies: C{list} containing tuples of the key of the request and the
L{Message}.
"""
def __init__(self, amfVersion=None):
self.amfVersion = amfVersion
self.headers = HeaderCollection()
self.bodies = []
def __repr__(self):
r = "<Envelope amfVersion=%r>\n" % (self.amfVersion,)
for h in self.headers:
r += " " + repr(h) + "\n"
for request in iter(self):
r += " " + repr(request) + "\n"
r += "</Envelope>"
return r
def __setitem__(self, name, value):
if not isinstance(value, Message):
raise TypeError("Message instance expected")
idx = 0
found = False
for body in self.bodies:
if name == body[0]:
self.bodies[idx] = (name, value)
found = True
idx = idx + 1
if not found:
self.bodies.append((name, value))
value.envelope = self
def __getitem__(self, name):
for body in self.bodies:
if name == body[0]:
return body[1]
raise KeyError("'%r'" % (name,))
def __nonzero__(self):
return len(self.bodies) != 0 or len(self.headers) != 0
def __iter__(self):
for body in self.bodies:
yield body[0], body[1]
raise StopIteration
def __len__(self):
return len(self.bodies)
def iteritems(self):
for body in self.bodies:
yield body
raise StopIteration
def keys(self):
return [body[0] for body in self.bodies]
def items(self):
return self.bodies
def __contains__(self, name):
for body in self.bodies:
if name == body[0]:
return True
return False
def __eq__(self, other):
if isinstance(other, Envelope):
return (self.amfVersion == other.amfVersion and
self.headers == other.headers and
self.bodies == other.bodies)
if hasattr(other, 'keys') and hasattr(other, 'items'):
keys, o_keys = self.keys(), other.keys()
if len(o_keys) != len(keys):
return False
for k in o_keys:
if k not in keys:
return False
keys.remove(k)
for k, v in other.items():
if self[k] != v:
return False
return True
class Message(object):
"""
I represent a singular request/response, containing a collection of
headers and one body of data.
I am used to iterate over all requests in the :class:`Envelope`.
@ivar envelope: The parent L{envelope<Envelope>} of this AMF Message.
@ivar body: The body of the message.
@ivar headers: The message headers. Dict like in behaviour.
"""
def __init__(self, envelope, body):
self.envelope = envelope
self.body = body
def _get_headers(self):
return self.envelope.headers
headers = property(_get_headers)
class Request(Message):
"""
An AMF Request payload.
@ivar target: The C{string} target of the request
"""
def __init__(self, target, body=[], envelope=None):
Message.__init__(self, envelope, body)
self.target = target
def __repr__(self):
return "<%s target=%s>%s</%s>" % (
type(self).__name__, repr(self.target), repr(self.body), type(self).__name__)
class Response(Message):
"""
An AMF Response.
@ivar status: The status of the message. Default is L{STATUS_OK}.
@type status: Member of L{STATUS_CODES}.
"""
def __init__(self, body, status=STATUS_OK, envelope=None):
Message.__init__(self, envelope, body)
self.status = status
def __repr__(self):
return "<%s status=%s>%s</%s>" % (
type(self).__name__, _get_status(self.status), repr(self.body),
type(self).__name__
)
class BaseFault(object):
"""
I represent a fault message (C{mx.rpc.Fault}).
@ivar level: The level of the fault.
@ivar code: A simple code describing the fault.
@ivar details: Any extra details of the fault.
@ivar description: A longer description of the fault.
@see: U{mx.rpc.Fault on Livedocs
<http://livedocs.adobe.com/flex/201/langref/mx/rpc/Fault.html>}
"""
level = None
class __amf__:
static = ('level', 'code', 'type', 'details', 'description')
def __init__(self, *args, **kwargs):
self.code = kwargs.get('code', '')
self.type = kwargs.get('type', '')
self.details = kwargs.get('details', '')
self.description = kwargs.get('description', '')
def __repr__(self):
x = '%s level=%s' % (self.__class__.__name__, self.level)
if self.code not in ('', None):
x += ' code=%s' % repr(self.code)
if self.type not in ('', None):
x += ' type=%s' % repr(self.type)
if self.description not in ('', None):
x += ' description=%s' % repr(self.description)
if self.details not in ('', None):
x += '\nTraceback:\n%s' % (repr(self.details),)
return x
def raiseException(self):
"""
Raises an exception based on the fault object. There is no traceback
available.
"""
raise get_exception_from_fault(self), self.description, None
class ErrorFault(BaseFault):
"""
I represent an error level fault.
"""
level = 'error'
def _read_header(stream, decoder, strict=False):
"""
Read AMF L{Message} header from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is C{False}. Will raise a
L{pyamf.DecodeError} if the data that was read from the stream does not
match the header length.
@return: A C{tuple} containing the name of the header, a C{bool}
determining if understanding this header is required and the decoded
data.
@note: Quite what understanding required headers actually means is unknown.
"""
name_len = stream.read_ushort()
name = stream.read_utf8_string(name_len)
required = bool(stream.read_uchar())
data_len = stream.read_ulong()
pos = stream.tell()
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError(
"Data read from stream does not match header length")
return (name, required, data)
def _write_header(name, header, required, stream, encoder, strict=False):
"""
Write AMF message header.
@param name: Name of the header.
@param header: Header value.
@param required: Whether understanding this header is required (?).
@param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that
will receive the encoded header.
@param encoder: An encoder capable of encoding C{AMF0}.
@param strict: Use strict encoding policy. Default is C{False}. Will write
the correct header length after writing the header.
"""
stream.write_ushort(len(name))
stream.write_utf8_string(name)
stream.write_uchar(required)
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
encoder.writeElement(header)
new_pos = stream.tell()
if strict:
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos)
def _read_body(stream, decoder, strict=False, logger=None):
"""
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
"""
def _read_args():
# we have to go through this insanity because it seems that amf0
# does not keep the array of args in the object references lookup
type_byte = stream.peek(1)
if type_byte == '\x11':
if not decoder.use_amf3:
raise pyamf.DecodeError(
"Unexpected AMF3 type with incorrect message type")
return decoder.readElement()
if type_byte != '\x0a':
raise pyamf.DecodeError("Array type required for request body")
stream.read(1)
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for code, s in STATUS_CODES.iteritems():
if not target.endswith(s):
continue
is_request = False
status = code
target = target[:0 - len(s)]
if logger:
logger.debug('Remoting target: %r' % (target,))
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args()
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError("Data read from stream does not match body "
"length (%d != %d)" % (pos + data_len, stream.tell(),))
if is_request:
return response, Request(target, body=data)
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data)
return target, Response(data, status)
def _write_body(name, message, stream, encoder, strict=False):
"""
Write AMF message body.
@param name: The name of the request.
@param message: The AMF L{Message}
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param encoder: An AMF0 encoder.
@param strict: Use strict encoding policy. Default is `False`.
"""
def _encode_body(message):
if isinstance(message, Response):
encoder.writeElement(message.body)
return
stream.write('\x0a')
stream.write_ulong(len(message.body))
for x in message.body:
encoder.writeElement(x)
if not isinstance(message, (Request, Response)):
raise TypeError("Unknown message type")
target = None
if isinstance(message, Request):
target = unicode(message.target)
else:
target = u"%s%s" % (name, _get_status(message.status))
target = target.encode('utf8')
stream.write_ushort(len(target))
stream.write_utf8_string(target)
response = 'null'
if isinstance(message, Request):
response = name
stream.write_ushort(len(response))
stream.write_utf8_string(response)
if not strict:
stream.write_ulong(0)
_encode_body(message)
return
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
_encode_body(message)
new_pos = stream.tell()
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos)
def _get_status(status):
"""
Get status code.
@see: L{STATUS_CODES}
"""
if status not in STATUS_CODES:
# TODO print that status code..
raise ValueError("Unknown status code")
return STATUS_CODES[status]
def get_fault_class(level, **kwargs):
if level == 'error':
return ErrorFault
return BaseFault
def get_fault(data):
try:
level = data['level']
del data['level']
except KeyError:
level = 'error'
e = {}
for x, y in data.iteritems():
if isinstance(x, unicode):
e[str(x)] = y
else:
e[x] = y
return get_fault_class(level, **e)(**e)
def decode(stream, strict=False, logger=None, timezone_offset=None):
"""
Decodes the incoming stream as a remoting message.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param strict: Enforce strict decoding. Default is `False`.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta}
@return: Message L{envelope<Envelope>}.
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
if logger:
logger.debug('remoting.decode start')
msg = Envelope()
msg.amfVersion = stream.read_ushort()
# see http://osflash.org/documentation/amf/envelopes/remoting#preamble
# why we are doing this...
if msg.amfVersion > 0x09:
raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
msg.amfVersion)
decoder = pyamf.get_decoder(pyamf.AMF0, stream, strict=strict,
timezone_offset=timezone_offset)
context = decoder.context
decoder.use_amf3 = msg.amfVersion == pyamf.AMF3
header_count = stream.read_ushort()
for i in xrange(header_count):
name, required, data = _read_header(stream, decoder, strict)
msg.headers[name] = data
if required:
msg.headers.set_required(name)
body_count = stream.read_short()
for i in xrange(body_count):
context.clear()
target, payload = _read_body(stream, decoder, strict, logger)
msg[target] = payload
if strict and stream.remaining() > 0:
raise RuntimeError("Unable to fully consume the buffer")
if logger:
logger.debug('remoting.decode end')
return msg
def encode(msg, strict=False, logger=None, timezone_offset=None):
"""
Encodes and returns the L{msg<Envelope>} as an AMF stream.
@param strict: Enforce strict encoding. Default is C{False}. Specifically
header/body lengths will be written correctly, instead of the default 0.
    Introduced in 0.4.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta}
@rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
"""
stream = util.BufferedByteStream()
encoder = pyamf.get_encoder(pyamf.AMF0, stream, strict=strict,
timezone_offset=timezone_offset)
if msg.amfVersion == pyamf.AMF3:
encoder.use_amf3 = True
stream.write_ushort(msg.amfVersion)
stream.write_ushort(len(msg.headers))
for name, header in msg.headers.iteritems():
_write_header(name, header, int(msg.headers.is_required(name)),
stream, encoder, strict)
stream.write_short(len(msg))
for name, message in msg.iteritems():
encoder.context.clear()
_write_body(name, message, stream, encoder, strict)
stream.seek(0)
return stream
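# Hedged usage sketch (the service name 'EchoService.echo' is illustrative):
#
#   env = Envelope(pyamf.AMF0)
#   env['/1'] = Request('EchoService.echo', body=['Hello'])
#   stream = encode(env)     # BufferedByteStream ready to be sent over HTTP
#   decoded = decode(stream) # -> Envelope; decoded['/1'] is a Request again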
def get_exception_from_fault(fault):
"""
"""
return pyamf.ERROR_CLASS_MAP.get(fault.code, RemotingError)
pyamf.register_class(ErrorFault)
| bsd-3-clause |
robmagee/django-cms | cms/signals/permissions.py | 58 | 2695 | # -*- coding: utf-8 -*-
from cms.cache.permissions import clear_user_permission_cache
from cms.models import PageUser, PageUserGroup
from menus.menu_pool import menu_pool
def post_save_user(instance, raw, created, **kwargs):
"""Signal called when new user is created, required only when CMS_PERMISSION.
Assigns creator of the user to PageUserInfo model, so we know who had created
this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUser(user_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
def post_save_user_group(instance, raw, created, **kwargs):
"""The same like post_save_user, but for Group, required only when
CMS_PERMISSION.
Assigns creator of the group to PageUserGroupInfo model, so we know who had
created this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUserGroup(group_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
def pre_save_user(instance, raw, **kwargs):
clear_user_permission_cache(instance)
def pre_delete_user(instance, **kwargs):
clear_user_permission_cache(instance)
def pre_save_group(instance, raw, **kwargs):
if instance.pk:
user_set = getattr(instance, 'user_set')
for user in user_set.all():
clear_user_permission_cache(user)
def pre_delete_group(instance, **kwargs):
user_set = getattr(instance, 'user_set')
for user in user_set.all():
clear_user_permission_cache(user)
def _clear_users_permissions(instance):
if instance.user:
clear_user_permission_cache(instance.user)
if instance.group:
user_set = getattr(instance.group, 'user_set')
for user in user_set.all():
clear_user_permission_cache(user)
def pre_save_pagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
def pre_delete_pagepermission(instance, **kwargs):
_clear_users_permissions(instance)
def pre_save_globalpagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
menu_pool.clear(all=True)
def pre_delete_globalpagepermission(instance, **kwargs):
_clear_users_permissions(instance)
| bsd-3-clause |
ramadhane/odoo | addons/project/__openerp__.py | 259 | 2562 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Management',
'version': '1.1',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/project-management',
'category': 'Project Management',
'sequence': 8,
'summary': 'Projects, Tasks',
'depends': [
'base_setup',
'product',
'analytic',
'board',
'mail',
'resource',
'web_kanban'
],
'description': """
Track multi-level projects, tasks, work done on tasks
=====================================================
This application allows an operational project management system to organize your activities into tasks and plan the work you need to get the tasks completed.
Gantt diagrams will give you a graphical representation of your project plans, as well as resources availability and workload.
Dashboard / Reports for Project Management will include:
--------------------------------------------------------
* My Tasks
* Open Tasks
* Tasks Analysis
* Cumulative Flow
""",
'data': [
'security/project_security.xml',
'wizard/project_task_delegate_view.xml',
'security/ir.model.access.csv',
'project_data.xml',
'project_view.xml',
'res_partner_view.xml',
'report/project_report_view.xml',
'report/project_cumulative.xml',
'res_config_view.xml',
'views/project.xml',
],
'demo': ['project_demo.xml'],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhsso/ubunto-one | src/backends/filesync/utilities/keyword_script.py | 6 | 4521 | #!/usr/bin/env python
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Get stats for estimating keyword search on files."""
import os
import re
import unicodedata
from optparse import OptionParser
import psycopg2
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
def get_keywords_from_path(volume_path):
"""Split keywords from a volume path."""
# we do not index the root volume path
clean_path = volume_path.replace("~/Ubuntu One", '')
clean_path = unicodedata.normalize('NFKD', clean_path)
clean_path = clean_path.encode('ASCII', 'ignore').lower()
keywords = re.findall(r'\w+', clean_path)
# convert to set for unique values
return set(keywords)
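# Hedged example (the path is illustrative):
#
#   get_keywords_from_path(u"~/Ubuntu One/Documents/Tax 2013.pdf")
#   # -> set(['documents', 'tax', '2013', 'pdf'])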
SQL = """select v.owner_id, v.path volpath, o.path, o.name
from object as o, userdefinedfolder v
where o.volume_id=v.id and
v.status='Live' and
o.status='Live' and
o.kind='File'
order by v.owner_id, o.id
OFFSET %d LIMIT %d"""
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--host", dest="host")
parser.add_option("--dbname", dest="dbname")
parser.add_option("--user", dest="user")
parser.add_option("--password", dest="password", default='')
parser.add_option("--limit", dest="limit", type="int",
default=10000)
(options, args) = parser.parse_args()
conn_string = "host='%s' dbname='%s' user='%s'" % (
options.host, options.dbname, options.user)
if options.password:
conn_string = conn_string + " password='%s'" % options.password
last_user_id = 0
offset = 0
kw_cnt = 0
node_cnt = 0
kw_len = 0
user_kw_cnt = 0
user_cnt = 0
volpath_len = 0
cass_rows = 0
cass_row_width_max = 0
cass_col_bytes = 0
conn = psycopg2.connect(conn_string)
conn.set_session(readonly=True)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
curr = conn.cursor()
try:
while True:
sql = SQL % (offset, options.limit)
curr.execute(sql)
rows = curr.fetchall()
if not rows:
break
offset += options.limit
for r in rows:
node_cnt += 1
user_id, volpath, path, name = r
if user_id != last_user_id:
user_cnt += 1
user_kw_cnt = 0
last_user_id = user_id
volpath = os.path.join(volpath, path, name)
keywords = get_keywords_from_path(volpath)
keywords_len = len(keywords)
kw_cnt += keywords_len
user_kw_cnt += keywords_len
kw_bytes = sum(map(len, keywords))
kw_len += kw_bytes
volpath_len += len(volpath)
# 42 is node_id (16 bytes) generation (8 bytes) + overhead
cass_col_bytes += kw_bytes + (len(volpath) + 42) * keywords_len
if user_kw_cnt > cass_row_width_max:
cass_row_width_max = user_kw_cnt
finally:
curr.close()
conn.close()
if node_cnt:
print "Live Files: % 15d" % node_cnt
print "Keywords: % 15d" % kw_cnt
print "Avg Keyword per User: % 15.2f" % (
float(user_kw_cnt) / user_cnt)
print "Avg Keyword Length: % 15.2f" % (float(kw_len) / kw_cnt)
print "Avg Volume Path Length: % 15.2f" % (
float(volpath_len) / node_cnt)
print "Cassandra Row Count: % 15d" % user_cnt
print "Cassandra Max Columns: % 15d" % cass_row_width_max
print "Cassandra Column bytes: % 15d" % cass_col_bytes
else:
print "No files found."
| agpl-3.0 |
steedos/odoo | addons/account/edi/invoice.py | 342 | 13984 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from werkzeug import url_encode
INVOICE_LINE_EDI_STRUCT = {
'name': True,
'origin': True,
'uos_id': True,
'product_id': True,
'price_unit': True,
'quantity': True,
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
INVOICE_TAX_LINE_EDI_STRUCT = {
'name': True,
'base': True,
'amount': True,
'manual': True,
'sequence': True,
'base_amount': True,
'tax_amount': True,
}
INVOICE_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
'type': True, # -> reversed at import
'internal_number': True, # -> reference at import
'comment': True,
'date_invoice': True,
'date_due': True,
'partner_id': True,
'payment_term': True,
#custom: currency_id
'invoice_line': INVOICE_LINE_EDI_STRUCT,
'tax_line': INVOICE_TAX_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
#custom: 'partner_ref'
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
}
class account_invoice(osv.osv, EDIMixin):
_inherit = 'account.invoice'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a supplier or customer invoice"""
edi_struct = dict(edi_struct or INVOICE_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner = self.pool.get('res.partner')
edi_doc_list = []
for invoice in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, invoice, context=context)
edi_doc = super(account_invoice,self).edi_export(cr, uid, [invoice], edi_struct, context)[0]
edi_doc.update({
'company_address': res_company.edi_export_address(cr, uid, invoice.company_id, context=context),
'company_paypal_account': invoice.company_id.paypal_account,
'partner_address': res_partner.edi_export(cr, uid, [invoice.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [invoice.currency_id], context=context)[0],
'partner_ref': invoice.reference or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_tax_account(self, cr, uid, invoice_type='out_invoice', context=None):
#TODO/FIXME: should select proper Tax Account
account_pool = self.pool.get('account.account')
account_ids = account_pool.search(cr, uid, [('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')])
tax_account = False
if account_ids:
tax_account = account_pool.browse(cr, uid, account_ids[0])
return tax_account
def _edi_invoice_account(self, cr, uid, partner_id, invoice_type, context=None):
res_partner = self.pool.get('res.partner')
partner = res_partner.browse(cr, uid, partner_id, context=context)
if invoice_type in ('out_invoice', 'out_refund'):
invoice_account = partner.property_account_receivable
else:
invoice_account = partner.property_account_payable
return invoice_account
def _edi_product_account(self, cr, uid, product_id, invoice_type, context=None):
product_pool = self.pool.get('product.product')
product = product_pool.browse(cr, uid, product_id, context=context)
if invoice_type in ('out_invoice','out_refund'):
account = product.property_account_income or product.categ_id.property_account_income_categ
else:
account = product.property_account_expense or product.categ_id.property_account_expense_categ
return account
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address','type'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
invoice_type = edi_document['type']
if invoice_type.startswith('out_'):
company_address_edi['customer'] = True
else:
company_address_edi['supplier'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def edi_import(self, cr, uid, edi_document, context=None):
""" During import, invoices will import the company that is provided in the invoice as
        a new partner (e.g. the supplier company for a customer invoice will become a
        supplier record for the new invoice).
Summary of tasks that need to be done:
- import company as a new partner, if type==in then supplier=1, else customer=1
- partner_id field is modified to point to the new partner
- company_address data used to add address to new partner
- change type: out_invoice'<->'in_invoice','out_refund'<->'in_refund'
- reference: should contain the value of the 'internal_number'
- reference_type: 'none'
- internal number: reset to False, auto-generated
- journal_id: should be selected based on type: simply put the 'type'
in the context when calling create(), will be selected correctly
- payment_term: if set, create a default one based on name...
- for invoice lines, the account_id value should be taken from the
product's default, i.e. from the default category, as it will not
be provided.
- for tax lines, we disconnect from the invoice.line, so all tax lines
will be of type 'manual', and default accounts should be picked based
on the tax config of the DB where it is imported.
"""
if context is None:
context = {}
self._edi_requires_attributes(('company_id','company_address','type','invoice_line','currency'), edi_document)
# extract currency info
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
currency = res_currency.browse(cr, uid, currency_id)
edi_document['currency_id'] = self.edi_m2o(cr, uid, currency, context=context)
# change type: out_invoice'<->'in_invoice','out_refund'<->'in_refund'
invoice_type = edi_document['type']
invoice_type = invoice_type.startswith('in_') and invoice_type.replace('in_','out_') or invoice_type.replace('out_','in_')
edi_document['type'] = invoice_type
# import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# Set Account
invoice_account = self._edi_invoice_account(cr, uid, partner_id, invoice_type, context=context)
edi_document['account_id'] = invoice_account and self.edi_m2o(cr, uid, invoice_account, context=context) or False
# reference: should contain the value of the 'internal_number'
edi_document['reference'] = edi_document.get('internal_number', False)
# reference_type: 'none'
edi_document['reference_type'] = 'none'
# internal number: reset to False, auto-generated
edi_document['internal_number'] = False
# discard web preview fields, if present
edi_document.pop('partner_ref', None)
# journal_id: should be selected based on type: simply put the 'type' in the context when calling create(), will be selected correctly
context = dict(context, type=invoice_type)
# for invoice lines, the account_id value should be taken from the product's default, i.e. from the default category, as it will not be provided.
for edi_invoice_line in edi_document['invoice_line']:
product_info = edi_invoice_line['product_id']
product_id = self.edi_import_relation(cr, uid, 'product.product', product_info[1],
product_info[0], context=context)
account = self._edi_product_account(cr, uid, product_id, invoice_type, context=context)
# TODO: could be improved with fiscal positions perhaps
# account = fpos_obj.map_account(cr, uid, fiscal_position_id, account.id)
edi_invoice_line['account_id'] = self.edi_m2o(cr, uid, account, context=context) if account else False
# discard web preview fields, if present
edi_invoice_line.pop('price_subtotal', None)
# for tax lines, we disconnect from the invoice.line, so all tax lines will be of type 'manual', and default accounts should be picked based
# on the tax config of the DB where it is imported.
tax_account = self._edi_tax_account(cr, uid, context=context)
tax_account_info = self.edi_m2o(cr, uid, tax_account, context=context)
for edi_tax_line in edi_document.get('tax_line', []):
edi_tax_line['account_id'] = tax_account_info
edi_tax_line['manual'] = True
return super(account_invoice,self).edi_import(cr, uid, edi_document, context=context)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
the record with ID ``rec_id``.
:param int id: database ID of record to display
:return: action definition dict
"""
action = super(account_invoice,self)._edi_record_display_action(cr, uid, id, context=context)
try:
invoice = self.browse(cr, uid, id, context=context)
if 'out_' in invoice.type:
view_ext_id = 'invoice_form'
journal_type = 'sale'
else:
view_ext_id = 'invoice_supplier_form'
journal_type = 'purchase'
ctx = "{'type': '%s', 'journal_type': '%s'}" % (invoice.type, journal_type)
action.update(context=ctx)
view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', view_ext_id)[1]
action.update(views=[(view_id,'form'), (False, 'tree')])
except ValueError:
# ignore if views are missing
pass
return action
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for inv in self.browse(cr, uid, ids, context=context):
if inv.type == 'out_invoice' and inv.company_id.paypal_account:
params = {
"cmd": "_xclick",
"business": inv.company_id.paypal_account,
"item_name": "%s Invoice %s" % (inv.company_id.name, inv.number or ''),
"invoice": inv.number,
"amount": inv.residual,
"currency_code": inv.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Invoice_PayNow_" + inv.currency_id.name,
}
res[inv.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class account_invoice_line(osv.osv, EDIMixin):
_inherit='account.invoice.line'
class account_invoice_tax(osv.osv, EDIMixin):
_inherit = "account.invoice.tax"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DolphinDream/sverchok | utils/curve/knotvector.py | 1 | 8165 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
#
# Adopted from Geomdl library: https://raw.githubusercontent.com/orbingol/NURBS-Python/5.x/geomdl/knotvector.py
#
"""
.. module:: knotvector
:platform: Unix, Windows
:synopsis: Provides utility functions related to knot vector generation and validation
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
from collections import defaultdict
import numpy as np
def generate(degree, num_ctrlpts, clamped=True):
""" Generates an equally spaced knot vector.
It uses the following equality to generate knot vector: :math:`m = n + p + 1`
where;
* :math:`p`, degree
* :math:`n + 1`, number of control points
* :math:`m + 1`, number of knots
Keyword Arguments:
* ``clamped``: Flag to choose from clamped or unclamped knot vector options. *Default: True*
:param degree: degree
:type degree: int
:param num_ctrlpts: number of control points
:type num_ctrlpts: int
:return: knot vector
:rtype: np.array of shape (m+1,)
"""
if degree == 0 or num_ctrlpts == 0:
raise ValueError("Input values should be different than zero.")
# Number of repetitions at the start and end of the array
num_repeat = degree
# Number of knots in the middle
num_segments = num_ctrlpts - (degree + 1)
if not clamped:
# No repetitions at the start and end
num_repeat = 0
# Should conform the rule: m = n + p + 1
num_segments = degree + num_ctrlpts - 1
# First knots
knot_vector = [0.0 for _ in range(0, num_repeat)]
# Middle knots
knot_vector += list(np.linspace(0.0, 1.0, num_segments + 2))
# Last knots
knot_vector += [1.0 for _ in range(0, num_repeat)]
# Return auto-generated knot vector
return np.array(knot_vector)
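# Hedged worked example: a clamped cubic (degree 3) knot vector for 6 control
# points has m + 1 = 6 + 3 + 1 = 10 knots:
#
#   generate(3, 6)
#   # -> array([0., 0., 0., 0., 0.3333, 0.6667, 1., 1., 1., 1.])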
def from_tknots(degree, tknots):
n = len(tknots)
#m = degree + n + 1
result = [0] * (degree+1)
for j in range(1, n - degree):
u = tknots[j:j+degree].sum() / degree
result.append(u)
result.extend([1.0] * (degree+1))
return np.array(result)
def normalize(knot_vector):
""" Normalizes the input knot vector to [0, 1] domain.
:param knot_vector: knot vector to be normalized
:type knot_vector: np.array of shape (X,)
:return: normalized knot vector
:rtype: np.array
"""
if not isinstance(knot_vector, np.ndarray):
knot_vector = np.array(knot_vector)
m = knot_vector.min()
M = knot_vector.max()
if m >= M:
raise Exception("All knot values are equal")
return (knot_vector - m) / (M - m)
def concatenate_plain(kv1, kv2):
M = kv1.max()
return np.concatenate((kv1, kv2 + M))
def average(knotvectors):
kvs = np.array(knotvectors)
return kvs.mean(axis=0)
def to_multiplicity(knot_vector, tolerance=1e-6):
count = 0
prev_u = None
result = []
for u in knot_vector:
if prev_u is None:
last_match = False
else:
last_match = abs(u - prev_u) < tolerance
#print(f"Match: {u} - {prev_u} = {abs(u - prev_u)}, => {last_match}")
if prev_u is None:
count = 1
elif last_match:
count += 1
else:
result.append((prev_u, count))
count = 1
prev_u = u
if last_match:
result.append((u, count))
else:
result.append((u, 1))
return result
def from_multiplicity(pairs):
result = []
for u, count in pairs:
result.extend([u] * count)
return np.array(result)
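# Hedged example: a clamped quadratic knot vector and its multiplicity form.
#
#   to_multiplicity([0, 0, 0, 0.5, 1, 1, 1])
#   # -> [(0, 3), (0.5, 1), (1, 3)]
#   from_multiplicity([(0, 3), (0.5, 1), (1, 3)])
#   # -> array([0., 0., 0., 0.5, 1., 1., 1.])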
def is_clamped(knot_vector, degree, check_start=True, check_end=True, tolerance=1e-6):
pairs = to_multiplicity(knot_vector, tolerance)
m1 = pairs[0][1]
m2 = pairs[-1][1]
start_ok = not check_start or m1 == degree+1
end_ok = not check_end or m2 == degree+1
return start_ok and end_ok
def concatenate(kv1, kv2, join_multiplicity):
join_knot = kv1.max()
kv2 = kv2 - kv2.min() + join_knot
kv1_m = to_multiplicity(kv1)
kv2_m = to_multiplicity(kv2)
kv_m = dict(kv1_m[:-1] + kv2_m)
kv_m[join_knot] = join_multiplicity
kv_m = [(k, kv_m[k]) for k in sorted(kv_m.keys())]
return from_multiplicity(kv_m)
def elevate_degree_pairs(pairs, delta=1):
return [(u, count+delta) for u, count in pairs]
def elevate_degree(knot_vector, delta=1):
pairs = to_multiplicity(knot_vector)
return from_multiplicity(elevate_degree_pairs(pairs, delta))
def insert(knot_vector, u, count=1):
idx = np.searchsorted(knot_vector, u)
result = knot_vector
for i in range(count):
result = np.insert(result, idx, u)
return result
def rescale(knot_vector, new_t_min, new_t_max):
t_min = knot_vector[0]
t_max = knot_vector[-1]
k = (new_t_max - new_t_min) / (t_max - t_min)
return k * (knot_vector - t_min) + new_t_min
def reverse(knot_vector):
t_max = knot_vector[-1]
t_min = knot_vector[0]
kv = t_max - knot_vector + t_min
return kv[::-1]
def find_multiplicity(knot_vector, u, tolerance=1e-6):
pairs = to_multiplicity(knot_vector, tolerance)
#print(f"kv {knot_vector} => {pairs}")
for k, count in pairs:
if abs(k - u) < tolerance:
return count
return 0
def get_internal_knots(knot_vector, output_multiplicity = False, tolerance=1e-6):
pairs = to_multiplicity(knot_vector)
internal = pairs[1:-1]
if output_multiplicity:
return internal
else:
return [u for u,_ in internal]
def get_min_continuity(knotvector, degree):
ms = to_multiplicity(knotvector)[1:-1]
if not ms:
return degree
multiplicities = [p[1] for p in ms]
max_mult = max(multiplicities)
return degree - max_mult
def difference(src_kv, dst_kv, tolerance=1e-6):
src_pairs = dict(to_multiplicity(src_kv, tolerance))
dst_pairs = to_multiplicity(dst_kv, tolerance)
result = []
for dst_u, dst_multiplicity in dst_pairs:
src_multiplicity = src_pairs.get(dst_u, 0)
diff = dst_multiplicity - src_multiplicity
if diff > 0:
result.append((dst_u, diff))
return result
def equal(kv1, kv2):
if len(kv1) != len(kv2):
return False
return (kv1 == kv2).all()
def merge(kv1, kv2):
kv2 = rescale(kv2, kv1[0], kv1[-1])
kv1_pairs = to_multiplicity(kv1)
kv2_pairs = to_multiplicity(kv2)
pairs = defaultdict(int)
for u, multiplicity in kv1_pairs:
pairs[u] = multiplicity
for u, multiplicity in kv2_pairs:
pairs[u] = max(pairs[u], multiplicity)
result = [(u, pairs[u]) for u in sorted(pairs.keys())]
return from_multiplicity(result)
def check(degree, knot_vector, num_ctrlpts):
""" Checks the validity of the input knot vector.
Please refer to The NURBS Book (2nd Edition), p.50 for details.
:param degree: degree of the curve or the surface
:type degree: int
:param knot_vector: knot vector to be checked
:type knot_vector: np.array of shape (X,)
:param num_ctrlpts: number of control points
:type num_ctrlpts: int
:return: String with error description, if the knotvector is invalid;
None, if the knotvector is valid.
"""
if not isinstance(knot_vector, (list, tuple, np.ndarray)):
raise TypeError("Knot vector must be a list, tuple, or numpy array")
if knot_vector is None or len(knot_vector) == 0:
raise ValueError("Input knot vector cannot be empty")
# Check the formula; m = p + n + 1
m = len(knot_vector)
rhs = degree + num_ctrlpts + 1
if m != rhs:
return f"Knot vector has invalid length {m}; for degree {degree} and {num_ctrlpts} control points it must have {rhs} items"
# Check ascending order
prev_knot = knot_vector[0]
for knot in knot_vector:
if prev_knot > knot:
return "Knot vector items are not all non-decreasing"
prev_knot = knot
return None
| gpl-3.0 |
jmp0xf/raven-python | raven/transport/base.py | 19 | 1335 | """
raven.transport.base
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
class Transport(object):
"""
All transport implementations need to subclass this class
You must implement a send method (or an async_send method if
sub-classing AsyncTransport) and the compute_scope method.
Please see the HTTPTransport class for an example of a
compute_scope implementation.
"""
async = False
scheme = []
def send(self, data, headers):
"""
You need to override this to do something with the actual
data. Usually - this is sending to a server
"""
raise NotImplementedError
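# Hedged sketch of a minimal synchronous transport (illustrative only, not a
# transport shipped with raven):
#
#   class LoggingTransport(Transport):
#       scheme = ['log']
#
#       def send(self, data, headers):
#           print('would send %d bytes' % len(data))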
class AsyncTransport(Transport):
"""
All asynchronous transport implementations should subclass this
class.
You must implement a async_send method (and the compute_scope
method as describe on the base Transport class).
"""
async = True
def async_send(self, data, headers, success_cb, error_cb):
"""
Override this method for asynchronous transports. Call
`success_cb()` if the send succeeds or `error_cb(exception)`
if the send fails.
"""
raise NotImplementedError
| bsd-3-clause |
cwisecarver/osf.io | api/users/urls.py | 12 | 1402 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.UserList.as_view(), name=views.UserList.view_name),
url(r'^(?P<user_id>\w+)/$', views.UserDetail.as_view(), name=views.UserDetail.view_name),
url(r'^(?P<user_id>\w+)/addons/$', views.UserAddonList.as_view(), name=views.UserAddonList.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/$', views.UserAddonDetail.as_view(), name=views.UserAddonDetail.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/accounts/$', views.UserAddonAccountList.as_view(), name=views.UserAddonAccountList.view_name),
url(r'^(?P<user_id>\w+)/addons/(?P<provider>\w+)/accounts/(?P<account_id>\w+)/$', views.UserAddonAccountDetail.as_view(), name=views.UserAddonAccountDetail.view_name),
url(r'^(?P<user_id>\w+)/institutions/$', views.UserInstitutions.as_view(), name=views.UserInstitutions.view_name),
url(r'^(?P<user_id>\w+)/nodes/$', views.UserNodes.as_view(), name=views.UserNodes.view_name),
url(r'^(?P<user_id>\w+)/preprints/$', views.UserPreprints.as_view(), name=views.UserPreprints.view_name),
url(r'^(?P<user_id>\w+)/registrations/$', views.UserRegistrations.as_view(), name=views.UserRegistrations.view_name),
url(r'^(?P<user_id>\w+)/relationships/institutions/$', views.UserInstitutionsRelationship.as_view(), name=views.UserInstitutionsRelationship.view_name),
]
| apache-2.0 |
Mellthas/quodlibet | quodlibet/quodlibet/qltk/menubutton.py | 2 | 1425 | # Copyright 2011, 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from . import add_css
class MenuButton(Gtk.MenuButton):
"""TODO: remove. This used to be an implementation of Gtk.MenuButton
when it wasn't available in gtk+
"""
def __init__(self, widget=None, arrow=False, down=True):
super(MenuButton, self).__init__()
bbox = Gtk.HBox(spacing=3)
if widget:
bbox.pack_start(widget, True, True, 0)
if arrow:
arrow_type = Gtk.ArrowType.DOWN if down else Gtk.ArrowType.UP
bbox.pack_start(
Gtk.Arrow.new(arrow_type, Gtk.ShadowType.IN),
True, True, 0)
self.add(bbox)
self.set_direction(Gtk.ArrowType.DOWN if down else Gtk.ArrowType.UP)
def get_menu(self):
return self.get_popup()
def set_menu(self, menu):
self.set_popup(menu)
class SmallMenuButton(MenuButton):
def __init__(self, *args, **kwargs):
super(SmallMenuButton, self).__init__(*args, **kwargs)
self.set_size_request(26, 26)
add_css(self, """
* {
padding: 0px 4px;
}
""")
| gpl-2.0 |
whn09/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py | 38 | 14437 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wishart."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
def make_pd(start, n):
"""Deterministically create a positive definite matrix."""
x = np.tril(linalg.circulant(np.arange(start, start + n)))
return np.dot(x, x.T)
def chol(x):
"""Compute Cholesky factorization."""
return linalg.cholesky(x).T
def wishart_var(df, x):
"""Compute Wishart variance for numpy scale matrix."""
x = np.sqrt(df) * np.asarray(x)
d = np.expand_dims(np.diag(x), -1)
return x**2 + np.dot(d, d.T)
class WishartCholeskyTest(test.TestCase):
def testEntropy(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
# sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()
self.assertAllClose(6.301387092430769, w.entropy().eval())
w = distributions.WishartCholesky(df=1, scale=[[1.]])
# sp.stats.wishart(df=1,scale=1).entropy()
self.assertAllClose(0.78375711047393404, w.entropy().eval())
def testMeanLogDetAndLogNormalizingConstant(self):
with self.test_session():
def entropy_alt(w):
return (
w.log_normalization()
- 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det()
+ 0.5 * w.df * w.dimension).eval()
w = distributions.WishartCholesky(df=4,
scale=chol(make_pd(1., 2)))
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
w = distributions.WishartCholesky(df=5, scale=[[1.]])
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
def testMean(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(df * scale, w.mean().eval())
def testMode(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())
def testStd(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
def testVariance(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
def testSample(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
chol_w = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=False)
x = chol_w.sample(1, seed=42).eval()
chol_x = [chol(x[0])]
full_w = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=False)
self.assertAllClose(x, full_w.sample(1, seed=42).eval())
chol_w_chol = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
chol_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
full_w_chol = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
full_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
# Check first and second moments.
df = 4.
chol_w = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False)
x = chol_w.sample(10000, seed=42)
self.assertAllEqual((10000, 3, 3), x.get_shape())
moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)
# The Variance estimate uses the squares rather than outer-products
# because Wishart.Variance is the diagonal of the Wishart covariance
# matrix.
variance_estimate = (math_ops.reduce_mean(
math_ops.square(x), reduction_indices=[0]) -
math_ops.square(moment1_estimate)).eval()
self.assertAllClose(
chol_w.variance().eval(), variance_estimate, rtol=0.05)
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
with self.test_session():
df = 4.
n_val = 100
random_seed.set_random_seed(654321)
chol_w1 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart1")
samples1 = chol_w1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
chol_w2 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart2")
samples2 = chol_w2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testProb(self):
with self.test_session():
# Generate some positive definite (pd) matrices and their Cholesky
# factorizations.
x = np.array(
[make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)])
chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])])
      # Since Wishart wasn't added to SciPy until 0.16, we'll spot check some
# pdfs with hard-coded results from upstream SciPy.
log_prob_df_seq = np.array([
# math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))
-3.5310242469692907,
# math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))
-7.689907330328961,
# math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2]))
-10.815845159537895,
# math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))
-13.640549882916691,
])
# This test checks that batches don't interfere with correctness.
w = distributions.WishartCholesky(
df=[2, 3, 4, 5],
scale=chol_x,
cholesky_input_output_matrices=True)
self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval())
# Now we test various constructions of Wishart with different sample
# shape.
log_prob = np.array([
# math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))
-4.224171427529236,
# math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))
-6.3378770664093453,
# math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))
-12.026946850193017,
# math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))
-20.951582705289454,
])
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=False),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=False)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=True),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=True)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
def testBatchShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([], w.batch_shape)
self.assertAllEqual([], w.batch_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2], w.batch_shape)
self.assertAllEqual([2], w.batch_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[], sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2],
sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testEventShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testValidateArgs(self):
with self.test_session() as sess:
df_deferred = array_ops.placeholder(dtypes.float32)
chol_scale_deferred = array_ops.placeholder(dtypes.float32)
x = make_pd(1., 3)
chol_scale = chol(x)
# Check expensive, deferred assertions.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"cannot be less than"):
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=True)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 2.,
chol_scale_deferred: chol_scale})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful"):
chol_w = distributions.WishartFull(
df=df_deferred, scale=chol_scale_deferred)
        # np.ones((3, 3)) is not positive definite.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={
df_deferred: 4.,
chol_scale_deferred: np.ones(
(3, 3), dtype=np.float32)
})
# Ensure no assertions.
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=False)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: chol_scale})
      # Bogus log_prob, but since we have no checks running... c'est la vie.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: np.ones((3, 3))})
      # Still has these assertions because they're resolvable at graph
      # construction time.
with self.assertRaisesRegexp(ValueError, "cannot be less than"):
chol_w = distributions.WishartCholesky(
df=2, scale=chol_scale, validate_args=False)
with self.assertRaisesRegexp(TypeError, "not a floating-point type"):
chol_w = distributions.WishartCholesky(
df=4.,
scale=np.asarray(
chol_scale, dtype=np.int32),
validate_args=False)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ADM91/PowerSystem-RL | system/line_connection_cases/within_energized.py | 1 | 3779 | import numpy as np
from system.take_snapshot import take_snapshot
from copy import deepcopy
def within_energized(ps, island_1, bus_ids, spad_lim):
# Take preliminary snapshot of the system
state_list, island_list = take_snapshot(ps, 'Preliminary state', [], [])
# Set opf constraint to SPA diff
    # Make sure the branch in question is in the island's branch matrix (it isn't if each bus was added via a blackout connection)
branch_ind = np.all(ps.islands[ps.island_map[island_1]]['branch'][:, 0:2] == bus_ids, axis=1)
if not np.any(branch_ind):
# Add branch to the island
branch_ind = np.all(ps.islands['blackout']['branch'][:, 0:2] == bus_ids, axis=1)
line_data = ps.islands['blackout']['branch'][branch_ind, :]
line_data[:, 10] = 1 # Enable the line
ps.islands[ps.island_map[island_1]]['branch'] = np.append(
ps.islands[ps.island_map[island_1]]['branch'],
np.concatenate((line_data, np.zeros((len(line_data), 4))), axis=1),
axis=0)
# Remove branch from blackout
ps.islands['blackout']['branch'] = np.delete(ps.islands['blackout']['branch'], np.where(branch_ind), axis=0)
    # Sort branches and re-identify branch_ind
b1 = ps.islands[ps.island_map[island_1]]['branch'][:, 0]
b2 = ps.islands[ps.island_map[island_1]]['branch'][:, 1]
line_order = np.lexsort((b2, b1)) # First sort by bus1 then by bus2
ps.islands[ps.island_map[island_1]]['branch'] = ps.islands[ps.island_map[island_1]]['branch'][line_order, :]
branch_ind = np.all(ps.islands[ps.island_map[island_1]]['branch'][:, 0:2] == bus_ids, axis=1)
# print('branches: %s' % ps.islands[ps.island_map[island_1]]['branch'][:, 0:2])
# print('selected branch: %s' % bus_ids)
# print('branch_ind: %s' % branch_ind)
# print('bus ids: %s' % bus_ids)
# print(ps.islands['blackout']['bus'])
# Set opf constraints
ps.islands[ps.island_map[island_1]] = ps.set_opf_constraints(test_case=ps.islands[ps.island_map[island_1]],
set_branch=branch_ind,
max_spa=spad_lim,
set_gen=False,
set_loads=False)
# Run opf on the islands
success = ps.evaluate_islands() # Matpower needs to be altered for this to work -- Think I got it
if success == 0:
return [], []
# Take snapshot
title = 'Rescheduling for connection of branch %s - %s' % (int(bus_ids[0]), int(bus_ids[1]))
state_list, island_list = take_snapshot(ps, title, state_list, island_list)
# Close the line and restore the SPA diff constraint
ps.islands[ps.island_map[island_1]]['branch'][branch_ind, 10] = 1
ps.islands[ps.island_map[island_1]] = ps.set_opf_constraints(test_case=ps.islands[ps.island_map[island_1]],
set_branch=branch_ind,
max_spa=360,
set_gen=False,
set_loads=False)
# Run opf to get final steady state
success = ps.evaluate_islands()
if success == 0:
return [], []
# Take final snapshot
title = 'Solving state after line connection'
state_list, island_list = take_snapshot(ps, title, state_list, island_list)
# Ensure that current state variable has the most recent information
# ps.current_state = deepcopy(state_list[-1])
return state_list, island_list
| gpl-3.0 |
spinellic/Mission-Planner | Lib/getpass.py | 62 | 5742 | """Utilities to get a password and/or the current user name.
getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
getuser() - Get the user name from the environment or password database.
GetPassWarning - This UserWarning is issued when getpass() cannot prevent
echoing of the password contents while reading.
On Windows, the msvcrt module will be used.
On the Mac EasyDialogs.AskPassword is used, if available.
"""
# Authors: Piers Lauder (original)
# Guido van Rossum (Windows support and cleanup)
# Gregory P. Smith (tty support & GetPassWarning)
import os, sys, warnings
__all__ = ["getpass","getuser","GetPassWarning"]
class GetPassWarning(UserWarning): pass
def unix_getpass(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
Args:
prompt: Written on stream to ask for the input. Default: 'Password: '
stream: A writable file object to display the prompt. Defaults to
the tty. If no tty is available defaults to sys.stderr.
Returns:
The seKr3t input.
Raises:
EOFError: If our input tty or stdin was closed.
GetPassWarning: When we were unable to turn echo off on the input.
Always restores terminal settings before returning.
"""
fd = None
tty = None
try:
# Always try reading and writing directly on the tty first.
fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
tty = os.fdopen(fd, 'w+', 1)
input = tty
if not stream:
stream = tty
except EnvironmentError, e:
# If that fails, see if stdin can be controlled.
try:
fd = sys.stdin.fileno()
except (AttributeError, ValueError):
passwd = fallback_getpass(prompt, stream)
input = sys.stdin
if not stream:
stream = sys.stderr
if fd is not None:
passwd = None
try:
old = termios.tcgetattr(fd) # a copy to save
new = old[:]
new[3] &= ~termios.ECHO # 3 == 'lflags'
tcsetattr_flags = termios.TCSAFLUSH
if hasattr(termios, 'TCSASOFT'):
tcsetattr_flags |= termios.TCSASOFT
try:
termios.tcsetattr(fd, tcsetattr_flags, new)
passwd = _raw_input(prompt, stream, input=input)
finally:
termios.tcsetattr(fd, tcsetattr_flags, old)
stream.flush() # issue7208
except termios.error, e:
if passwd is not None:
# _raw_input succeeded. The final tcsetattr failed. Reraise
# instead of leaving the terminal in an unknown state.
raise
# We can't control the tty or stdin. Give up and use normal IO.
# fallback_getpass() raises an appropriate warning.
del input, tty # clean up unused file objects before blocking
passwd = fallback_getpass(prompt, stream)
stream.write('\n')
return passwd
def win_getpass(prompt='Password: ', stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
return fallback_getpass(prompt, stream)
import msvcrt
for c in prompt:
msvcrt.putch(c)
pw = ""
while 1:
c = msvcrt.getch()
if c == '\r' or c == '\n':
break
if c == '\003':
raise KeyboardInterrupt
if c == '\b':
pw = pw[:-1]
else:
pw = pw + c
msvcrt.putch('\r')
msvcrt.putch('\n')
return pw
def fallback_getpass(prompt='Password: ', stream=None):
warnings.warn("Can not control echo on the terminal.", GetPassWarning,
stacklevel=2)
if not stream:
stream = sys.stderr
print >>stream, "Warning: Password input may be echoed."
return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None, input=None):
# A raw_input() replacement that doesn't save the string in the
# GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
stream.write(prompt)
stream.flush()
# NOTE: The Python C API calls flockfile() (and unlock) during readline.
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
"""Get the username from the environment or password database.
First try various environment variables, then the password
database. This works on Windows as long as USERNAME is set.
"""
import os
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
# If this fails, the exception will "explain" why
import pwd
return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate function
try:
import termios
# it's possible there is an incompatible termios from the
# McMillan Installer, make sure we have a UNIX-compatible termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
try:
import msvcrt
except ImportError:
try:
from EasyDialogs import AskPassword
except ImportError:
getpass = fallback_getpass
else:
getpass = AskPassword
else:
getpass = win_getpass
else:
getpass = unix_getpass
| gpl-3.0 |
s3nk4s/flaskTutorials | FlaskApp/FlaskApp/venv/local/lib/python2.7/sre_compile.py | 156 | 16427 | #
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFFL
def _identityfunction(x):
return x
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
for op, av in pattern:
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
emit(_sre.getlower(av, flags))
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = _identityfunction
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error, "internal: unsupported template operator"
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error, "look-behind requires fixed-width pattern"
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
# compile charset subprogram
emit = code.append
if fixup is None:
fixup = _identityfunction
for op, av in _optimize_charset(charset, fixup):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(fixup(av))
elif op is RANGE:
emit(fixup(av[0]))
emit(fixup(av[1]))
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error, "internal: unsupported set operator"
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
# internal: optimize character set
out = []
outappend = out.append
charmap = [0]*256
try:
for op, av in charset:
if op is NEGATE:
outappend((op, av))
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could append to charmap tail
return charset # cannot compress
except IndexError:
# character set contains unicode characters
return _optimize_unicode(charset, fixup)
# compress character map
i = p = n = 0
runs = []
runsappend = runs.append
for c in charmap:
if c:
if n == 0:
p = i
n = n + 1
elif n:
runsappend((p, n))
n = 0
i = i + 1
if n:
runsappend((p, n))
if len(runs) <= 2:
# use literal/range
for p, n in runs:
if n == 1:
outappend((LITERAL, p))
else:
outappend((RANGE, (p, p+n-1)))
if len(out) < len(charset):
return out
else:
# use bitmap
data = _mk_bitmap(charmap)
outappend((CHARSET, data))
return out
return charset
def _mk_bitmap(bits):
data = []
dataappend = data.append
if _sre.CODESIZE == 2:
start = (1, 0)
else:
start = (1L, 0L)
m, v = start
for c in bits:
if c:
v = v + m
m = m + m
if m > MAXCODE:
dataappend(v)
m, v = start
return data
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of UTF-16 has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
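# Worked example (illustrative): a charset containing only u"A" (0x0041) and
# u"\u0430" (0x0430) sets one bit in chunk 0 and one bit in chunk 4; the other
# 254 chunks are identical all-zero tuples, so _optimize_unicode() below emits
# just three distinct bitmap blocks (16 words each when _sre.CODESIZE == 2),
# plus the 256-byte mapping table pointing positions 0 and 4 at their blocks
# and every other position at the shared empty block.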
def _optimize_unicode(charset, fixup):
try:
import array
except ImportError:
return charset
charmap = [0]*65536
negate = 0
try:
for op, av in charset:
if op is NEGATE:
negate = 1
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in xrange(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could expand category
return charset # cannot compress
except IndexError:
# non-BMP characters
return charset
if negate:
if sys.maxunicode != 65535:
# XXX: negation does not work with big charsets
return charset
for i in xrange(65536):
charmap[i] = not charmap[i]
comps = {}
mapping = [0]*256
block = 0
data = []
for i in xrange(256):
chunk = tuple(charmap[i*256:(i+1)*256])
new = comps.setdefault(chunk, block)
mapping[i] = new
if new == block:
block = block + 1
data = data + _mk_bitmap(chunk)
header = [block]
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
# Convert block indices to byte array of 256 bytes
mapping = array.array('b', mapping).tostring()
# Convert byte array to word array
mapping = array.array(code, mapping)
assert mapping.itemsize == _sre.CODESIZE
header = header + mapping.tolist()
data[0:0] = header
return [(BIGCHARSET, data)]
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
if lo == 0 and hi == MAXREPEAT:
raise error, "nothing to repeat"
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
table = [-1] + ([0]*len(prefix))
for i in xrange(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
try:
unicode
except NameError:
STRING_TYPES = (type(""),)
else:
STRING_TYPES = (type(""), type(unicode("")))
def isstring(obj):
for tp in STRING_TYPES:
if isinstance(obj, tp):
return 1
return 0
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
code = _code(p, flags)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
| mit |
ismailsunni/inasafe | safe_extras/raven/conf/remote.py | 6 | 4164 | from __future__ import absolute_import
import logging
import os
import warnings
from raven.utils.compat import PY2, text_type
from raven.exceptions import InvalidDsn
from raven.utils.encoding import to_string
from raven.utils.urlparse import parse_qsl, urlparse
ERR_UNKNOWN_SCHEME = 'Unsupported Sentry DSN scheme: {0} ({1})'
logger = logging.getLogger('raven')
def discover_default_transport():
from raven.transport.threaded import ThreadedHTTPTransport
from raven.transport.http import HTTPTransport
# Google App Engine
# https://cloud.google.com/appengine/docs/python/how-requests-are-handled#Python_The_environment
if 'CURRENT_VERSION_ID' in os.environ and 'INSTANCE_ID' in os.environ:
logger.info('Detected environment to be Google App Engine. Using synchronous HTTP transport.')
return HTTPTransport
# AWS Lambda
# https://alestic.com/2014/11/aws-lambda-environment/
if 'LAMBDA_TASK_ROOT' in os.environ:
logger.info('Detected environment to be AWS Lambda. Using synchronous HTTP transport.')
return HTTPTransport
return ThreadedHTTPTransport
DEFAULT_TRANSPORT = discover_default_transport()
class RemoteConfig(object):
def __init__(self, base_url=None, project=None, public_key=None,
secret_key=None, transport=None, options=None):
if base_url:
base_url = base_url.rstrip('/')
store_endpoint = '%s/api/%s/store/' % (base_url, project)
else:
store_endpoint = None
self.base_url = base_url
self.project = project
self.public_key = public_key
self.secret_key = secret_key
self.options = options or {}
self.store_endpoint = store_endpoint
self._transport_cls = transport or DEFAULT_TRANSPORT
def __unicode__(self):
return text_type(self.base_url)
def __str__(self):
return text_type(self.base_url)
def is_active(self):
return all([self.base_url, self.project, self.public_key, self.secret_key])
def get_transport(self):
if not self.store_endpoint:
return
if not hasattr(self, '_transport'):
self._transport = self._transport_cls(**self.options)
return self._transport
def get_public_dsn(self):
url = urlparse(self.base_url)
netloc = url.hostname
if url.port:
netloc += ':%s' % url.port
return '//%s@%s%s/%s' % (self.public_key, netloc, url.path, self.project)
@classmethod
def from_string(cls, value, transport=None, transport_registry=None):
# in Python 2.x sending the DSN as a unicode value will eventually
# cause issues in httplib
if PY2:
value = to_string(value)
url = urlparse(value.strip())
if url.scheme not in ('http', 'https'):
warnings.warn('Transport selection via DSN is deprecated. You should explicitly pass the transport class to Client() instead.')
if transport is None:
if not transport_registry:
from raven.transport import TransportRegistry, default_transports
transport_registry = TransportRegistry(default_transports)
if not transport_registry.supported_scheme(url.scheme):
raise InvalidDsn(ERR_UNKNOWN_SCHEME.format(url.scheme, value))
transport = transport_registry.get_transport_cls(url.scheme)
netloc = url.hostname
if url.port:
netloc += ':%s' % url.port
path_bits = url.path.rsplit('/', 1)
if len(path_bits) > 1:
path = path_bits[0]
else:
path = ''
project = path_bits[-1]
if not all([netloc, project, url.username]):
raise InvalidDsn('Invalid Sentry DSN: %r' % url.geturl())
base_url = '%s://%s%s' % (url.scheme.rsplit('+', 1)[-1], netloc, path)
return cls(
base_url=base_url,
project=project,
public_key=url.username,
secret_key=url.password,
options=dict(parse_qsl(url.query)),
transport=transport,
)
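# Illustrative DSN (hypothetical host and keys): in
# 'https://abc123:def456@sentry.example.com/2' the userinfo supplies the
# public key ('abc123') and secret key ('def456'), the last path segment is
# the project ('2'), base_url becomes 'https://sentry.example.com' and
# get_public_dsn() returns '//abc123@sentry.example.com/2'.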
| gpl-3.0 |
ericgriffin/fflock | web2py/gluon/contrib/login_methods/motp_auth.py | 44 | 4592 | #!/usr/bin/env python
import time
from hashlib import md5
from gluon.dal import DAL
def motp_auth(db=DAL('sqlite://storage.sqlite'),
time_offset=60):
"""
    MOTP allows you to log in with a one-time password (OTP) generated on an MOTP client;
    MOTP clients are available for practically all platforms.
    To know more about OTP visit http://en.wikipedia.org/wiki/One-time_password
    To know more about MOTP visit http://motp.sourceforge.net
Written by Madhukar R Pai ([email protected])
License : MIT or GPL v2
thanks and credits to the web2py community
to use motp_auth:
motp_auth.py has to be located in gluon/contrib/login_methods/ folder
first auth_user has to have 2 extra fields - motp_secret and motp_pin
for that define auth like shown below:
## after auth = Auth(db)
db.define_table(
auth.settings.table_user_name,
Field('first_name', length=128, default=''),
Field('last_name', length=128, default=''),
Field('email', length=128, default='', unique=True), # required
Field('password', 'password', length=512, # required
readable=False, label='Password'),
Field('motp_secret',length=512,default='',
              label='MOTP Secret'),
Field('motp_pin',length=128,default='',
label='MOTP PIN'),
Field('registration_key', length=512, # required
writable=False, readable=False, default=''),
Field('reset_password_key', length=512, # required
writable=False, readable=False, default=''),
Field('registration_id', length=512, # required
writable=False, readable=False, default=''))
##validators
custom_auth_table = db[auth.settings.table_user_name]
# get the custom_auth_table
custom_auth_table.first_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.last_name.requires = \
IS_NOT_EMPTY(error_message=auth.messages.is_empty)
custom_auth_table.password.requires = CRYPT()
custom_auth_table.email.requires = [
IS_EMAIL(error_message=auth.messages.invalid_email),
IS_NOT_IN_DB(db, custom_auth_table.email)]
auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
## before auth.define_tables()
##after that:
from gluon.contrib.login_methods.motp_auth import motp_auth
auth.settings.login_methods.append(motp_auth(db=db))
##Instructions for using MOTP
    - after configuring MOTP for web2py, install an MOTP client on your phone (Android, iOS, Java, Windows Phone, etc.)
    - initialize the MOTP client (to reset an MOTP secret type in #**#).
      During user creation enter the secret generated during initialization into the motp_secret field in auth_user and
      similarly enter a pre-decided pin into the motp_pin field
    - done. To log in, just generate a fresh OTP by typing in the pin and use the OTP as the password.
    ### To Dos ###
    - both motp_secret and motp_pin are stored in plain text! need some way of encrypting them
    - web2py stores the password in the db on successful login (should not happen)
    - maybe some utility or page to check the OTP would be useful
    - as of now the user field is hardcoded to email; need some way of selecting the user table and user field.
"""
def verify_otp(otp, pin, secret, offset=60):
epoch_time = int(time.time())
time_start = int(str(epoch_time - offset)[:-1])
time_end = int(str(epoch_time + offset)[:-1])
for t in range(time_start - 1, time_end + 1):
to_hash = str(t) + secret + pin
hash = md5(to_hash).hexdigest()[:6]
if otp == hash:
return True
return False
def motp_auth_aux(email,
password,
db=db,
offset=time_offset):
if db:
user_data = db(db.auth_user.email == email).select().first()
if user_data:
if user_data['motp_secret'] and user_data['motp_pin']:
motp_secret = user_data['motp_secret']
motp_pin = user_data['motp_pin']
otp_check = verify_otp(
password, motp_pin, motp_secret, offset=offset)
if otp_check:
return True
else:
return False
else:
return False
return False
return motp_auth_aux
| apache-2.0 |
guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/core/backends/chrome/desktop_browser_finder_unittest.py | 8 | 8800 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.backends.chrome import desktop_browser_finder
from telemetry.core import browser_options
from telemetry.core.platform import desktop_device
from telemetry.unittest_util import system_stub
# This file verifies the logic for finding a browser instance on all platforms
# at once. It does so by providing stubs for the OS/sys/subprocess primitives
# that the underlying finding logic usually uses to locate a suitable browser.
# We prefer this approach to having to run the same test on every platform on
# which we want this code to work.
class FindTestBase(unittest.TestCase):
def setUp(self):
self._finder_options = browser_options.BrowserFinderOptions()
self._finder_options.chrome_root = '../../../'
self._finder_stubs = system_stub.Override(desktop_browser_finder,
['os', 'subprocess', 'sys'])
self._path_stubs = system_stub.Override(desktop_browser_finder.path,
['os', 'sys'])
def tearDown(self):
self._finder_stubs.Restore()
self._path_stubs.Restore()
@property
def _files(self):
return self._path_stubs.os.path.files
def DoFindAll(self):
return desktop_browser_finder.FindAllAvailableBrowsers(
self._finder_options, desktop_device.DesktopDevice())
def DoFindAllTypes(self):
browsers = self.DoFindAll()
return [b.browser_type for b in browsers]
def CanFindAvailableBrowsers(self):
return desktop_browser_finder.CanFindAvailableBrowsers()
def has_type(array, browser_type):
return len([x for x in array if x.browser_type == browser_type]) != 0
class FindSystemTest(FindTestBase):
def setUp(self):
super(FindSystemTest, self).setUp()
self._finder_stubs.sys.platform = 'win32'
self._path_stubs.sys.platform = 'win32'
def testFindProgramFiles(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append(
'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe')
self._path_stubs.os.program_files = 'C:\\Program Files'
self.assertIn('system', self.DoFindAllTypes())
def testFindProgramFilesX86(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append(
'C:\\Program Files(x86)\\Google\\Chrome\\Application\\chrome.exe')
self._path_stubs.os.program_files_x86 = 'C:\\Program Files(x86)'
self.assertIn('system', self.DoFindAllTypes())
def testFindLocalAppData(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append(
'C:\\Local App Data\\Google\\Chrome\\Application\\chrome.exe')
self._path_stubs.os.local_app_data = 'C:\\Local App Data'
self.assertIn('system', self.DoFindAllTypes())
class FindLocalBuildsTest(FindTestBase):
def setUp(self):
super(FindLocalBuildsTest, self).setUp()
self._finder_stubs.sys.platform = 'win32'
self._path_stubs.sys.platform = 'win32'
def testFindBuild(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
self.assertIn('release', self.DoFindAllTypes())
def testFindOut(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append('..\\..\\..\\out\\Release\\chrome.exe')
self.assertIn('release', self.DoFindAllTypes())
def testFindXcodebuild(self):
if not self.CanFindAvailableBrowsers():
return
self._files.append('..\\..\\..\\xcodebuild\\Release\\chrome.exe')
self.assertIn('release', self.DoFindAllTypes())
class OSXFindTest(FindTestBase):
def setUp(self):
super(OSXFindTest, self).setUp()
self._finder_stubs.sys.platform = 'darwin'
self._path_stubs.sys.platform = 'darwin'
self._files.append('/Applications/Google Chrome Canary.app/'
'Contents/MacOS/Google Chrome Canary')
self._files.append('/Applications/Google Chrome.app/' +
'Contents/MacOS/Google Chrome')
self._files.append(
'../../../out/Release/Chromium.app/Contents/MacOS/Chromium')
self._files.append(
'../../../out/Debug/Chromium.app/Contents/MacOS/Chromium')
self._files.append(
'../../../out/Release/Content Shell.app/Contents/MacOS/Content Shell')
self._files.append(
'../../../out/Debug/Content Shell.app/Contents/MacOS/Content Shell')
def testFindAll(self):
if not self.CanFindAvailableBrowsers():
return
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release',
'canary', 'system']))
class LinuxFindTest(FindTestBase):
def setUp(self):
super(LinuxFindTest, self).setUp()
self._finder_stubs.sys.platform = 'linux2'
self._path_stubs.sys.platform = 'linux2'
self._files.append('/foo/chrome')
self._files.append('../../../out/Release/chrome')
self._files.append('../../../out/Debug/chrome')
self._files.append('../../../out/Release/content_shell')
self._files.append('../../../out/Debug/content_shell')
self.has_google_chrome_on_path = False
this = self
def call_hook(*args, **kwargs): # pylint: disable=W0613
if this.has_google_chrome_on_path:
return 0
raise OSError('Not found')
self._finder_stubs.subprocess.call = call_hook
def testFindAllWithExact(self):
if not self.CanFindAvailableBrowsers():
return
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release']))
def testFindWithProvidedExecutable(self):
if not self.CanFindAvailableBrowsers():
return
self._finder_options.browser_executable = '/foo/chrome'
self.assertIn('exact', self.DoFindAllTypes())
def testFindWithProvidedApk(self):
if not self.CanFindAvailableBrowsers():
return
self._finder_options.browser_executable = '/foo/chrome.apk'
self.assertNotIn('exact', self.DoFindAllTypes())
def testFindUsingDefaults(self):
if not self.CanFindAvailableBrowsers():
return
self.has_google_chrome_on_path = True
self.assertIn('release', self.DoFindAllTypes())
del self._files[1]
self.has_google_chrome_on_path = True
self.assertIn('system', self.DoFindAllTypes())
self.has_google_chrome_on_path = False
del self._files[1]
self.assertEquals(['content-shell-debug', 'content-shell-release'],
self.DoFindAllTypes())
def testFindUsingRelease(self):
if not self.CanFindAvailableBrowsers():
return
self.assertIn('release', self.DoFindAllTypes())
class WinFindTest(FindTestBase):
def setUp(self):
super(WinFindTest, self).setUp()
self._finder_stubs.sys.platform = 'win32'
self._path_stubs.sys.platform = 'win32'
self._path_stubs.os.local_app_data = 'c:\\Users\\Someone\\AppData\\Local'
self._files.append('c:\\tmp\\chrome.exe')
self._files.append('..\\..\\..\\build\\Release\\chrome.exe')
self._files.append('..\\..\\..\\build\\Debug\\chrome.exe')
self._files.append('..\\..\\..\\build\\Release\\content_shell.exe')
self._files.append('..\\..\\..\\build\\Debug\\content_shell.exe')
self._files.append(self._path_stubs.os.local_app_data + '\\' +
'Google\\Chrome\\Application\\chrome.exe')
self._files.append(self._path_stubs.os.local_app_data + '\\' +
'Google\\Chrome SxS\\Application\\chrome.exe')
def testFindAllGivenDefaults(self):
if not self.CanFindAvailableBrowsers():
return
types = self.DoFindAllTypes()
self.assertEquals(set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release',
'system', 'canary']))
def testFindAllWithExact(self):
if not self.CanFindAvailableBrowsers():
return
self._finder_options.browser_executable = 'c:\\tmp\\chrome.exe'
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['exact',
'debug', 'release',
'content-shell-debug', 'content-shell-release',
'system', 'canary']))
def testFindAllWithExactApk(self):
if not self.CanFindAvailableBrowsers():
return
self._finder_options.browser_executable = 'c:\\tmp\\chrome_shell.apk'
types = self.DoFindAllTypes()
self.assertEquals(
set(types),
set(['debug', 'release',
'content-shell-debug', 'content-shell-release',
'system', 'canary']))
| bsd-3-clause |
miyakz1192/neutron | neutron/plugins/sriovnicagent/sriov_nic_agent.py | 15 | 14528 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import loopingcall
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
target = oslo_messaging.Target(version='1.1')
def __init__(self, context, agent, sg_agent):
super(SriovNicSwitchRpcCallbacks, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug("port_update received")
port = kwargs.get('port')
# Put the port mac address in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.agent.updated_devices.add(port['mac_address'])
LOG.debug("port_update RPC received for port: %s", port['id'])
class SriovNicSwitchAgent(object):
def __init__(self, physical_devices_mappings, exclude_devices,
polling_interval):
self.polling_interval = polling_interval
self.setup_eswitch_mgr(physical_devices_mappings,
exclude_devices)
configurations = {'device_mappings': physical_devices_mappings}
self.agent_state = {
'binary': 'neutron-sriov-nic-agent',
'host': cfg.CONF.host,
'topic': q_constants.L2_AGENT_TOPIC,
'configurations': configurations,
'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH,
'start_flag': True}
# Stores port update notifications for processing in the main loop
self.updated_devices = set()
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc)
self._setup_rpc()
# Initialize iteration counter
self.iter_num = 0
def _setup_rpc(self):
self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
# Handle updates from service
self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
self.sg_agent)]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
devices = len(self.eswitch_mgr.get_assigned_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
self.eswitch_mgr = esm.ESwitchManager(device_mappings, exclude_devices)
def scan_devices(self, registered_devices, updated_devices):
curr_devices = self.eswitch_mgr.get_assigned_devices()
device_info = {}
device_info['current'] = curr_devices
device_info['added'] = curr_devices - registered_devices
# we don't want to process updates for devices that don't exist
device_info['updated'] = updated_devices & curr_devices
# we need to clean up after devices are removed
device_info['removed'] = registered_devices - curr_devices
return device_info
def _device_info_has_changes(self, device_info):
return (device_info.get('added')
or device_info.get('updated')
or device_info.get('removed'))
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
self.sg_agent.prepare_devices_filter(device_info.get('added'))
if device_info.get('updated'):
self.sg_agent.refresh_firewall()
# Updated devices are processed the same as new ones, as their
# admin_state_up may have changed. The set union prevents duplicating
# work when a device is new and updated in the same polling iteration.
devices_added_updated = (set(device_info.get('added'))
| set(device_info.get('updated')))
if devices_added_updated:
resync_a = self.treat_devices_added_updated(devices_added_updated)
if device_info.get('removed'):
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_device(self, device, pci_slot, admin_state_up):
if self.eswitch_mgr.device_exists(device, pci_slot):
try:
self.eswitch_mgr.set_device_state(device, pci_slot,
admin_state_up)
except exc.SriovNicError:
LOG.exception(_LE("Failed to set device %s state"), device)
return
if admin_state_up:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
else:
LOG.info(_LI("No device with MAC %s defined on agent."), device)
def treat_devices_added_updated(self, devices):
try:
devices_details_list = self.plugin_rpc.get_devices_details_list(
self.context, devices, self.agent_id)
except Exception as e:
LOG.debug("Unable to get port details for devices "
"with MAC address %(devices)s: %(e)s",
{'devices': devices, 'e': e})
# resync is needed
return True
for device_details in devices_details_list:
device = device_details['device']
LOG.debug("Port with MAC address %s is added", device)
if 'port_id' in device_details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': device_details})
profile = device_details['profile']
self.treat_device(device_details['device'],
profile.get('pci_slot'),
device_details['admin_state_up'])
else:
LOG.info(_LI("Device with MAC %s not defined on plugin"),
device)
return False
def treat_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_LI("Removing device with mac_address %s"), device)
try:
dev_details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug("Removing port failed for device %(device)s "
"due to %(exc)s", {'device': device, 'exc': e})
resync = True
continue
if dev_details['exists']:
LOG.info(_LI("Port %s updated."), device)
else:
LOG.debug("Device %s not defined on plugin", device)
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
while True:
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
# Save updated devices dict to perform rollback in case
# resync would be needed, and then clear self.updated_devices.
# As the greenthread should not yield between these
            # two statements, this should be thread-safe.
updated_devices_copy = self.updated_devices
self.updated_devices = set()
try:
device_info = self.scan_devices(devices, updated_devices_copy)
if self._device_info_has_changes(device_info):
LOG.debug("Agent loop found changes! %s", device_info)
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_LE("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# Restore devices that were removed from this set earlier
# without overwriting ones that may have arrived since.
self.updated_devices |= updated_devices_copy
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
def __init__(self):
self.device_mappings = {}
self.exclude_devices = {}
def parse(self):
"""Parses device_mappings and exclude_devices.
        Parse and validate the consistency of both mappings.
"""
self.device_mappings = q_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self._validate()
def _validate(self):
"""Validate configuration.
        Validate that every network_device in exclude_devices
        exists in device_mappings.
"""
dev_net_set = set(self.device_mappings.itervalues())
for dev_name in self.exclude_devices.iterkeys():
if dev_name not in dev_net_set:
raise ValueError(_("Device name %(dev_name)s is missing from "
"physical_device_mappings") % {'dev_name':
dev_name})
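    # Example of the options this parser consumes (illustrative values; the
    # physical network, interface and PCI addresses are placeholders, and the
    # exclude_devices syntax is whatever config.parse_exclude_devices accepts):
    #
    #     [SRIOV_NIC]
    #     physical_device_mappings = physnet1:eth3
    #     exclude_devices = eth3:0000:07:00.2;0000:07:00.3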
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
config_parser = SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
except ValueError:
LOG.exception(_LE("Failed on Agent configuration parse. "
"Agent terminated!"))
raise SystemExit(1)
LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
polling_interval = cfg.CONF.AGENT.polling_interval
try:
agent = SriovNicSwitchAgent(device_mappings,
exclude_devices,
polling_interval)
except exc.SriovNicError:
LOG.exception(_LE("Agent Initialization Failed"))
raise SystemExit(1)
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
agent.daemon_loop()
if __name__ == '__main__':
main()
| apache-2.0 |
Oliver2213/NVDAYoutube-dl | addon/globalPlugins/nvdaYoutubeDL/youtube_dl/extractor/soundgasm.py | 149 | 2041 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SoundgasmIE(InfoExtractor):
IE_NAME = 'soundgasm'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_\-]+)/(?P<title>[0-9a-zA-Z_\-]+)'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
'md5': '010082a2c802c5275bb00030743e75ad',
'info_dict': {
'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
'ext': 'm4a',
'title': 'ytdl_Piano-sample',
'description': 'Royalty Free Sample Music'
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('title')
audio_title = mobj.group('user') + '_' + mobj.group('title')
webpage = self._download_webpage(url, display_id)
audio_url = self._html_search_regex(
r'(?s)m4a\:\s"([^"]+)"', webpage, 'audio URL')
audio_id = re.split('\/|\.', audio_url)[-2]
description = self._html_search_regex(
r'(?s)<li>Description:\s(.*?)<\/li>', webpage, 'description',
fatal=False)
return {
'id': audio_id,
'display_id': display_id,
'url': audio_url,
'title': audio_title,
'description': description
}
class SoundgasmProfileIE(InfoExtractor):
IE_NAME = 'soundgasm:profile'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl',
'info_dict': {
'id': 'ytdl',
},
'playlist_count': 1,
}
def _real_extract(self, url):
profile_id = self._match_id(url)
webpage = self._download_webpage(url, profile_id)
entries = [
self.url_result(audio_url, 'Soundgasm')
for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)]
return self.playlist_result(entries, profile_id)
| gpl-2.0 |
hasgeek/coaster | coaster/views/misc.py | 1 | 6508 | """
Miscellaneous view helpers
--------------------------
Helper functions for view handlers.
All items in this module can be imported directly from :mod:`coaster.views`.
"""
from urllib.parse import urlsplit
import re
from flask import Response, current_app, json, request
from flask import session as request_session
from flask import url_for
from werkzeug.exceptions import MethodNotAllowed, NotFound
from werkzeug.routing import RequestRedirect
__all__ = ['get_current_url', 'get_next_url', 'jsonp', 'endpoint_for']
__jsoncallback_re = re.compile(r'^[a-z$_][0-9a-z$_]*$', re.I)
def _index_url():
if request:
return request.script_root or '/'
else:
return '/'
def _clean_external_url(url):
if url.startswith(('http://', 'https://', '//')):
# Do the domains and ports match?
pnext = urlsplit(url)
preq = urlsplit(request.url)
if pnext.port != preq.port:
return ''
if not (
pnext.hostname == preq.hostname
or pnext.hostname.endswith('.' + preq.hostname)
):
return ''
return url
def get_current_url():
"""
Return the current URL including the query string as a relative path. If the app
    uses subdomains, return an absolute path.
"""
if current_app.config.get('SERVER_NAME') and (
# Check current hostname against server name, ignoring port numbers, if any
# (split on ':')
request.environ['HTTP_HOST'].split(':', 1)[0]
!= current_app.config['SERVER_NAME'].split(':', 1)[0]
):
return request.url
url = url_for(request.endpoint, **request.view_args)
query = request.query_string
if query:
return url + '?' + query.decode()
else:
return url
__marker = object()
def get_next_url(referrer=False, external=False, session=False, default=__marker):
"""
Get the next URL to redirect to. Don't return external URLs unless
explicitly asked for. This is to protect the site from being an unwitting
redirector to external URLs. Subdomains are okay, however.
This function looks for a ``next`` parameter in the request or in the session
(depending on whether parameter ``session`` is True). If no ``next`` is present,
it checks the referrer (if enabled), and finally returns either the provided
default (which can be any value including ``None``) or the script root
(typically ``/``).
"""
if session:
next_url = request_session.pop('next', None) or request.args.get('next', '')
else:
next_url = request.args.get('next', '')
if next_url and not external:
next_url = _clean_external_url(next_url)
if next_url:
return next_url
if default is __marker:
usedefault = False
else:
usedefault = True
if referrer and request.referrer:
if external:
return request.referrer
else:
return _clean_external_url(request.referrer) or (
default if usedefault else _index_url()
)
else:
return default if usedefault else _index_url()
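# Typical usage in a login view (illustrative; ``auth`` and the template name
# are placeholders, not part of this module):
#
#     @app.route('/login', methods=['GET', 'POST'])
#     def login():
#         if request.method == 'POST' and auth.verify(request.form):
#             return redirect(get_next_url(referrer=True))
#         return render_template('login.html.jinja2')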
def jsonp(*args, **kw):
"""
Returns a JSON response with a callback wrapper, if asked for.
Consider using CORS instead, as JSONP makes the client app insecure.
See the :func:`~coaster.views.decorators.cors` decorator.
"""
data = json.dumps(dict(*args, **kw), indent=2)
callback = request.args.get('callback', request.args.get('jsonp'))
if callback and __jsoncallback_re.search(callback) is not None:
data = callback + '(' + data + ');'
mimetype = 'application/javascript'
else:
mimetype = 'application/json'
return Response(data, mimetype=mimetype)
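# Behaviour sketch: a request for ``/api/data?callback=cb`` gets back
# ``cb({...});`` with mimetype ``application/javascript``; without a valid
# ``callback``/``jsonp`` parameter the same handler returns plain
# ``application/json``. For example (hypothetical route):
#
#     @app.route('/api/data')
#     def api_data():
#         return jsonp(status='ok', count=3)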
def endpoint_for(url, method=None, return_rule=False, follow_redirects=True):
"""
Given an absolute URL, retrieve the matching endpoint name (or rule) and
view arguments. Requires a current request context to determine runtime
environment.
:param str method: HTTP method to use (defaults to GET)
:param bool return_rule: Return the URL rule instead of the endpoint name
:param bool follow_redirects: Follow redirects to final endpoint
:return: Tuple of endpoint name or URL rule or `None`, view arguments
"""
parsed_url = urlsplit(url)
if not parsed_url.netloc:
# We require an absolute URL
return None, {}
# Take the current runtime environment...
environ = dict(request.environ)
# ...but replace the HTTP host with the URL's host...
environ['HTTP_HOST'] = parsed_url.netloc
# ...and the path with the URL's path (after discounting the app path, if not
# hosted at root).
environ['PATH_INFO'] = parsed_url.path[len(environ.get('SCRIPT_NAME', '')) :]
# Create a new request with this environment...
url_request = current_app.request_class(environ)
# ...and a URL adapter with the new request.
url_adapter = current_app.create_url_adapter(url_request)
# Run three hostname tests, one of which must pass:
# 1. Does the URL map have host matching enabled? If so, the URL adapter will
# validate the hostname.
if current_app.url_map.host_matching:
pass
# 2. If not, does the domain match? url_adapter.server_name will prefer
# app.config['SERVER_NAME'], but if that is not specified, it will take it from the
# environment.
elif parsed_url.netloc == url_adapter.server_name:
pass
# 3. If subdomain matching is enabled, does the subdomain match?
elif current_app.subdomain_matching and parsed_url.netloc.endswith(
'.' + url_adapter.server_name
):
pass
# If no test passed, we don't have a matching endpoint.
else:
return None, {}
# Now retrieve the endpoint or rule, watching for redirects or resolution failures
try:
return url_adapter.match(parsed_url.path, method, return_rule=return_rule)
except RequestRedirect as r:
# A redirect typically implies `/folder` -> `/folder/`
# This will not be a redirect response from a view, since the view isn't being
# called
if follow_redirects:
return endpoint_for(
r.new_url,
method=method,
return_rule=return_rule,
follow_redirects=follow_redirects,
)
except (NotFound, MethodNotAllowed):
pass
# If we got here, no endpoint was found.
return None, {}
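# Illustrative behaviour (the route and endpoint below are hypothetical):
# with a rule such as ``@app.route('/u/<name>', endpoint='user')``,
# ``endpoint_for('https://example.com/u/alice')`` returns
# ``('user', {'name': 'alice'})``, and ``(None, {})`` when nothing matches.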
| bsd-2-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/axes_grid/demo_edge_colorbar.py | 11 | 2597 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def get_demo_image():
import numpy as np
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
def demo_bottom_cbar(fig):
"""
A grid of 2x2 images with a colorbar for each column.
"""
    grid = AxesGrid(fig, 121,  # similar to subplot(121)
nrows_ncols = (2, 2),
axes_pad = 0.10,
share_all=True,
label_mode = "1",
cbar_location = "bottom",
cbar_mode="edge",
cbar_pad = 0.25,
cbar_size = "15%",
direction="column"
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("autumn"), plt.get_cmap("summer")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
cbar = grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label("Bar")
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_right_cbar(fig):
"""
A grid of 2x2 images. Each row has its own colorbar.
"""
    grid = AxesGrid(fig, 122, # similar to subplot(122)
nrows_ncols = (2, 2),
axes_pad = 0.10,
label_mode = "1",
share_all = True,
cbar_location="right",
cbar_mode="edge",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("spring"), plt.get_cmap("winter")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label('Foo')
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
if 1:
F = plt.figure(1, (5.5, 2.5))
F.subplots_adjust(left=0.05, right=0.93)
demo_bottom_cbar(F)
demo_right_cbar(F)
plt.draw()
plt.show()
| mit |
ramaganapathy1/AMuDA-Ir-back-end | vEnv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.py | 1151 | 11556 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
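# Illustrative sketch of the fallback behavior (not part of the public API):
def _parse_example():
    # A PEP 440 compliant string yields a Version...
    assert isinstance(parse("1.0.post1"), Version)
    # ...anything else falls back to a LegacyVersion.
    assert isinstance(parse("not-a-version"), LegacyVersion)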
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This will effectively sort the LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools prior to
    # its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
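# Illustrative consequence of the -1 epoch (not part of the public API): any
# LegacyVersion sorts before any PEP 440 Version.
def _legacy_sorts_first_example():
    return LegacyVersion("2013b") < Version("0.0.1")  # True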
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
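# Normalization examples (illustrative only, not part of the public API):
def _parse_letter_version_example():
    assert _parse_letter_version("alpha", None) == ("a", 0)
    assert _parse_letter_version("rev", "3") == ("post", 3)
    assert _parse_letter_version(None, "1") == ("post", 1)  # implicit post release, e.g. "1.0-1"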
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then re-reverse the rest back
    # into the correct order, make it a tuple and use that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
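# Ordering sketch tying the pieces above together (illustrative only):
def _ordering_example():
    # dev releases sort before pre-releases, which sort before the final release,
    # and a local version sorts after the same public version.
    assert Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")
    assert Version("1.0") < Version("1.0+local.1")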
| mit |
Nirvedh/CoarseCoherence | src/arch/x86/isa/insts/__init__.py | 91 | 2409 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["romutil",
"general_purpose",
"simd128",
"simd64",
"system",
"x87"]
microcode = '''
# X86 microcode
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause |
mozilla/django-badger | badger/templatetags/badger_tags.py | 3 | 2006 | # django
import django
from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404
from badger.models import Award, Badge
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
import hashlib
import urllib
from django.utils.translation import ugettext_lazy as _
if django.VERSION < (1, 7, 0):
from django.contrib.auth.models import SiteProfileNotAvailable
if django.VERSION >= (1, 7, 0):
class SiteProfileNotAvailable(Exception):
pass
register = template.Library()
@register.filter
def permissions_for(obj, user):
try:
return obj.get_permissions_for(user)
except:
return {}
@register.filter
def key(obj, name):
try:
return obj[name]
except:
return None
@register.simple_tag
def user_avatar(user, secure=False, size=256, rating='pg', default=''):
try:
profile = user.get_profile()
if profile.avatar:
return profile.avatar.url
except SiteProfileNotAvailable:
pass
except ObjectDoesNotExist:
pass
except AttributeError:
pass
base_url = (secure and 'https://secure.gravatar.com' or
'http://www.gravatar.com')
m = hashlib.md5(user.email)
return '%(base_url)s/avatar/%(hash)s?%(params)s' % dict(
base_url=base_url, hash=m.hexdigest(),
params=urllib.urlencode(dict(
s=size, d=default, r=rating
))
)
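# Direct-call sketch (illustrative; in a template this tag is used as
# {% user_avatar user %} after {% load badger_tags %}):
def _user_avatar_example(user):
    # Falls back to a Gravatar URL when the user has no profile avatar.
    return user_avatar(user, secure=True, size=64)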
@register.simple_tag
def award_image(award):
if award.image:
img_url = award.image.url
elif award.badge.image:
img_url = award.badge.image.url
else:
img_url = "/media/img/default-badge.png"
return img_url
@register.simple_tag
def user_award_list(badge, user):
if badge.allows_award_to(user):
return '<li><a class="award_badge" href="%s">%s</a></li>' % (reverse('badger.views.award_badge', args=[badge.slug, ]), _(u'Issue award'))
else:
return ''
| bsd-3-clause |
curiosityio/taiga-docker | taiga-back/taiga-back/taiga/projects/attachments/api.py | 3 | 3874 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path as path
import mimetypes
mimetypes.init()
from django.utils.translation import ugettext as _
from django.contrib.contenttypes.models import ContentType
from taiga.base import filters
from taiga.base import exceptions as exc
from taiga.base.api import ModelCrudViewSet
from taiga.base.api.mixins import BlockedByProjectMixin
from taiga.base.api.utils import get_object_or_404
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from . import permissions
from . import serializers
from . import models
class BaseAttachmentViewSet(HistoryResourceMixin, WatchedResourceMixin,
BlockedByProjectMixin, ModelCrudViewSet):
model = models.Attachment
serializer_class = serializers.AttachmentSerializer
filter_fields = ["project", "object_id"]
content_type = None
def update(self, *args, **kwargs):
partial = kwargs.get("partial", False)
if not partial:
raise exc.NotSupported(_("Partial updates are not supported"))
return super().update(*args, **kwargs)
def get_content_type(self):
app_name, model = self.content_type.split(".", 1)
return get_object_or_404(ContentType, app_label=app_name, model=model)
def pre_save(self, obj):
if not obj.id:
obj.content_type = self.get_content_type()
obj.owner = self.request.user
obj.size = obj.attached_file.size
obj.name = path.basename(obj.attached_file.name)
if obj.project_id != obj.content_object.project_id:
            raise exc.WrongArguments(_("Project ID does not match between object and project"))
super().pre_save(obj)
def post_delete(self, obj):
        # NOTE: When destroying an attachment, the content_object changes
        # after, not before
self.persist_history_snapshot(obj, delete=True)
super().pre_delete(obj)
def get_object_for_snapshot(self, obj):
return obj.content_object
class UserStoryAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.UserStoryAttachmentPermission,)
filter_backends = (filters.CanViewUserStoryAttachmentFilterBackend,)
content_type = "userstories.userstory"
class IssueAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.IssueAttachmentPermission,)
filter_backends = (filters.CanViewIssueAttachmentFilterBackend,)
content_type = "issues.issue"
class TaskAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.TaskAttachmentPermission,)
filter_backends = (filters.CanViewTaskAttachmentFilterBackend,)
content_type = "tasks.task"
class WikiAttachmentViewSet(BaseAttachmentViewSet):
permission_classes = (permissions.WikiAttachmentPermission,)
filter_backends = (filters.CanViewWikiAttachmentFilterBackend,)
content_type = "wiki.wikipage"
| mit |
kalebhartje/schoolboost | cms/envs/acceptance_static.py | 3 | 2314 | """
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
This is used in the django-admin call as acceptance.py
contains random seeding, causing django-admin to create a random collection
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .test import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
# Disable warnings for acceptance tests, to make the logs readable
import logging
logging.disable(logging.ERROR)
import os
import random
MODULESTORE_OPTIONS = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'host': 'localhost',
'db': 'acceptance_xmodule',
'collection': 'acceptance_modulestore',
'fs_root': TEST_ROOT / "data",
'render_template': 'mitxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'OPTIONS': MODULESTORE_OPTIONS
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'OPTIONS': MODULESTORE_OPTIONS
},
'draft': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'OPTIONS': MODULESTORE_OPTIONS
}
}
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'OPTIONS': {
'host': 'localhost',
'db': 'acceptance_xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
# Set this up so that rake lms[acceptance] and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_mitx.db",
'TEST_NAME': TEST_ROOT / "db" / "test_mitx.db",
}
}
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('contentstore',)
LETTUCE_SERVER_PORT = random.randint(1024, 65535)
LETTUCE_BROWSER = 'chrome'
| agpl-3.0 |
egoid/baytree | lib/python2.7/site-packages/django/contrib/gis/gdal/prototypes/generation.py | 68 | 4298 | """
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_int64, c_void_p
from functools import partial
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode, check_const_string, check_errcode, check_geom,
check_geom_offset, check_pointer, check_srs, check_str_arg, check_string,
)
class gdal_char_p(c_char_p):
pass
def double_output(func, argtypes, errcheck=False, strarg=False, cpl=False):
"Generates a ctypes function that returns a double value."
func.argtypes = argtypes
func.restype = c_double
if errcheck:
func.errcheck = partial(check_arg_errcode, cpl=cpl)
if strarg:
func.errcheck = check_str_arg
return func
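# Illustrative sketch of how these factories are used by the prototype modules
# (assumes the GDAL C library is loadable; OGR_G_Area takes a geometry handle
# and returns its area as a C double):
def _double_output_example():
    from django.contrib.gis.gdal.libgdal import lgdal
    get_area = double_output(lgdal.OGR_G_Area, [c_void_p])
    return get_area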
def geom_output(func, argtypes, offset=None):
"""
Generates a function that returns a Geometry either by reference
or directly (if the return_geom keyword is set to True).
"""
# Setting the argument types
func.argtypes = argtypes
if not offset:
# When a geometry pointer is directly returned.
func.restype = c_void_p
func.errcheck = check_geom
else:
# Error code returned, geometry is returned by-reference.
func.restype = c_int
def geomerrcheck(result, func, cargs):
return check_geom_offset(result, func, cargs, offset)
func.errcheck = geomerrcheck
return func
def int_output(func, argtypes):
"Generates a ctypes function that returns an integer value."
func.argtypes = argtypes
func.restype = c_int
return func
def int64_output(func, argtypes):
"Generates a ctypes function that returns a 64-bit integer value."
func.argtypes = argtypes
func.restype = c_int64
return func
def srs_output(func, argtypes):
"""
Generates a ctypes prototype for the given function with
the given C arguments that returns a pointer to an OGR
Spatial Reference System.
"""
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_srs
return func
def const_string_output(func, argtypes, offset=None, decoding=None, cpl=False):
func.argtypes = argtypes
if offset:
func.restype = c_int
else:
func.restype = c_char_p
def _check_const(result, func, cargs):
res = check_const_string(result, func, cargs, offset=offset, cpl=cpl)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_const
return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
"""
Generates a ctypes prototype for the given function with the
given argument types that returns a string from a GDAL pointer.
    The `const` flag indicates whether the allocated pointer should
    be freed via the GDAL library routine VSIFree -- but this applies
    only when `str_result` is True.
"""
func.argtypes = argtypes
if str_result:
# Use subclass of c_char_p so the error checking routine
# can free the memory at the pointer's address.
func.restype = gdal_char_p
else:
# Error code is returned
func.restype = c_int
# Dynamically defining our error-checking function with the
# given offset.
def _check_str(result, func, cargs):
res = check_string(result, func, cargs, offset=offset, str_result=str_result)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_str
return func
def void_output(func, argtypes, errcheck=True, cpl=False):
"""
    For functions that return either an error code that needs to be
    examined, or nothing at all.
"""
if argtypes:
func.argtypes = argtypes
if errcheck:
# `errcheck` keyword may be set to False for routines that
# return void, rather than a status code.
func.restype = c_int
func.errcheck = partial(check_errcode, cpl=cpl)
else:
func.restype = None
return func
def voidptr_output(func, argtypes, errcheck=True):
"For functions that return c_void_p."
func.argtypes = argtypes
func.restype = c_void_p
if errcheck:
func.errcheck = check_pointer
return func
| mit |
digibyte/digibyte | contrib/gitian-build.py | 1 | 12593 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte-core/gitian.sigs.git'])
if not os.path.isdir('digibyte-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte-core/digibyte-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('digibyte'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte/digibyte.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('digibyte-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://digibytecore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../digibyte/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/digibyte-*.tar.gz build/out/src/digibyte-*.tar.gz ../digibyte-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/digibyte-*-win-unsigned.tar.gz inputs/digibyte-win-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/digibyte-*.zip build/out/digibyte-*.exe ../digibyte-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/digibyte-*-osx-unsigned.tar.gz inputs/digibyte-osx-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/digibyte-*.tar.gz build/out/digibyte-*.dmg ../digibyte-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/digibyte-*win64-setup.exe ../digibyte-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/digibyte-*win32-setup.exe ../digibyte-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/digibyte-osx-signed.dmg ../digibyte-binaries/'+args.version+'/digibyte-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/digibyte/digibyte', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
    # Set environment variable USE_LXC or USE_DOCKER to let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
args.commit = ('' if args.commit else 'v') + args.version
print(args.commit)
if args.setup:
setup()
os.chdir('digibyte')
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
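# Example invocations (signer name and version number are illustrative):
#   ./gitian-build.py --setup satoshi 6.16.5
#   ./gitian-build.py -b --detach-sign --no-commit satoshi 6.16.5
#   ./gitian-build.py --verify satoshi 6.16.5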
| mit |
asterix135/infonex_crm | home/views.py | 1 | 9135 | import datetime
from calendar import monthrange, month_name
from pprint import pprint
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Count, Sum
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.utils import timezone
from crm.models import Contact
from registration.models import RegDetails, Invoice
from .mixins import CurrentRegistrationCounts
from . import charts
from home.constants import PRE_CRM_TOTALS
from home.sales_sheet_mixins import *
#############################
# primary page views
#############################
class Index(TemplateView, CurrentRegistrationCounts):
template_name = 'home/index.html'
def _get_sales_data(self):
current_month = datetime.datetime.today().month
current_year = datetime.datetime.today().year
user = self.request.user
if (user.groups.filter(name='sales').exists() or
user.groups.filter(name='sponsorship').exists()):
user_bookings = Invoice.objects.filter(sales_credit=user)
elif user.groups.filter(name='conference_developer').exists():
user_bookings = Invoice.objects.filter(
reg_details__conference__developer=user
)
elif (user.groups.filter(name='management').exists or user.is_superuser):
user_bookings = Invoice.objects.all()
else:
user_bookings = Invoice.objects.none()
month_bookings = user_bookings.filter(
reg_details__register_date__month=current_month,
reg_details__register_date__year=current_year
).aggregate(Sum('pre_tax_price'))['pre_tax_price__sum']
monthly_payments = user_bookings.filter(
payment_date__month=current_month,
payment_date__year=current_year
).aggregate(Sum('pre_tax_price'))['pre_tax_price__sum']
if not month_bookings:
month_bookings = 0
if not monthly_payments:
monthly_payments = 0
return month_bookings, monthly_payments
def _get_month_regs(self):
month = self.request.GET.get('month',
datetime.datetime.today().month)
year = self.request.GET.get('year',
datetime.datetime.today().year)
user = self.request.user
month_regs = RegDetails.objects.filter(
register_date__year=year,
register_date__month=month,
invoice__sales_credit=user,
).order_by('register_date')
return month_regs
def _get_month_payments(self):
month = self.request.GET.get('month',
datetime.datetime.today().month)
year = self.request.GET.get('year',
datetime.datetime.today().year)
user = self.request.user
month_payments = RegDetails.objects.filter(
invoice__payment_date__year=year,
invoice__payment_date__month=month,
invoice__sales_credit=user,
).order_by('register_date')
return month_payments
def get_context_data(self, **kwargs):
context = super(Index, self).get_context_data(**kwargs)
user = self.request.user
context['reg_permission_ok'] = (
user.groups.filter(name='db_admin').exists() or
user.groups.filter(name='registration').exists() or
user.groups.filter(name='management').exists() or
user.is_superuser
)
        context['today_contacts'] = Contact.objects.filter(
            author=user,
            date_of_contact__date=datetime.datetime.today().date()
        ).count()
month_sales, month_payments = self._get_sales_data()
context['month_sales'] = month_sales
context['month_payments'] = month_payments
if user.groups.filter(name='sales').exists() or \
user.groups.filter(name='sponsorship').exists() or \
user.is_superuser:
context['month_reg_list'] = self._get_month_regs()
context['month_payment_list'] = self._get_month_payments()
context['registration_count_list'] = self.get_current_reg_counts()
return context
class SalesSheetView(TemplateView, CorporateSalesTotals, HistoricalSalesData):
template_name = 'home/sales_sheet.html'
def _list_salespeople(self):
return User.objects.filter(groups__name='sales', is_active=True)
def _list_pd(self):
return User.objects.filter(
groups__name='conference_developer', is_active=True
)
def _individual_sales_for_period(self, employee, start_date, end_date):
is_for_sales = employee.groups.filter(name='sales').exists()
is_for_pd = employee.groups.filter(name='conference_developer').exists()
if is_for_sales and not is_for_pd:
return Invoice.objects.filter(
reg_details__register_date__gte = start_date,
reg_details__register_date__lte = end_date,
sales_credit = employee
).aggregate(Sum('pre_tax_price'))['pre_tax_price__sum']
elif is_for_pd:
return 100000
else:
return 0
# def generate_sales_team_data(self):
# today = datetime.date.today()
# sales_team = self._list_salespeople()
# sales_team_data = {}
# for team_member in sales_team:
# personal_sales = {}
# for years_ago in range(2):
# year_data = {}
# year = today.year - years_ago
# for month in range(1, 13):
# if years_ago == 0 and month > today.month:
# year_data[month_name[month]] = 0
# range_start = datetime.datetime.strptime(
# f'{int(year)}-{int(month)}-01',
# '%Y-%m-%d'
# )
# start_date, end_date = self._month_start_and_end_dates(
# range_start
# )
# year_data[month_name[month]] = self._individual_sales_for_period(
# team_member, start_date, end_date
# )
# personal_sales[str(year)] = year_data
# sales_team_data[team_member.first_name] = personal_sales
#
# pprint(sales_team_data)
#
# return sales_team_data
def get_context_data(self, **kwargs):
current_month = datetime.date.today().month
current_year = datetime.date.today().year
context=super(SalesSheetView, self).get_context_data(**kwargs)
context['test'] = 'foo'
context['current_month_sales'] = self.get_all_monthly_data(
current_month, current_year
)
context['ytd_sales'] = self.get_year_to_date_data(
current_month, current_year
)
historical_sales_data = self.get_year_historical()
        context['historical_table_headers'] = historical_sales_data[0]
context['historical_sales'] = historical_sales_data[1]
context['historical_table_totals'] = historical_sales_data[2]
context['sales_team_data'] = None
return context
###########################
# Page elements
###########################
@login_required
def recent_contact_chart(request):
user = request.user
contact_counts = []
today_contacts = Contact.objects.filter(
author=user,
date_of_contact__date=datetime.datetime.today().date()
)
for i in range (0, 8):
if timezone.now().isoweekday() - i not in [0, -1]:
day_count = Contact.objects.filter(
author=user,
date_of_contact__date=(
timezone.now()-datetime.timedelta(days=i)
).date()
).count()
contact_counts.append(day_count)
if timezone.now().isoweekday() == 2:
labels = ['Today', 'Monday', 'Friday', 'Thursday',
'Wednesday', 'Tuesday']
elif timezone.now().isoweekday() == 3:
labels = ['Today', 'Tuesday', 'Monday', 'Friday',
'Thursday', 'Wednesday']
elif timezone.now().isoweekday() == 4:
labels = ['Today', 'Wednesday', 'Tuesday', 'Monday',
'Friday', 'Thursday']
elif timezone.now().isoweekday() == 5:
labels = ['Today', 'Thursday', 'Wednesday', 'Tuesday',
'Monday', 'Friday']
else:
labels = ['Today', 'Friday', 'Thursday', 'Wednesday',
'Tuesday', 'Monday']
contact_chart = charts.MyBarChartDrawing()
contact_chart.chart.data = [contact_counts]
contact_chart.title.text = 'Contacts over the Past Week'
contact_chart.chart.categoryAxis.categoryNames = labels
chart_object = contact_chart.asString('png')
return HttpResponse(chart_object, 'image/png')
###########################
# AJAX elements
###########################
| mit |
supriyasingh01/github_basics | Internetworking Distributed Project/finalProject/ovs/pox-master/pox/openflow/nicira_ext.py | 32 | 3980 | # Copyright 2011 Andreas Wundsam
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
import struct
from pox.lib.util import initHelper
# Nicira Vendor extensions. Welcome to embrace-and-extend-town
VENDOR_ID = 0x00002320
# sub_types
ROLE_REQUEST = 10
ROLE_REPLY = 11
# role request / reply patterns
ROLE_OTHER = 0
ROLE_MASTER = 1
ROLE_SLAVE = 2
class nx_data(object):
""" base class for the data field of Nicira vendor extension
commands. Picked from the floodlight source code.
"""
def __init__ (self, **kw):
self.subtype = 0
self.length = 4
initHelper(self, kw)
def _assert (self):
return (True, None)
def pack (self, assertstruct=True):
if(assertstruct):
if(not self._assert()[0]):
return None
packed = ""
packed += struct.pack("!L", self.subtype)
return packed
def unpack (self, binaryString):
if (len(binaryString) < 4):
return binaryString
(self.subtype,) = struct.unpack_from("!L", binaryString, 0)
return binaryString[4:]
def __len__ (self):
return 4
def __eq__ (self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def __ne__ (self, other): return not self.__eq__(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += prefix + 'subtype: ' + str(self.subtype) + '\n'
return outstr
class role_data(nx_data):
""" base class for the data field of nx role requests."""
def __init__ (self, subtype, **kw):
nx_data.__init__(self)
self.subtype = subtype
self.role = ROLE_OTHER
self.length = 8
initHelper(self, kw)
def _assert (self):
return (True, None)
def pack (self, assertstruct=True):
if(assertstruct):
if(not self._assert()[0]):
return None
packed = ""
packed += nx_data.pack(self)
packed += struct.pack("!L", self.role)
return packed
def unpack (self, binaryString):
if (len(binaryString) < 8):
return binaryString
nx_data.unpack(self, binaryString[0:])
(self.role,) = struct.unpack_from("!L", binaryString, 4)
return binaryString[8:]
def __len__ (self):
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not nx_data.__eq__(self, other): return False
if self.role != other.role: return False
return True
def __ne__ (self, other): return not self.__eq__(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += nx_data.show(self, prefix + ' ')
outstr += prefix + 'role: ' + str(self.role) + '\n'
return outstr
class role_request_data(role_data):
""" Role request. C->S """
def __init__ (self, **kw):
role_data.__init__(self, ROLE_REQUEST, **kw)
class role_reply_data(role_data):
""" Role reply S->C """
def __init__ (self, **kw):
role_data.__init__(self, ROLE_REPLY, **kw)
_nx_subtype_to_type = {
ROLE_REQUEST: role_request_data,
ROLE_REPLY: role_reply_data
}
def unpack_vendor_data_nx(data):
if len(data) < 4: raise RuntimeError("NX vendor data<4 bytes")
nx = nx_data()
nx.unpack(data)
if nx.subtype in _nx_subtype_to_type:
res = _nx_subtype_to_type[nx.subtype]()
res.unpack(data)
return res
else:
raise NotImplementedError("subtype not implemented: %d" % nx.subtype)
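# Round-trip sketch (illustrative only): build a role-request payload and parse
# it back with unpack_vendor_data_nx().
def _role_request_roundtrip_example():
  req = role_request_data(role=ROLE_MASTER)
  raw = req.pack()
  parsed = unpack_vendor_data_nx(raw)
  assert isinstance(parsed, role_request_data)
  assert parsed.role == ROLE_MASTER
  return parsed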
| cc0-1.0 |